cache.cc (12348:bef2d9d3c353) vs cache.cc (12349:47f454120200)
1/*
2 * Copyright (c) 2010-2017 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software

--- 30 unchanged lines hidden (view full) ---

39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Erik Hallnor
42 * Dave Greene
43 * Nathan Binkert
44 * Steve Reinhardt
45 * Ron Dreslinski
46 * Andreas Sandberg
1/*
2 * Copyright (c) 2010-2017 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software

--- 30 unchanged lines hidden (view full) ---

39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Erik Hallnor
42 * Dave Greene
43 * Nathan Binkert
44 * Steve Reinhardt
45 * Ron Dreslinski
46 * Andreas Sandberg
47 * Nikos Nikoleris
47 */
48
49/**
50 * @file
51 * Cache definitions.
52 */
53
54#include "mem/cache/cache.hh"

--- 262 unchanged lines hidden (view full) ---

317
318 // Here lat is the value passed as a parameter to accessBlock(),
319 // which can modify it.
320 blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat);
321
322 DPRINTF(Cache, "%s %s\n", pkt->print(),
323 blk ? "hit " + blk->print() : "miss");
324
48 */
49
50/**
51 * @file
52 * Cache definitions.
53 */
54
55#include "mem/cache/cache.hh"

--- 262 unchanged lines hidden (view full) ---

318
319 // Here lat is the value passed as a parameter to accessBlock(),
320 // which can modify it.
321 blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat);
322
323 DPRINTF(Cache, "%s %s\n", pkt->print(),
324 blk ? "hit " + blk->print() : "miss");
325
326 if (pkt->req->isCacheMaintenance()) {
327 // A cache maintenance operation is always forwarded to the
328 // memory below even if the block is found in dirty state.
325
329
330 // We defer any changes to the state of the block until we
331 // create and mark as in service the mshr for the downstream
332 // packet.
333 return false;
334 }
335
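
Aside: a minimal, self-contained C++ sketch of the behaviour described in the comment above; this is not gem5 code -- ToyBlk, ToyPkt and toyAccess() are made-up stand-ins for CacheBlk, Packet and Cache::access().

#include <cstdio>

struct ToyBlk { bool valid = false; bool dirty = false; };

struct ToyPkt {
    bool isCacheMaintenance;  // clean/invalidate towards the PoC/PoU
    bool isWrite;
};

// Returns true if the request was satisfied at this cache level.
bool toyAccess(const ToyPkt &pkt, ToyBlk *blk)
{
    if (pkt.isCacheMaintenance) {
        // Always treated as a miss, even on a dirty hit: the request gets an
        // MSHR and is forwarded below; the block state is left untouched
        // until that downstream packet is marked in service.
        return false;
    }
    if (blk && blk->valid) {
        if (pkt.isWrite)
            blk->dirty = true;
        return true;              // ordinary hit
    }
    return false;                 // ordinary miss
}

int main()
{
    ToyBlk dirty_blk{true, true};
    ToyPkt cmo{true, false};
    ToyPkt read{false, false};
    std::printf("CMO satisfied locally: %d\n", toyAccess(cmo, &dirty_blk));
    std::printf("read satisfied locally: %d\n", toyAccess(read, &dirty_blk));
    return 0;
}
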
326 if (pkt->isEviction()) {
327 // We check for presence of block in above caches before issuing
328 // Writeback or CleanEvict to write buffer. Therefore the only
329 // possible cases can be of a CleanEvict packet coming from above
330 // encountering a Writeback generated in this cache peer cache and
331 // waiting in the write buffer. Cases of upper level peer caches
332 // generating CleanEvict and Writeback or simply CleanEvict and
333 // CleanEvict almost simultaneously will be caught by snoops sent out

--- 311 unchanged lines hidden (view full) ---

645 // @todo This should really enqueue the packet rather
646 bool M5_VAR_USED success = memSidePort->sendTimingReq(pkt);
647 assert(success);
648 return true;
649 }
650
651 promoteWholeLineWrites(pkt);
652
336 if (pkt->isEviction()) {
337 // We check for presence of block in above caches before issuing
338 // Writeback or CleanEvict to write buffer. Therefore the only
339 // possible cases can be of a CleanEvict packet coming from above
340 // encountering a Writeback generated in this cache peer cache and
341 // waiting in the write buffer. Cases of upper level peer caches
342 // generating CleanEvict and Writeback or simply CleanEvict and
343 // CleanEvict almost simultaneously will be caught by snoops sent out

--- 311 unchanged lines hidden (view full) ---

655 // @todo This should really enqueue the packet rather
656 bool M5_VAR_USED success = memSidePort->sendTimingReq(pkt);
657 assert(success);
658 return true;
659 }
660
661 promoteWholeLineWrites(pkt);
662
663 // Cache maintenance operations have to visit all the caches down
664 // to the specified xbar (PoC, PoU, etc.). Even if a cache above
665 // is responding we forward the packet to the memory below rather
666 // than creating an express snoop.
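
Aside: a simplified stand-in (not gem5 code) for the rule stated in the comment above, reduced to a single predicate over two flags.

#include <cstdio>

struct ReqInfo {
    bool cacheResponding;     // an upper-level cache owns the line (M or O)
    bool isCacheMaintenance;  // clean/invalidate towards the PoC/PoU
};

// True if the request keeps following the normal path towards memory below;
// false if it is short-circuited via the express-snoop path instead.
bool followsNormalDownstreamPath(const ReqInfo &r)
{
    // Maintenance operations must visit every cache level down to the
    // destination xbar, so a responding upper cache does not divert them.
    return !r.cacheResponding || r.isCacheMaintenance;
}

int main()
{
    std::printf("%d\n", followsNormalDownstreamPath({false, false})); // 1
    std::printf("%d\n", followsNormalDownstreamPath({true,  false})); // 0
    std::printf("%d\n", followsNormalDownstreamPath({true,  true}));  // 1
    return 0;
}
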
653 if (pkt->cacheResponding()) {
654 // a cache above us (but not where the packet came from) is
655 // responding to the request, in other words it has the line
656 // in Modified or Owned state
657 DPRINTF(Cache, "Cache above responding to %s: not responding\n",
658 pkt->print());
659
660 // if the packet needs the block to be writable, and the cache

--- 97 unchanged lines hidden (view full) ---

758 // hit (for all other request types)
759
760 if (prefetcher && (prefetchOnAccess ||
761 (blk && blk->wasPrefetched()))) {
762 if (blk)
763 blk->status &= ~BlkHWPrefetched;
764
765 // Don't notify on SWPrefetch
667 if (pkt->cacheResponding()) {
668 // a cache above us (but not where the packet came from) is
669 // responding to the request, in other words it has the line
670 // in Modified or Owned state
671 DPRINTF(Cache, "Cache above responding to %s: not responding\n",
672 pkt->print());
673
674 // if the packet needs the block to be writable, and the cache

--- 97 unchanged lines hidden (view full) ---

772 // hit (for all other request types)
773
774 if (prefetcher && (prefetchOnAccess ||
775 (blk && blk->wasPrefetched()))) {
776 if (blk)
777 blk->status &= ~BlkHWPrefetched;
778
779 // Don't notify on SWPrefetch
766 if (!pkt->cmd.isSWPrefetch())
780 if (!pkt->cmd.isSWPrefetch()) {
781 assert(!pkt->req->isCacheMaintenance());
767 next_pf_time = prefetcher->notify(pkt);
782 next_pf_time = prefetcher->notify(pkt);
783 }
768 }
769
770 if (needsResponse) {
771 pkt->makeTimingResponse();
772 // @todo: Make someone pay for this
773 pkt->headerDelay = pkt->payloadDelay = 0;
774
775 // In this case we are considering request_time that takes

--- 74 unchanged lines hidden (view full) ---

850
851 // Coalesce unless it was a software prefetch (see above).
852 if (pkt) {
853 assert(!pkt->isWriteback());
854 // CleanEvicts corresponding to blocks which have
855 // outstanding requests in MSHRs are simply sunk here
856 if (pkt->cmd == MemCmd::CleanEvict) {
857 pendingDelete.reset(pkt);
784 }
785
786 if (needsResponse) {
787 pkt->makeTimingResponse();
788 // @todo: Make someone pay for this
789 pkt->headerDelay = pkt->payloadDelay = 0;
790
791 // In this case we are considering request_time that takes

--- 74 unchanged lines hidden (view full) ---

866
867 // Coalesce unless it was a software prefetch (see above).
868 if (pkt) {
869 assert(!pkt->isWriteback());
870 // CleanEvicts corresponding to blocks which have
871 // outstanding requests in MSHRs are simply sunk here
872 if (pkt->cmd == MemCmd::CleanEvict) {
873 pendingDelete.reset(pkt);
874 } else if (pkt->cmd == MemCmd::WriteClean) {
875 // A WriteClean should never coalesce with any
876 // outstanding cache maintenance requests.
877
878 // We use forward_time here because there is an
879 // uncached memory write, forwarded to WriteBuffer.
880 allocateWriteBuffer(pkt, forward_time);
858 } else {
859 DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
860 pkt->print());
861
862 assert(pkt->req->masterId() < system->maxMasters());
863 mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
864 // We use forward_time here because it is the same
865 // considering new targets. We have multiple

--- 17 unchanged lines hidden (view full) ---

883 // satisfied or not, regardless of whether the request is in the MSHR
884 // or not. The request could be a ReadReq hit, but still not
885 // satisfied (potentially because of a prior write to the same
886 // cache line). So, even when not satisfied, there is an MSHR
887 // already allocated for this, and we need to let the prefetcher
888 // know about the request
889 if (prefetcher) {
890 // Don't notify on SWPrefetch
881 } else {
882 DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
883 pkt->print());
884
885 assert(pkt->req->masterId() < system->maxMasters());
886 mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
887 // We use forward_time here because it is the same
888 // considering new targets. We have multiple

--- 17 unchanged lines hidden (view full) ---

906 // satisfied or not, regardless of whether the request is in the MSHR
907 // or not. The request could be a ReadReq hit, but still not
908 // satisfied (potentially because of a prior write to the same
909 // cache line). So, even when not satisfied, there is an MSHR
910 // already allocated for this, and we need to let the prefetcher
911 // know about the request
912 if (prefetcher) {
913 // Don't notify on SWPrefetch
891 if (!pkt->cmd.isSWPrefetch())
914 if (!pkt->cmd.isSWPrefetch() &&
915 !pkt->req->isCacheMaintenance())
892 next_pf_time = prefetcher->notify(pkt);
893 }
894 }
895 } else {
896 // no MSHR
897 assert(pkt->req->masterId() < system->maxMasters());
898 if (pkt->req->isUncacheable()) {
899 mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;

--- 21 unchanged lines hidden (view full) ---

921 // that yet. Note that we do need to leave the
922 // block valid so that it stays in the cache, in
923 // case we get an upgrade response (and hence no
924 // new data) when the write miss completes.
925 // As long as CPUs do proper store/load forwarding
926 // internally, and have a sufficiently weak memory
927 // model, this is probably unnecessary, but at some
928 // point it must have seemed like we needed it...
916 next_pf_time = prefetcher->notify(pkt);
917 }
918 }
919 } else {
920 // no MSHR
921 assert(pkt->req->masterId() < system->maxMasters());
922 if (pkt->req->isUncacheable()) {
923 mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;

--- 21 unchanged lines hidden (view full) ---

945 // that yet. Note that we do need to leave the
946 // block valid so that it stays in the cache, in
947 // case we get an upgrade response (and hence no
948 // new data) when the write miss completes.
949 // As long as CPUs do proper store/load forwarding
950 // internally, and have a sufficiently weak memory
951 // model, this is probably unnecessary, but at some
952 // point it must have seemed like we needed it...
929 assert(pkt->needsWritable());
930 assert(!blk->isWritable());
953 assert((pkt->needsWritable() && !blk->isWritable()) ||
954 pkt->req->isCacheMaintenance());
931 blk->status &= ~BlkReadable;
932 }
933 // Here we are using forward_time, modelling the latency of
934 // a miss (outbound) just as forwardLatency, neglecting the
935 // lookupLatency component.
936 allocateMissBuffer(pkt, forward_time);
937 }
938
939 if (prefetcher) {
940 // Don't notify on SWPrefetch
955 blk->status &= ~BlkReadable;
956 }
957 // Here we are using forward_time, modelling the latency of
958 // a miss (outbound) just as forwardLatency, neglecting the
959 // lookupLatency component.
960 allocateMissBuffer(pkt, forward_time);
961 }
962
963 if (prefetcher) {
964 // Don't notify on SWPrefetch
941 if (!pkt->cmd.isSWPrefetch())
965 if (!pkt->cmd.isSWPrefetch() &&
966 !pkt->req->isCacheMaintenance())
942 next_pf_time = prefetcher->notify(pkt);
943 }
944 }
945 }
946
947 if (next_pf_time != MaxTick)
948 schedMemSideSendEvent(next_pf_time);
949

--- 6 unchanged lines hidden (view full) ---

956{
957 // should never see evictions here
958 assert(!cpu_pkt->isEviction());
959
960 bool blkValid = blk && blk->isValid();
961
962 if (cpu_pkt->req->isUncacheable() ||
963 (!blkValid && cpu_pkt->isUpgrade()) ||
967 next_pf_time = prefetcher->notify(pkt);
968 }
969 }
970 }
971
972 if (next_pf_time != MaxTick)
973 schedMemSideSendEvent(next_pf_time);
974

--- 6 unchanged lines hidden (view full) ---

981{
982 // should never see evictions here
983 assert(!cpu_pkt->isEviction());
984
985 bool blkValid = blk && blk->isValid();
986
987 if (cpu_pkt->req->isUncacheable() ||
988 (!blkValid && cpu_pkt->isUpgrade()) ||
964 cpu_pkt->cmd == MemCmd::InvalidateReq) {
989 cpu_pkt->cmd == MemCmd::InvalidateReq || cpu_pkt->isClean()) {
965 // uncacheable requests and upgrades from upper-level caches
966 // that missed completely just go through as is
967 return nullptr;
968 }
969
970 assert(cpu_pkt->needsResponse());
971
972 MemCmd cmd;

--- 60 unchanged lines hidden (view full) ---

1033 // Forward the request if the system is in cache bypass mode.
1034 if (system->bypassCaches())
1035 return ticksToCycles(memSidePort->sendAtomic(pkt));
1036
1037 promoteWholeLineWrites(pkt);
1038
1039 // follow the same flow as in recvTimingReq, and check if a cache
1040 // above us is responding
990 // uncacheable requests and upgrades from upper-level caches
991 // that missed completely just go through as is
992 return nullptr;
993 }
994
995 assert(cpu_pkt->needsResponse());
996
997 MemCmd cmd;

--- 60 unchanged lines hidden (view full) ---

1058 // Forward the request if the system is in cache bypass mode.
1059 if (system->bypassCaches())
1060 return ticksToCycles(memSidePort->sendAtomic(pkt));
1061
1062 promoteWholeLineWrites(pkt);
1063
1064 // follow the same flow as in recvTimingReq, and check if a cache
1065 // above us is responding
1041 if (pkt->cacheResponding()) {
1066 if (pkt->cacheResponding() && !pkt->isClean()) {
1067 assert(!pkt->req->isCacheInvalidate());
1042 DPRINTF(Cache, "Cache above responding to %s: not responding\n",
1043 pkt->print());
1044
1045 // if a cache is responding, and it had the line in Owned
1046 // rather than Modified state, we need to invalidate any
1047 // copies that are not on the same path to memory
1048 assert(pkt->needsWritable() && !pkt->responderHadWritable());
1049 lat += ticksToCycles(memSidePort->sendAtomic(pkt));

--- 4 unchanged lines hidden (view full) ---

1054 // should assert here that there are no outstanding MSHRs or
1055 // writebacks... that would mean that someone used an atomic
1056 // access in timing mode
1057
1058 CacheBlk *blk = nullptr;
1059 PacketList writebacks;
1060 bool satisfied = access(pkt, blk, lat, writebacks);
1061
1068 DPRINTF(Cache, "Cache above responding to %s: not responding\n",
1069 pkt->print());
1070
1071 // if a cache is responding, and it had the line in Owned
1072 // rather than Modified state, we need to invalidate any
1073 // copies that are not on the same path to memory
1074 assert(pkt->needsWritable() && !pkt->responderHadWritable());
1075 lat += ticksToCycles(memSidePort->sendAtomic(pkt));

--- 4 unchanged lines hidden (view full) ---

1080 // should assert here that there are no outstanding MSHRs or
1081 // writebacks... that would mean that someone used an atomic
1082 // access in timing mode
1083
1084 CacheBlk *blk = nullptr;
1085 PacketList writebacks;
1086 bool satisfied = access(pkt, blk, lat, writebacks);
1087
1088 if (pkt->isClean() && blk && blk->isDirty()) {
1089 // A cache clean operation is looking for a dirty
1090 // block. If a dirty block is encountered, a WriteClean
1091 // will update any copies on the path to the memory,
1092 // down to the point of reference.
1093 DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
1094 __func__, pkt->print(), blk->print());
1095 PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest());
1096 writebacks.push_back(wb_pkt);
1097 pkt->setSatisfied();
1098 }
1099
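
Aside: a stand-alone C++ sketch of the atomic-mode handling added above; this is not gem5 code, and writecleanBlk()/setSatisfied() are modelled by the toy stand-ins below.

#include <cstdio>
#include <list>

struct ToyBlk { bool valid = true; bool dirty = true; };
struct ToyPkt { bool isClean = false; bool satisfied = false; };
struct WriteCleanPkt { int dest; };  // destination point of the clean op

void handleAtomicClean(ToyPkt &pkt, ToyBlk *blk, int dest,
                       std::list<WriteCleanPkt> &writebacks)
{
    if (pkt.isClean && blk && blk->valid && blk->dirty) {
        writebacks.push_back({dest});  // WriteClean carries the dirty data
        blk->dirty = false;            // this level is now clean
        pkt.satisfied = true;          // the dirty block has been found
    }
}

int main()
{
    ToyBlk blk;
    ToyPkt clean{true, false};
    std::list<WriteCleanPkt> writebacks;
    handleAtomicClean(clean, &blk, /*dest=*/2, writebacks);
    std::printf("writebacks=%zu satisfied=%d dirty=%d\n",
                writebacks.size(), clean.satisfied, blk.dirty);
    return 0;
}
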
1062 // handle writebacks resulting from the access here to ensure they
1063 // logically precede anything happening below
1064 doWritebacksAtomic(writebacks);
1065
1066 if (!satisfied) {
1067 // MISS
1068
1069 // deal with the packets that go through the write path of

--- 269 unchanged lines hidden (view full) ---

1339 bool is_fill = !mshr->isForward &&
1340 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
1341
1342 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
1343 const bool valid_blk = blk && blk->isValid();
1344 // If the response indicates that there are no sharers and we
1345 // either had the block already or the response is filling we can
1346 // promote our copy to writable
1100 // handle writebacks resulting from the access here to ensure they
1101 // logically precede anything happening below
1102 doWritebacksAtomic(writebacks);
1103
1104 if (!satisfied) {
1105 // MISS
1106
1107 // deal with the packets that go through the write path of

--- 269 unchanged lines hidden (view full) ---

1377 bool is_fill = !mshr->isForward &&
1378 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
1379
1380 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
1381 const bool valid_blk = blk && blk->isValid();
1382 // If the response indicates that there are no sharers and we
1383 // either had the block already or the response is filling we can
1384 // promote our copy to writable
1347 if (!pkt->hasSharers() && (is_fill || valid_blk)) {
1385 if (!pkt->hasSharers() &&
1386 (is_fill || (valid_blk && !pkt->req->isCacheInvalidate()))) {
1348 mshr->promoteWritable();
1349 }
1350
1351 if (is_fill && !is_error) {
1352 DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
1353 pkt->getAddr());
1354
1355 blk = handleFill(pkt, blk, writebacks, mshr->allocOnFill());
1356 assert(blk != nullptr);
1357 }
1358
1359 // allow invalidation responses originating from write-line
1360 // requests to be discarded
1361 bool is_invalidate = pkt->isInvalidate();
1362
1387 mshr->promoteWritable();
1388 }
1389
1390 if (is_fill && !is_error) {
1391 DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
1392 pkt->getAddr());
1393
1394 blk = handleFill(pkt, blk, writebacks, mshr->allocOnFill());
1395 assert(blk != nullptr);
1396 }
1397
1398 // allow invalidation responses originating from write-line
1399 // requests to be discarded
1400 bool is_invalidate = pkt->isInvalidate();
1401
1402 // The block was marked as not readable while there was a pending
1403 // cache maintenance operation, restore its flag.
1404 if (pkt->isClean() && !is_invalidate && valid_blk) {
1405 blk->status |= BlkReadable;
1406 }
1407
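
Aside: the writable-promotion condition changed above, restated as a stand-alone predicate (a sketch, not gem5 code) so the new isCacheInvalidate term is easy to see in isolation.

#include <cstdio>

bool canPromoteWritable(bool hasSharers, bool isFill, bool validBlk,
                        bool reqIsCacheInvalidate)
{
    return !hasSharers && (isFill || (validBlk && !reqIsCacheInvalidate));
}

int main()
{
    // fill, no sharers: promote
    std::printf("%d\n", canPromoteWritable(false, true, false, false));
    // valid block but the MSHR was for a cache invalidation: do not promote
    std::printf("%d\n", canPromoteWritable(false, false, true, true));
    // another cache still shares the line: never promote
    std::printf("%d\n", canPromoteWritable(true, true, true, false));
    return 0;
}
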
1363 // First offset for critical word first calculations
1364 int initial_offset = initial_tgt->pkt->getOffset(blkSize);
1365
1366 bool from_cache = false;
1367 MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
1368 for (auto &target: targets) {
1369 Packet *tgt_pkt = target.pkt;
1370 switch (target.source) {

--- 126 unchanged lines hidden (view full) ---

1497
1498 case MSHR::Target::FromSnoop:
1499 // I don't believe that a snoop can be in an error state
1500 assert(!is_error);
1501 // response to snoop request
1502 DPRINTF(Cache, "processing deferred snoop...\n");
1503 // If the response is invalidating, a snooping target can
1504 // be satisfied if it is also invalidating. If the response is not
1408 // First offset for critical word first calculations
1409 int initial_offset = initial_tgt->pkt->getOffset(blkSize);
1410
1411 bool from_cache = false;
1412 MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
1413 for (auto &target: targets) {
1414 Packet *tgt_pkt = target.pkt;
1415 switch (target.source) {

--- 126 unchanged lines hidden (view full) ---

1542
1543 case MSHR::Target::FromSnoop:
1544 // I don't believe that a snoop can be in an error state
1545 assert(!is_error);
1546 // response to snoop request
1547 DPRINTF(Cache, "processing deferred snoop...\n");
1548 // If the response is invalidating, a snooping target can
1549 // be satisfied if it is also invalidating. If the response is not
1505 // only invalidating, but more specifically an InvalidateResp, the
1506 // MSHR was created due to an InvalidateReq and a cache above is
1507 // waiting to satisfy a WriteLineReq. In this case even a
1550 // only invalidating, but more specifically an InvalidateResp and
1551 // the MSHR was created due to an InvalidateReq then a cache above
1552 // is waiting to satisfy a WriteLineReq. In this case even a
1508 // non-invalidating snoop is added as a target here since this is
1509 // the ordering point. When the InvalidateResp reaches this cache,
1510 // the snooping target will further snoop the cache above with the
1511 // WriteLineReq.
1553 // non-invalidating snoop is added as a target here since this is
1554 // the ordering point. When the InvalidateResp reaches this cache,
1555 // the snooping target will further snoop the cache above with the
1556 // WriteLineReq.
1512 assert(!(is_invalidate &&
1513 pkt->cmd != MemCmd::InvalidateResp &&
1514 !mshr->hasPostInvalidate()));
1557 assert(!is_invalidate || pkt->cmd == MemCmd::InvalidateResp ||
1558 pkt->req->isCacheMaintenance() ||
1559 mshr->hasPostInvalidate());
1515 handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
1516 break;
1517
1518 default:
1519 panic("Illegal target->source enum %d\n", target.source);
1520 }
1521 }
1522

--- 499 unchanged lines hidden (view full) ---

2022 pkt->setHasSharers();
2023 }
2024 // If this request is a prefetch or clean evict and an upper level
2025 // signals block present, make sure to propagate the block
2026 // presence to the requester.
2027 if (snoopPkt.isBlockCached()) {
2028 pkt->setBlockCached();
2029 }
1560 handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
1561 break;
1562
1563 default:
1564 panic("Illegal target->source enum %d\n", target.source);
1565 }
1566 }
1567

--- 499 unchanged lines hidden (view full) ---

2067 pkt->setHasSharers();
2068 }
2069 // If this request is a prefetch or clean evict and an upper level
2070 // signals block present, make sure to propagate the block
2071 // presence to the requester.
2072 if (snoopPkt.isBlockCached()) {
2073 pkt->setBlockCached();
2074 }
2075 // If the request was satisfied by snooping the cache
2076 // above, mark the original packet as satisfied too.
2077 if (snoopPkt.satisfied()) {
2078 pkt->setSatisfied();
2079 }
2030 } else {
2031 cpuSidePort->sendAtomicSnoop(pkt);
2032 if (!alreadyResponded && pkt->cacheResponding()) {
2033 // cache-to-cache response from some upper cache:
2034 // forward response to original requester
2035 assert(pkt->isResponse());
2036 }
2037 }
2038 }
2039
2080 } else {
2081 cpuSidePort->sendAtomicSnoop(pkt);
2082 if (!alreadyResponded && pkt->cacheResponding()) {
2083 // cache-to-cache response from some upper cache:
2084 // forward response to original requester
2085 assert(pkt->isResponse());
2086 }
2087 }
2088 }
2089
2040 if (!blk || !blk->isValid()) {
2090 bool respond = false;
2091 bool blk_valid = blk && blk->isValid();
2092 if (pkt->isClean()) {
2093 if (blk_valid && blk->isDirty()) {
2094 DPRINTF(CacheVerbose, "%s: packet (snoop) %s found block: %s\n",
2095 __func__, pkt->print(), blk->print());
2096 PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest());
2097 PacketList writebacks;
2098 writebacks.push_back(wb_pkt);
2099
2100 if (is_timing) {
2101 // anything that is merely forwarded pays for the forward
2102 // latency and the delay provided by the crossbar
2103 Tick forward_time = clockEdge(forwardLatency) +
2104 pkt->headerDelay;
2105 doWritebacks(writebacks, forward_time);
2106 } else {
2107 doWritebacksAtomic(writebacks);
2108 }
2109 pkt->setSatisfied();
2110 }
2111 } else if (!blk_valid) {
2041 DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__,
2042 pkt->print());
2043 if (is_deferred) {
2044 // we no longer have the block, and will not respond, but a
2045 // packet was allocated in MSHR::handleSnoop and we have
2046 // to delete it
2047 assert(pkt->needsResponse());
2048
2049 // we have passed the block to a cache upstream, that
2050 // cache should be responding
2051 assert(pkt->cacheResponding());
2052
2053 delete pkt;
2054 }
2055 return snoop_delay;
2056 } else {
2057 DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__,
2058 pkt->print(), blk->print());
2112 DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__,
2113 pkt->print());
2114 if (is_deferred) {
2115 // we no longer have the block, and will not respond, but a
2116 // packet was allocated in MSHR::handleSnoop and we have
2117 // to delete it
2118 assert(pkt->needsResponse());
2119
2120 // we have passed the block to a cache upstream, that
2121 // cache should be responding
2122 assert(pkt->cacheResponding());
2123
2124 delete pkt;
2125 }
2126 return snoop_delay;
2127 } else {
2128 DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__,
2129 pkt->print(), blk->print());
2059 }
2060
2130
2061 chatty_assert(!(isReadOnly && blk->isDirty()),
2062 "Should never have a dirty block in a read-only cache %s\n",
2063 name());
2131 // We may end up modifying both the block state and the packet (if
2132 // we respond in atomic mode), so just figure out what to do now
2133 // and then do it later. We respond to all snoops that need
2134 // responses provided we have the block in dirty state. The
2135 // invalidation itself is taken care of below. We don't respond to
2136 // cache maintenance operations as this is done by the destination
2137 // xbar.
2138 respond = blk->isDirty() && pkt->needsResponse();
2064
2139
2065 // We may end up modifying both the block state and the packet (if
2066 // we respond in atomic mode), so just figure out what to do now
2067 // and then do it later. We respond to all snoops that need
2068 // responses provided we have the block in dirty state. The
2069 // invalidation itself is taken care of below.
2070 bool respond = blk->isDirty() && pkt->needsResponse();
2071 bool have_writable = blk->isWritable();
2140 chatty_assert(!(isReadOnly && blk->isDirty()), "Should never have "
2141 "a dirty block in a read-only cache %s\n", name());
2142 }
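
Aside: a reduced, stand-alone C++ sketch (not gem5 code) of the snoop-side decision implemented above: clean-operation snoops that hit a dirty line produce a WriteClean and are marked satisfied, while ordinary snoops are still answered from the dirty copy; maintenance operations themselves are answered by the destination xbar, not by this cache.

#include <cstdio>

struct SnoopResult {
    bool emitWriteClean = false;  // push the dirty data towards the dest
    bool respond = false;         // normal cache-to-cache response
    bool satisfied = false;       // the clean op found its dirty block
};

SnoopResult snoopDecision(bool isClean, bool needsResponse,
                          bool blkValid, bool blkDirty)
{
    SnoopResult r;
    if (isClean) {
        if (blkValid && blkDirty) {
            r.emitWriteClean = true;
            r.satisfied = true;
        }
        return r;  // never a direct response to a maintenance snoop
    }
    r.respond = blkValid && blkDirty && needsResponse;
    return r;
}

int main()
{
    SnoopResult c = snoopDecision(true, true, true, true);
    SnoopResult s = snoopDecision(false, true, true, true);
    std::printf("clean snoop: wc=%d respond=%d satisfied=%d\n",
                c.emitWriteClean, c.respond, c.satisfied);
    std::printf("read snoop : wc=%d respond=%d satisfied=%d\n",
                s.emitWriteClean, s.respond, s.satisfied);
    return 0;
}
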
2072
2073 // Invalidate any prefetches from below that would strip write permissions.
2074 // MemCmd::HardPFReq is only observed by upstream caches. After missing
2075 // above and in its own cache, a new MemCmd::ReadReq is created that
2076 // downstream caches observe.
2077 if (pkt->mustCheckAbove()) {
2078 DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
2079 "from lower cache\n", pkt->getAddr(), pkt->print());

--- 8 unchanged lines hidden (view full) ---

2088
2089 // if the requesting packet is uncacheable, retain the line in
2090 // the current state, otherwise unset the writable flag,
2091 // which means we go from Modified to Owned (and will respond
2092 // below), remain in Owned (and will respond below), from
2093 // Exclusive to Shared, or remain in Shared
2094 if (!pkt->req->isUncacheable())
2095 blk->status &= ~BlkWritable;
2143
2144 // Invalidate any prefetches from below that would strip write permissions.
2145 // MemCmd::HardPFReq is only observed by upstream caches. After missing
2146 // above and in its own cache, a new MemCmd::ReadReq is created that
2147 // downstream caches observe.
2148 if (pkt->mustCheckAbove()) {
2149 DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
2150 "from lower cache\n", pkt->getAddr(), pkt->print());

--- 8 unchanged lines hidden (view full) ---

2159
2160 // if the requesting packet is uncacheable, retain the line in
2161 // the current state, otherwise unset the writable flag,
2162 // which means we go from Modified to Owned (and will respond
2163 // below), remain in Owned (and will respond below), from
2164 // Exclusive to Shared, or remain in Shared
2165 if (!pkt->req->isUncacheable())
2166 blk->status &= ~BlkWritable;
2167 DPRINTF(Cache, "new state is %s\n", blk->print());
2096 }
2097
2098 if (respond) {
2099 // prevent anyone else from responding, cache as well as
2100 // memory, and also prevent any memory from even seeing the
2101 // request
2102 pkt->setCacheResponding();
2168 }
2169
2170 if (respond) {
2171 // prevent anyone else from responding, cache as well as
2172 // memory, and also prevent any memory from even seeing the
2173 // request
2174 pkt->setCacheResponding();
2103 if (have_writable) {
2175 if (!pkt->isClean() && blk->isWritable()) {
2104 // inform the cache hierarchy that this cache had the line
2105 // in the Modified state so that we avoid unnecessary
2106 // invalidations (see Packet::setResponderHadWritable)
2107 pkt->setResponderHadWritable();
2108
2109 // in the case of an uncacheable request there is no point
2110 // in setting the responderHadWritable flag, but since the
2111 // recipient does not care there is no harm in doing so

--- 32 unchanged lines hidden (view full) ---

2144 delete pkt->req;
2145 }
2146
2147 delete pkt;
2148 }
2149
2150 // Do this last in case it deallocates block data or something
2151 // like that
2176 // inform the cache hierarchy that this cache had the line
2177 // in the Modified state so that we avoid unnecessary
2178 // invalidations (see Packet::setResponderHadWritable)
2179 pkt->setResponderHadWritable();
2180
2181 // in the case of an uncacheable request there is no point
2182 // in setting the responderHadWritable flag, but since the
2183 // recipient does not care there is no harm in doing so

--- 32 unchanged lines hidden (view full) ---

2216 delete pkt->req;
2217 }
2218
2219 delete pkt;
2220 }
2221
2222 // Do this last in case it deallocates block data or something
2223 // like that
2152 if (invalidate) {
2224 if (blk_valid && invalidate) {
2153 invalidateBlock(blk);
2225 invalidateBlock(blk);
2226 DPRINTF(Cache, "new state is %s\n", blk->print());
2154 }
2155
2227 }
2228
2156 DPRINTF(Cache, "new state is %s\n", blk->print());
2157
2158 return snoop_delay;
2159}
2160
2161
2162void
2163Cache::recvTimingSnoopReq(PacketPtr pkt)
2164{
2165 DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());

--- 24 unchanged lines hidden (view full) ---

2190 // MSHR hit, set setBlockCached.
2191 if (mshr && pkt->mustCheckAbove()) {
2192 DPRINTF(Cache, "Setting block cached for %s from lower cache on "
2193 "mshr hit\n", pkt->print());
2194 pkt->setBlockCached();
2195 return;
2196 }
2197
2229 return snoop_delay;
2230}
2231
2232
2233void
2234Cache::recvTimingSnoopReq(PacketPtr pkt)
2235{
2236 DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());

--- 24 unchanged lines hidden (view full) ---

2261 // MSHR hit, set setBlockCached.
2262 if (mshr && pkt->mustCheckAbove()) {
2263 DPRINTF(Cache, "Setting block cached for %s from lower cache on "
2264 "mshr hit\n", pkt->print());
2265 pkt->setBlockCached();
2266 return;
2267 }
2268
2269 // Bypass any existing cache maintenance requests if the request
2270 // has been satisfied already (i.e., the dirty block has been
2271 // found).
2272 if (mshr && pkt->req->isCacheMaintenance() && pkt->satisfied()) {
2273 return;
2274 }
2275
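
Aside: the early-exit added above, restated as a tiny predicate (a sketch, not gem5 code): a maintenance snoop that has already found its dirty block bypasses the in-flight MSHR instead of being deferred onto it.

#include <cstdio>

bool bypassesMshr(bool mshrPresent, bool isCacheMaintenance, bool satisfied)
{
    return mshrPresent && isCacheMaintenance && satisfied;
}

int main()
{
    std::printf("%d\n", bypassesMshr(true, true, true));   // 1: bypass
    std::printf("%d\n", bypassesMshr(true, true, false));  // 0: defer on MSHR
    std::printf("%d\n", bypassesMshr(true, false, true));  // 0: ordinary snoop
    return 0;
}
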
2198 // Let the MSHR itself track the snoop and decide whether we want
2199 // to go ahead and do the regular cache snoop
2200 if (mshr && mshr->handleSnoop(pkt, order++)) {
2201 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s)."
2202 "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
2203 mshr->print());
2204
2205 if (mshr->getNumTargets() > numTarget)

--- 48 unchanged lines hidden (view full) ---

2254 if (have_writable) {
2255 pkt->setResponderHadWritable();
2256 }
2257
2258 doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
2259 false, false);
2260 }
2261
2276 // Let the MSHR itself track the snoop and decide whether we want
2277 // to go ahead and do the regular cache snoop
2278 if (mshr && mshr->handleSnoop(pkt, order++)) {
2279 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s)."
2280 "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
2281 mshr->print());
2282
2283 if (mshr->getNumTargets() > numTarget)

--- 48 unchanged lines hidden (view full) ---

2332 if (have_writable) {
2333 pkt->setResponderHadWritable();
2334 }
2335
2336 doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
2337 false, false);
2338 }
2339
2262 if (invalidate) {
2340 if (invalidate && wb_pkt->cmd != MemCmd::WriteClean) {
2263 // Invalidation trumps our writeback... discard here
2264 // Note: markInService will remove entry from writeback buffer.
2265 markInService(wb_entry);
2266 delete wb_pkt;
2267 }
2268 }
2269
2270 // If this was a shared writeback, there may still be

--- 252 unchanged lines hidden (view full) ---

2523 pkt = new Packet(tgt_pkt, false, true);
2524 assert(!pkt->isWrite());
2525 }
2526
2527 // play it safe and append (rather than set) the sender state,
2528 // as forwarded packets may already have existing state
2529 pkt->pushSenderState(mshr);
2530
2341 // Invalidation trumps our writeback... discard here
2342 // Note: markInService will remove entry from writeback buffer.
2343 markInService(wb_entry);
2344 delete wb_pkt;
2345 }
2346 }
2347
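
Aside: the rule changed above, as a stand-alone predicate (a sketch, not gem5 code): an invalidating snoop may discard a queued Writeback or CleanEvict, but a WriteClean has to survive so it still reaches its destination point.

#include <cstdio>

enum class WbCmd { Writeback, CleanEvict, WriteClean };

bool discardPendingWriteback(bool snoopInvalidates, WbCmd cmd)
{
    return snoopInvalidates && cmd != WbCmd::WriteClean;
}

int main()
{
    std::printf("%d\n", discardPendingWriteback(true,  WbCmd::Writeback));  // 1
    std::printf("%d\n", discardPendingWriteback(true,  WbCmd::WriteClean)); // 0
    std::printf("%d\n", discardPendingWriteback(false, WbCmd::Writeback));  // 0
    return 0;
}
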
2348 // If this was a shared writeback, there may still be

--- 252 unchanged lines hidden (view full) ---

2601 pkt = new Packet(tgt_pkt, false, true);
2602 assert(!pkt->isWrite());
2603 }
2604
2605 // play it safe and append (rather than set) the sender state,
2606 // as forwarded packets may already have existing state
2607 pkt->pushSenderState(mshr);
2608
2609 if (pkt->isClean() && blk && blk->isDirty()) {
2610 // A cache clean operation is looking for a dirty block. Mark
2611 // the packet so that the destination xbar can determine that
2612 // there will be a follow-up write packet as well.
2613 pkt->setSatisfied();
2614 }
2615
2531 if (!memSidePort->sendTimingReq(pkt)) {
2532 // we are awaiting a retry, but we
2533 // delete the packet and will be creating a new packet
2534 // when we get the opportunity
2535 delete pkt;
2536
2537 // note that we have now masked any requestBus and
2538 // schedSendEvent (we will wait for a retry before

--- 7 unchanged lines hidden (view full) ---

2546 // above them) as a snoop. Thus at this point we know if
2547 // any of the neighbouring caches are responding, and if
2548 // so, we know it is dirty, and we can determine if it is
2549 // being passed as Modified, making our MSHR the ordering
2550 // point
2551 bool pending_modified_resp = !pkt->hasSharers() &&
2552 pkt->cacheResponding();
2553 markInService(mshr, pending_modified_resp);
2616 if (!memSidePort->sendTimingReq(pkt)) {
2617 // we are awaiting a retry, but we
2618 // delete the packet and will be creating a new packet
2619 // when we get the opportunity
2620 delete pkt;
2621
2622 // note that we have now masked any requestBus and
2623 // schedSendEvent (we will wait for a retry before

--- 7 unchanged lines hidden (view full) ---

2631 // above them) as a snoop. Thus at this point we know if
2632 // any of the neighbouring caches are responding, and if
2633 // so, we know it is dirty, and we can determine if it is
2634 // being passed as Modified, making our MSHR the ordering
2635 // point
2636 bool pending_modified_resp = !pkt->hasSharers() &&
2637 pkt->cacheResponding();
2638 markInService(mshr, pending_modified_resp);
2639 if (pkt->isClean() && blk && blk->isDirty()) {
2640 // A cache clean operation is looking for a dirty
2641 // block. If a dirty block is encountered, a WriteClean
2642 // will update any copies on the path to the memory,
2643 // down to the point of reference.
2644 DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
2645 __func__, pkt->print(), blk->print());
2646 PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest());
2647 PacketList writebacks;
2648 writebacks.push_back(wb_pkt);
2649 doWritebacks(writebacks, 0);
2650 }
2651
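
Aside: a stand-alone C++ sketch (not gem5 code; the toy types stand in for Packet and CacheBlk) of the two steps added around sending a maintenance request from the MSHR queue: mark the packet satisfied while a dirty copy is present, then, once the MSHR is in service, push a WriteClean into the writeback path.

#include <cstdio>
#include <list>

struct ToyBlk { bool valid = true; bool dirty = true; };
struct ToyPkt { bool isClean = true; bool satisfied = false; };
struct WriteCleanPkt { int dest; };

void sendMaintenanceFromMshr(ToyPkt &pkt, ToyBlk *blk, int dest,
                             std::list<WriteCleanPkt> &writebacks)
{
    const bool dirty_hit = pkt.isClean && blk && blk->valid && blk->dirty;

    if (dirty_hit)
        pkt.satisfied = true;  // the destination xbar expects a follow-up write

    // ... the request is sent downstream and the MSHR is marked in service ...

    if (dirty_hit) {
        writebacks.push_back({dest});  // WriteClean towards the destination
        blk->dirty = false;
    }
}

int main()
{
    ToyBlk blk;
    ToyPkt pkt;
    std::list<WriteCleanPkt> writebacks;
    sendMaintenanceFromMshr(pkt, &blk, /*dest=*/1, writebacks);
    std::printf("satisfied=%d writebacks=%zu dirty=%d\n",
                pkt.satisfied, writebacks.size(), blk.dirty);
    return 0;
}
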
2554 return false;
2555 }
2556}
2557
2558bool
2559Cache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
2560{
2561 assert(wq_entry);

--- 203 unchanged lines hidden ---
2652 return false;
2653 }
2654}
2655
2656bool
2657Cache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
2658{
2659 assert(wq_entry);

--- 203 unchanged lines hidden ---