/*
 * Copyright (c) 2010-2017 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software

--- 30 unchanged lines hidden ---

 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Sandberg
 *          Nikos Nikoleris
 */

/**
 * @file
 * Cache definitions.
 */

#include "mem/cache/cache.hh"

--- 262 unchanged lines hidden ---


    // Note that lat is passed by reference here: accessBlock() may
    // update it to reflect the latency of the tag lookup.
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat);

    DPRINTF(Cache, "%s %s\n", pkt->print(),
            blk ? "hit " + blk->print() : "miss");

    if (pkt->req->isCacheMaintenance()) {
        // A cache maintenance operation is always forwarded to the
        // memory below even if the block is found in dirty state.

        // We defer any changes to the state of the block until we
        // create and mark as in service the mshr for the downstream
        // packet.
        return false;
    }
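    // Note that by returning false above we report the maintenance
    // operation as a miss: the caller will allocate an MSHR for it and
    // forward it towards the memory below.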

    if (pkt->isEviction()) {
        // We check for presence of block in above caches before issuing
        // Writeback or CleanEvict to write buffer. Therefore the only
        // possible case is a CleanEvict packet coming from above and
        // encountering a Writeback, generated in a peer cache, that is
        // waiting in the write buffer. Cases of upper level peer caches
        // generating CleanEvict and Writeback or simply CleanEvict and
        // CleanEvict almost simultaneously will be caught by snoops sent out

--- 311 unchanged lines hidden ---

        // @todo This should really enqueue the packet rather
        bool M5_VAR_USED success = memSidePort->sendTimingReq(pkt);
        assert(success);
        return true;
    }

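    // A WriteReq that covers a whole cache line is promoted to a
    // WriteLineReq here (when doFastWrites is enabled), so the line can
    // be allocated without first fetching its old contents.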
    promoteWholeLineWrites(pkt);

    // Cache maintenance operations have to visit all the caches down
    // to the specified xbar (PoC, PoU, etc.). Even if a cache above
    // is responding we forward the packet to the memory below rather
    // than creating an express snoop.
    if (pkt->cacheResponding()) {
        // a cache above us (but not where the packet came from) is
        // responding to the request, in other words it has the line
        // in Modified or Owned state
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if the packet needs the block to be writable, and the cache

--- 97 unchanged lines hidden ---

        // hit (for all other request types)

        if (prefetcher && (prefetchOnAccess ||
                           (blk && blk->wasPrefetched()))) {
            if (blk)
                blk->status &= ~BlkHWPrefetched;

            // Don't notify on SWPrefetch
            if (!pkt->cmd.isSWPrefetch()) {
                assert(!pkt->req->isCacheMaintenance());
                next_pf_time = prefetcher->notify(pkt);
            }
        }

        if (needsResponse) {
            pkt->makeTimingResponse();
            // @todo: Make someone pay for this
            pkt->headerDelay = pkt->payloadDelay = 0;

            // In this case we are considering request_time that takes

--- 74 unchanged lines hidden ---


            // Coalesce unless it was a software prefetch (see above).
            if (pkt) {
                assert(!pkt->isWriteback());
                // CleanEvicts corresponding to blocks which have
                // outstanding requests in MSHRs are simply sunk here
                if (pkt->cmd == MemCmd::CleanEvict) {
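                    // pendingDelete defers freeing the sunk packet until
                    // the end of the current call, since the sender may
                    // still hold a reference to it.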
                    pendingDelete.reset(pkt);
                } else if (pkt->cmd == MemCmd::WriteClean) {
                    // A WriteClean should never coalesce with any
                    // outstanding cache maintenance requests.

                    // We use forward_time here because there is an
                    // uncached memory write, forwarded to WriteBuffer.
                    allocateWriteBuffer(pkt, forward_time);
                } else {
                    DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
                            pkt->print());

                    assert(pkt->req->masterId() < system->maxMasters());
                    mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
                    // We use forward_time here because it is the same
                    // considering new targets. We have multiple
--- 17 unchanged lines hidden ---

                // satisfied or not, regardless of whether the request is in
                // the MSHR or not. The request could be a ReadReq hit, but
                // still not satisfied (potentially because of a prior write
                // to the same cache line). So, even when not satisfied,
                // there is an MSHR already allocated for this request, and
                // we need to let the prefetcher know about it.
                if (prefetcher) {
                    // Don't notify on SWPrefetch
                    if (!pkt->cmd.isSWPrefetch() &&
                        !pkt->req->isCacheMaintenance())
                        next_pf_time = prefetcher->notify(pkt);
                }
            }
        } else {
            // no MSHR
            assert(pkt->req->masterId() < system->maxMasters());
            if (pkt->req->isUncacheable()) {
                mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;

--- 21 unchanged lines hidden (view full) ---

                    // that yet. Note that we do need to leave the
                    // block valid so that it stays in the cache, in
                    // case we get an upgrade response (and hence no
                    // new data) when the write miss completes.
                    // As long as CPUs do proper store/load forwarding
                    // internally, and have a sufficiently weak memory
                    // model, this is probably unnecessary, but at some
                    // point it must have seemed like we needed it...
                    assert((pkt->needsWritable() && !blk->isWritable()) ||
                           pkt->req->isCacheMaintenance());
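                    // Clearing BlkReadable forces subsequent accesses to
                    // this block to miss and queue behind this MSHR; the
                    // flag is restored when the response arrives.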
                    blk->status &= ~BlkReadable;
                }
                // Here we are using forward_time, modelling the latency of
                // a miss (outbound) just as forwardLatency, neglecting the
                // lookupLatency component.
                allocateMissBuffer(pkt, forward_time);
            }

            if (prefetcher) {
                // Don't notify on SWPrefetch
                if (!pkt->cmd.isSWPrefetch() &&
                    !pkt->req->isCacheMaintenance())
                    next_pf_time = prefetcher->notify(pkt);
            }
        }
    }

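    // MaxTick is the sentinel value the prefetcher returns when it has
    // nothing scheduled; otherwise, wake up the memory-side port at the
    // tick the prefetcher requested.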
    if (next_pf_time != MaxTick)
        schedMemSideSendEvent(next_pf_time);


--- 6 unchanged lines hidden ---

{
    // should never see evictions here
    assert(!cpu_pkt->isEviction());

    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable() ||
        (!blkValid && cpu_pkt->isUpgrade()) ||
        cpu_pkt->cmd == MemCmd::InvalidateReq || cpu_pkt->isClean()) {
        // uncacheable requests, upgrades from upper-level caches that
        // missed completely, invalidations and cleans just go through
        // as is
        return nullptr;
    }

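    // From here on we construct a new packet to send towards memory on
    // behalf of the CPU-side one; which command is chosen depends on
    // whether the request needs a writable copy and whether we already
    // hold a valid block.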
    assert(cpu_pkt->needsResponse());

    MemCmd cmd;

--- 60 unchanged lines hidden ---

    // Forward the request if the system is in cache bypass mode.
    if (system->bypassCaches())
        return ticksToCycles(memSidePort->sendAtomic(pkt));

    promoteWholeLineWrites(pkt);

    // follow the same flow as in recvTimingReq, and check if a cache
    // above us is responding
    if (pkt->cacheResponding() && !pkt->isClean()) {
        assert(!pkt->req->isCacheInvalidate());
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if a cache is responding, and it had the line in Owned
        // rather than Modified state, we need to invalidate any
        // copies that are not on the same path to memory
        assert(pkt->needsWritable() && !pkt->responderHadWritable());
        lat += ticksToCycles(memSidePort->sendAtomic(pkt));

--- 4 unchanged lines hidden ---

    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    CacheBlk *blk = nullptr;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty block. If a
        // dirty block is encountered, a WriteClean will update any
        // copies on the path to memory down to the point of reference.
        DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                __func__, pkt->print(), blk->print());
        PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest());
        writebacks.push_back(wb_pkt);
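        // Mark the clean operation as satisfied so that caches and the
        // destination xbar below know that the dirty data is on its way
        // in a follow-up WriteClean.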
        pkt->setSatisfied();
    }

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);

    if (!satisfied) {
        // MISS

        // deal with the packets that go through the write path of

--- 269 unchanged lines hidden ---

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
    const bool valid_blk = blk && blk->isValid();
    // If the response indicates that there are no sharers and we
    // either had the block already or the response is filling we can
    // promote our copy to writable
    if (!pkt->hasSharers() &&
        (is_fill || (valid_blk && !pkt->req->isCacheInvalidate()))) {
        mshr->promoteWritable();
    }

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                pkt->getAddr());

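        // Fill the block with the response data. handleFill() may have
        // to evict a victim block, in which case the resulting writeback
        // is appended to the writebacks list serviced further below.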
        blk = handleFill(pkt, blk, writebacks, mshr->allocOnFill());
        assert(blk != nullptr);
    }

    // allow invalidation responses originating from write-line
    // requests to be discarded
    bool is_invalidate = pkt->isInvalidate();

    // The block was marked as not readable while there was a pending
    // cache maintenance operation; restore the flag.
    if (pkt->isClean() && !is_invalidate && valid_blk) {
        blk->status |= BlkReadable;
    }

    // First offset for critical word first calculations
    int initial_offset = initial_tgt->pkt->getOffset(blkSize);
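    // (Each target's completion time is staggered below according to its
    // offset relative to this one, modelling critical-word-first
    // delivery of the line.)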

    bool from_cache = false;
    MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
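    // Note that not every queued target is necessarily serviceable by
    // this response; extractServiceableTargets() pops only those that
    // are, leaving the rest on the MSHR for a later response.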
    for (auto &target: targets) {
        Packet *tgt_pkt = target.pkt;
        switch (target.source) {

--- 126 unchanged lines hidden ---


          case MSHR::Target::FromSnoop:
            // I don't believe that a snoop can be in an error state
            assert(!is_error);
            // response to snoop request
            DPRINTF(Cache, "processing deferred snoop...\n");
            // If the response is invalidating, a snooping target can
            // be satisfied if it is also invalidating. If the response
            // is not only invalidating, but more specifically an
            // InvalidateResp, and the MSHR was created due to an
            // InvalidateReq, then a cache above is waiting to satisfy a
            // WriteLineReq. In this case even a non-invalidating snoop
            // is added as a target here since this is the ordering
            // point. When the InvalidateResp reaches this cache, the
            // snooping target will further snoop the cache above with
            // the WriteLineReq.
            assert(!is_invalidate || pkt->cmd == MemCmd::InvalidateResp ||
                   pkt->req->isCacheMaintenance() ||
                   mshr->hasPostInvalidate());
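            // Service the deferred snoop (both is_timing and is_deferred
            // are true here), performing a post-invalidation afterwards
            // if one is pending on the MSHR.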
            handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
            break;

          default:
            panic("Illegal target->source enum %d\n", target.source);
        }
    }


--- 499 unchanged lines hidden ---

                pkt->setHasSharers();
            }
            // If this request is a prefetch or clean evict and an upper level
            // signals block present, make sure to propagate the block
            // presence to the requester.
            if (snoopPkt.isBlockCached()) {
                pkt->setBlockCached();
            }
            // If the request was satisfied by snooping the cache
            // above, mark the original packet as satisfied too.
            if (snoopPkt.satisfied()) {
                pkt->setSatisfied();
            }
        } else {
            cpuSidePort->sendAtomicSnoop(pkt);
            if (!alreadyResponded && pkt->cacheResponding()) {
                // cache-to-cache response from some upper cache:
                // forward response to original requester
                assert(pkt->isResponse());
            }
        }
    }

    bool respond = false;
    bool blk_valid = blk && blk->isValid();
    if (pkt->isClean()) {
        if (blk_valid && blk->isDirty()) {
            DPRINTF(CacheVerbose, "%s: packet (snoop) %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest());
            PacketList writebacks;
            writebacks.push_back(wb_pkt);

            if (is_timing) {
                // anything that is merely forwarded pays for the forward
                // latency and the delay provided by the crossbar
                Tick forward_time = clockEdge(forwardLatency) +
                    pkt->headerDelay;
                doWritebacks(writebacks, forward_time);
            } else {
                doWritebacksAtomic(writebacks);
            }
            pkt->setSatisfied();
        }
    } else if (!blk_valid) {
        DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__,
                pkt->print());
        if (is_deferred) {
            // we no longer have the block, and will not respond, but a
            // packet was allocated in MSHR::handleSnoop and we have
            // to delete it
            assert(pkt->needsResponse());

            // we have passed the block to a cache upstream, that
            // cache should be responding
            assert(pkt->cacheResponding());

            delete pkt;
        }
        return snoop_delay;
    } else {
        DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__,
                pkt->print(), blk->print());

        // We may end up modifying both the block state and the packet (if
        // we respond in atomic mode), so just figure out what to do now
        // and then do it later. We respond to all snoops that need
        // responses provided we have the block in dirty state. The
        // invalidation itself is taken care of below. We don't respond to
        // cache maintenance operations as this is done by the destination
        // xbar.
        respond = blk->isDirty() && pkt->needsResponse();

        chatty_assert(!(isReadOnly && blk->isDirty()), "Should never have "
                      "a dirty block in a read-only cache %s\n", name());
    }

    // Invalidate any prefetches from below that would strip write
    // permissions. MemCmd::HardPFReq is only observed by upstream caches.
    // After missing above and in its own cache, a new MemCmd::ReadReq is
    // created that downstream caches observe.
    if (pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
                "from lower cache\n", pkt->getAddr(), pkt->print());

--- 8 unchanged lines hidden ---


        // if the requesting packet is uncacheable, retain the line in
        // the current state, otherwise unset the writable flag,
        // which means we go from Modified to Owned (and will respond
        // below), remain in Owned (and will respond below), from
        // Exclusive to Shared, or remain in Shared
        if (!pkt->req->isUncacheable())
            blk->status &= ~BlkWritable;
        DPRINTF(Cache, "new state is %s\n", blk->print());
    }

    if (respond) {
        // prevent anyone else from responding, cache as well as
        // memory, and also prevent any memory from even seeing the
        // request
        pkt->setCacheResponding();
        if (!pkt->isClean() && blk->isWritable()) {
            // inform the cache hierarchy that this cache had the line
            // in the Modified state so that we avoid unnecessary
            // invalidations (see Packet::setResponderHadWritable)
            pkt->setResponderHadWritable();

            // in the case of an uncacheable request there is no point
            // in setting the responderHadWritable flag, but since the
            // recipient does not care there is no harm in doing so

--- 32 unchanged lines hidden ---

            delete pkt->req;
        }

        delete pkt;
    }

    // Do this last in case it deallocates block data or something
    // like that
    if (blk_valid && invalidate) {
        invalidateBlock(blk);
        DPRINTF(Cache, "new state is %s\n", blk->print());
    }

    return snoop_delay;
}


void
Cache::recvTimingSnoopReq(PacketPtr pkt)
{
    DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());

--- 24 unchanged lines hidden ---

    // MSHR hit: set the block-cached flag.
    if (mshr && pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Setting block cached for %s from lower cache on "
                "mshr hit\n", pkt->print());
        pkt->setBlockCached();
        return;
    }

    // Bypass any existing cache maintenance requests if the request
    // has been satisfied already (i.e., the dirty block has been
    // found).
    if (mshr && pkt->req->isCacheMaintenance() && pkt->satisfied()) {
        return;
    }

    // Let the MSHR itself track the snoop and decide whether we want
    // to go ahead and do the regular cache snoop
    if (mshr && mshr->handleSnoop(pkt, order++)) {
        DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s)."
                " mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
                mshr->print());

        if (mshr->getNumTargets() > numTarget)

--- 48 unchanged lines hidden ---

            if (have_writable) {
                pkt->setResponderHadWritable();
            }

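            // Supply the data out of the pending writeback packet. The
            // two false arguments indicate that the data has not already
            // been copied and that no invalidation is pending.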
            doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
                                   false, false);
        }

        if (invalidate && wb_pkt->cmd != MemCmd::WriteClean) {
            // Invalidation trumps our writeback... discard here
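            // (A WriteClean is exempt here: unlike a Writeback it still
            // has to deliver the dirty data to the destination point of
            // the maintenance operation, so it cannot be discarded.)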
            // Note: markInService will remove entry from writeback buffer.
            markInService(wb_entry);
            delete wb_pkt;
        }
    }

    // If this was a shared writeback, there may still be

--- 252 unchanged lines hidden ---

        pkt = new Packet(tgt_pkt, false, true);
        assert(!pkt->isWrite());
    }

    // play it safe and append (rather than set) the sender state,
    // as forwarded packets may already have existing state
    pkt->pushSenderState(mshr);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty block. Mark
        // the packet so that the destination xbar can determine that
        // there will be a follow-up write packet as well.
        pkt->setSatisfied();
    }

    if (!memSidePort->sendTimingReq(pkt)) {
        // we are awaiting a retry, but we
        // delete the packet and will be creating a new packet
        // when we get the opportunity
        delete pkt;

        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before

--- 7 unchanged lines hidden ---

        // above them) as a snoop. Thus at this point we know if
        // any of the neighbouring caches are responding, and if
        // so, we know it is dirty, and we can determine if it is
        // being passed as Modified, making our MSHR the ordering
        // point
        bool pending_modified_resp = !pkt->hasSharers() &&
            pkt->cacheResponding();
        markInService(mshr, pending_modified_resp);
        if (pkt->isClean() && blk && blk->isDirty()) {
            // A cache clean operation is looking for a dirty
            // block. If a dirty block is encountered, a WriteClean
            // will update any copies on the path to memory down
            // to the point of reference.
            DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest());
            PacketList writebacks;
            writebacks.push_back(wb_pkt);
            doWritebacks(writebacks, 0);
        }

        return false;
    }
}

2655
2656bool
2657Cache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
2658{
2659 assert(wq_entry);

--- 203 unchanged lines hidden ---