old (12348:bef2d9d3c353) new (12349:47f454120200)
1/*
2 * Copyright (c) 2010-2017 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software

--- 30 unchanged lines hidden (view full) ---

39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Erik Hallnor
42 * Dave Greene
43 * Nathan Binkert
44 * Steve Reinhardt
45 * Ron Dreslinski
46 * Andreas Sandberg
47 */
48
49/**
50 * @file
51 * Cache definitions.
52 */
53
54#include "mem/cache/cache.hh"

--- 262 unchanged lines hidden (view full) ---

317
318 // Note that lat is passed by reference to accessBlock() and
319 // may be updated by it to reflect the lookup latency.
320 blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat);
321
322 DPRINTF(Cache, "%s %s\n", pkt->print(),
323 blk ? "hit " + blk->print() : "miss");
324
325
326 if (pkt->isEviction()) {
327 // We check for presence of block in above caches before issuing
328 // Writeback or CleanEvict to write buffer. Therefore the only
329 // possible cases can be of a CleanEvict packet coming from above
330 // encountering a Writeback generated in this cache and
331 // waiting in the write buffer. Cases of upper level peer caches
332 // generating CleanEvict and Writeback or simply CleanEvict and
333 // CleanEvict almost simultaneously will be caught by snoops sent out

--- 311 unchanged lines hidden (view full) ---
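
The comment above describes accessBlock() taking the latency as an in/out parameter. A minimal sketch of that idiom follows, where SimpleTags, Block and the latency value are stand-ins rather than gem5's real classes:

#include <cstdint>
#include <unordered_map>

using Addr   = uint64_t;
using Cycles = uint64_t;

struct Block { Addr tag; bool valid; };

struct SimpleTags {
    std::unordered_map<Addr, Block> store;
    Cycles lookupLatency = 2;   // assumed tag-lookup cost

    // Mirrors the accessBlock() idiom: lat is passed by reference and
    // charged for the lookup whether it hits or misses.
    Block *accessBlock(Addr addr, Cycles &lat) {
        lat += lookupLatency;
        auto it = store.find(addr);
        return (it != store.end() && it->second.valid) ? &it->second : nullptr;
    }
};

int main() {
    SimpleTags tags;
    tags.store[0x40] = {0x40, true};
    Cycles lat = 0;
    Block *blk = tags.accessBlock(0x40, lat);   // hit; lat is now 2
    return (blk && lat == 2) ? 0 : 1;
}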

645 // @todo This should really enqueue the packet rather than send it immediately
646 bool M5_VAR_USED success = memSidePort->sendTimingReq(pkt);
647 assert(success);
648 return true;
649 }
650
651 promoteWholeLineWrites(pkt);
652
653 if (pkt->cacheResponding()) {
654 // a cache above us (but not where the packet came from) is
655 // responding to the request, in other words it has the line
656 // in Modified or Owned state
657 DPRINTF(Cache, "Cache above responding to %s: not responding\n",
658 pkt->print());
659
660 // if the packet needs the block to be writable, and the cache

--- 97 unchanged lines hidden (view full) ---
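
The hunk above bails out when an upstream cache has already claimed the response. A toy version of that decision, assuming much-simplified packet flags:

// Hypothetical, reduced packet: only the flags this decision reads.
struct Pkt {
    bool cache_responding;   // an upstream cache will supply the data
    bool needs_writable;     // the requester wants the block writable
};

struct Action { bool respond; bool invalidate_own_copy; };

// If a cache above (holding the line Modified or Owned) is already
// responding, this cache must not respond too; it only cleans up its
// own copy, invalidating it when the requester needs the block
// writable (the line will end up Modified elsewhere).
Action onUpstreamResponse(const Pkt &pkt)
{
    return { false, pkt.needs_writable };
}

int main() {
    Action a = onUpstreamResponse({true, true});
    return (!a.respond && a.invalidate_own_copy) ? 0 : 1;
}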

758 // hit (for all other request types)
759
760 if (prefetcher && (prefetchOnAccess ||
761 (blk && blk->wasPrefetched()))) {
762 if (blk)
763 blk->status &= ~BlkHWPrefetched;
764
765 // Don't notify on SWPrefetch
766 if (!pkt->cmd.isSWPrefetch())
767 next_pf_time = prefetcher->notify(pkt);
768 }
769
770 if (needsResponse) {
771 pkt->makeTimingResponse();
772 // @todo: Make someone pay for this
773 pkt->headerDelay = pkt->payloadDelay = 0;
774
775 // In this case we are considering request_time that takes

--- 74 unchanged lines hidden (view full) ---
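
A compilable sketch of the hit-path prefetcher hook above; the flag value and the Prefetcher type are made up for illustration:

#include <cstdint>

using Tick = uint64_t;
const Tick MaxTick = UINT64_MAX;

enum { BlkHWPrefetched = 0x20 };   // illustrative flag value

struct Blk        { unsigned status = 0; };
struct Prefetcher { Tick notify() { return MaxTick; } };

// On a hit, clear the hardware-prefetched bit (the block has now been
// demanded) and notify the prefetcher -- unless the access was a
// software prefetch, which must not train it.
Tick notifyOnHit(Prefetcher *pf, bool prefetch_on_access,
                 Blk *blk, bool is_sw_prefetch)
{
    Tick next_pf_time = MaxTick;
    if (pf && (prefetch_on_access ||
               (blk && (blk->status & BlkHWPrefetched)))) {
        if (blk)
            blk->status &= ~BlkHWPrefetched;
        if (!is_sw_prefetch)
            next_pf_time = pf->notify();
    }
    return next_pf_time;
}

int main() {
    Prefetcher pf;
    Blk blk; blk.status = BlkHWPrefetched;
    notifyOnHit(&pf, false, &blk, false);
    return (blk.status & BlkHWPrefetched) ? 1 : 0;   // bit cleared -> 0
}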

850
851 // Coalesce unless it was a software prefetch (see above).
852 if (pkt) {
853 assert(!pkt->isWriteback());
854 // CleanEvicts corresponding to blocks which have
855 // outstanding requests in MSHRs are simply sunk here
856 if (pkt->cmd == MemCmd::CleanEvict) {
857 pendingDelete.reset(pkt);
858 } else {
859 DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
860 pkt->print());
861
862 assert(pkt->req->masterId() < system->maxMasters());
863 mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
864 // We use forward_time here because it is the same
865 // considering new targets. We have multiple

--- 17 unchanged lines hidden (view full) ---
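
The MSHR-hit path above coalesces new targets and quietly sinks CleanEvicts. A skeletal model, assuming a much-simplified packet and MSHR:

#include <list>
#include <memory>

struct Pkt { bool is_clean_evict = false; };

struct MSHR {
    std::list<Pkt *> targets;
    void allocateTarget(Pkt *pkt) { targets.push_back(pkt); }
};

// A request that hits an MSHR already allocated for its block is
// coalesced onto that MSHR as a new target; a CleanEvict in the same
// situation carries no useful work and is simply dropped (gem5 defers
// the delete via pendingDelete; the unique_ptr here frees it at once).
void onMshrHit(MSHR &mshr, std::unique_ptr<Pkt> pkt)
{
    if (pkt->is_clean_evict)
        return;                          // sunk
    mshr.allocateTarget(pkt.release());  // MSHR takes ownership
}

int main() {
    MSHR mshr;
    onMshrHit(mshr, std::make_unique<Pkt>());          // coalesced
    onMshrHit(mshr, std::make_unique<Pkt>(Pkt{true})); // CleanEvict: sunk
    bool ok = mshr.targets.size() == 1;
    for (Pkt *p : mshr.targets) delete p;              // clean up targets
    return ok ? 0 : 1;
}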

883 // satisfied or not, regardless of whether the request is in the
884 // MSHR or not. The request could be a ReadReq hit, but still not
885 // satisfied (potentially because of a prior write to the same
886 // cache line). So, even when not satisfied and there is an MSHR
887 // already allocated for this, we need to let the prefetcher
888 // know about the request
889 if (prefetcher) {
890 // Don't notify on SWPrefetch
891 if (!pkt->cmd.isSWPrefetch())
892 next_pf_time = prefetcher->notify(pkt);
893 }
894 }
895 } else {
896 // no MSHR
897 assert(pkt->req->masterId() < system->maxMasters());
898 if (pkt->req->isUncacheable()) {
899 mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;

--- 21 unchanged lines hidden (view full) ---

921 // that yet. Note that we do need to leave the
922 // block valid so that it stays in the cache, in
923 // case we get an upgrade response (and hence no
924 // new data) when the write miss completes.
925 // As long as CPUs do proper store/load forwarding
926 // internally, and have a sufficiently weak memory
927 // model, this is probably unnecessary, but at some
928 // point it must have seemed like we needed it...
929 assert(pkt->needsWritable());
930 assert(!blk->isWritable());
931 blk->status &= ~BlkReadable;
932 }
933 // Here we are using forward_time, modelling the latency of
934 // an outbound miss as just forwardLatency, neglecting the
935 // lookupLatency component.
936 allocateMissBuffer(pkt, forward_time);
937 }
938
939 if (prefetcher) {
940 // Don't notify on SWPrefetch
941 if (!pkt->cmd.isSWPrefetch())
942 next_pf_time = prefetcher->notify(pkt);
943 }
944 }
945 }
946
947 if (next_pf_time != MaxTick)
948 schedMemSideSendEvent(next_pf_time);
949

--- 6 unchanged lines hidden (view full) ---
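
The write-miss handling above keeps the block valid (an upgrade response carries no data) while locking readers out until the miss completes. Distilled below, with made-up flag values:

#include <cassert>

enum { BlkValid = 0x01, BlkReadable = 0x02, BlkWritable = 0x04 };  // made up

struct Blk { unsigned status = 0; };

// The block stays valid -- if the miss completes with an upgrade
// response there is no new data, so the old data must survive -- but
// readers are blocked until the write miss completes.
void onWriteMissToValidBlock(Blk &blk)
{
    assert(blk.status & BlkValid);
    assert(!(blk.status & BlkWritable));
    blk.status &= ~BlkReadable;
}

int main() {
    Blk blk; blk.status = BlkValid | BlkReadable;
    onWriteMissToValidBlock(blk);
    return ((blk.status & BlkValid) && !(blk.status & BlkReadable)) ? 0 : 1;
}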

956{
957 // should never see evictions here
958 assert(!cpu_pkt->isEviction());
959
960 bool blkValid = blk && blk->isValid();
961
962 if (cpu_pkt->req->isUncacheable() ||
963 (!blkValid && cpu_pkt->isUpgrade()) ||
964 cpu_pkt->cmd == MemCmd::InvalidateReq) {
965 // uncacheable requests and upgrades from upper-level caches
966 // that missed completely just go through as is
967 return nullptr;
968 }
969
970 assert(cpu_pkt->needsResponse());
971
972 MemCmd cmd;

--- 60 unchanged lines hidden (view full) ---
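
A sketch of the pass-through rule at the top of createMissPacket(), with local stand-ins for MemCmd and Packet; returning nullptr means "forward the CPU packet unchanged":

enum class Cmd { ReadReq, WriteReq, UpgradeReq, InvalidateReq };

struct Pkt {
    Cmd  cmd;
    bool uncacheable;
    bool isUpgrade() const { return cmd == Cmd::UpgradeReq; }
};

// nullptr means "no separate bus packet": uncacheable accesses,
// upgrades that missed entirely, and explicit invalidations all take
// that path.
const Pkt *createMissPacket(const Pkt &cpu_pkt, bool blk_valid)
{
    if (cpu_pkt.uncacheable ||
        (!blk_valid && cpu_pkt.isUpgrade()) ||
        cpu_pkt.cmd == Cmd::InvalidateReq)
        return nullptr;
    // ... otherwise a real implementation would pick a bus command
    // and build a fresh packet for the memory side here.
    return &cpu_pkt;   // placeholder for the newly built packet
}

int main() {
    Pkt p{Cmd::InvalidateReq, false};
    return createMissPacket(p, true) == nullptr ? 0 : 1;
}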

1033 // Forward the request if the system is in cache bypass mode.
1034 if (system->bypassCaches())
1035 return ticksToCycles(memSidePort->sendAtomic(pkt));
1036
1037 promoteWholeLineWrites(pkt);
1038
1039 // follow the same flow as in recvTimingReq, and check if a cache
1040 // above us is responding
1041 if (pkt->cacheResponding()) {
1042 DPRINTF(Cache, "Cache above responding to %s: not responding\n",
1043 pkt->print());
1044
1045 // if a cache is responding, and it had the line in Owned
1046 // rather than Modified state, we need to invalidate any
1047 // copies that are not on the same path to memory
1048 assert(pkt->needsWritable() && !pkt->responderHadWritable());
1049 lat += ticksToCycles(memSidePort->sendAtomic(pkt));

--- 4 unchanged lines hidden (view full) ---
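
In atomic mode the downstream latency comes back in ticks and is folded into the access latency, as in "lat += ticksToCycles(memSidePort->sendAtomic(pkt))" above. A toy version, assuming a fixed clock period for the conversion:

#include <cstdint>

using Tick   = uint64_t;
using Cycles = uint64_t;

// Assumed conversion: latency in ticks divided by the clock period.
Cycles ticksToCycles(Tick t, Tick clock_period) { return t / clock_period; }

// lat already holds this cache's lookup cost; add the cycles spent
// in the caches and memory below.
Cycles atomicMissLatency(Cycles lat, Tick downstream_ticks, Tick clock_period)
{
    return lat + ticksToCycles(downstream_ticks, clock_period);
}

int main() {
    // 2 cycles here plus 1000 ticks below at a 500-tick clock -> 4 cycles.
    return atomicMissLatency(2, 1000, 500) == 4 ? 0 : 1;
}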

1054 // should assert here that there are no outstanding MSHRs or
1055 // writebacks... that would mean that someone used an atomic
1056 // access in timing mode
1057
1058 CacheBlk *blk = nullptr;
1059 PacketList writebacks;
1060 bool satisfied = access(pkt, blk, lat, writebacks);
1061
1062 // handle writebacks resulting from the access here to ensure they
1063 // logically precede anything happening below
1064 doWritebacksAtomic(writebacks);
1065
1066 if (!satisfied) {
1067 // MISS
1068
1069 // deal with the packets that go through the write path of

--- 269 unchanged lines hidden (view full) ---

1339 bool is_fill = !mshr->isForward &&
1340 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
1341
1342 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
1343 const bool valid_blk = blk && blk->isValid();
1344 // If the response indicates that there are no sharers and we
1345 // either had the block already or the response is filling, we can
1346 // promote our copy to writable
1347 if (!pkt->hasSharers() && (is_fill || valid_blk)) {
1348 mshr->promoteWritable();
1349 }
1350
1351 if (is_fill && !is_error) {
1352 DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
1353 pkt->getAddr());
1354
1355 blk = handleFill(pkt, blk, writebacks, mshr->allocOnFill());
1356 assert(blk != nullptr);
1357 }
1358
1359 // allow invalidation responses originating from write-line
1360 // requests to be discarded
1361 bool is_invalidate = pkt->isInvalidate();
1362
1363 // First offset for critical word first calculations
1364 int initial_offset = initial_tgt->pkt->getOffset(blkSize);
1365
1366 bool from_cache = false;
1367 MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
1368 for (auto &target: targets) {
1369 Packet *tgt_pkt = target.pkt;
1370 switch (target.source) {

--- 126 unchanged lines hidden (view full) ---
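
The promotion test above, isolated into a predicate; the field names are local stand-ins for the packet and MSHR state it reads:

struct RespState {
    bool has_sharers;   // the response reports other copies
    bool is_fill;       // the response carries data we will install
    bool valid_blk;     // we already held a valid copy
};

// With no sharers left, a response that either fills the block or
// finds a copy already present lets the MSHR promote its pending
// targets to writable.
bool canPromoteWritable(const RespState &s)
{
    return !s.has_sharers && (s.is_fill || s.valid_blk);
}

int main() {
    return canPromoteWritable({false, true, false}) ? 0 : 1;
}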

1497
1498 case MSHR::Target::FromSnoop:
1499 // I don't believe that a snoop can be in an error state
1500 assert(!is_error);
1501 // response to snoop request
1502 DPRINTF(Cache, "processing deferred snoop...\n");
1503 // If the response is invalidating, a snooping target can
1504 // be satisfied if it is also invalidating. If the response is
1505 // not only invalidating, but more specifically an InvalidateResp, the
1506 // MSHR was created due to an InvalidateReq and a cache above is
1507 // waiting to satisfy a WriteLineReq. In this case even a
1508 // non-invalidating snoop is added as a target here since this is
1509 // the ordering point. When the InvalidateResp reaches this cache,
1510 // the snooping target will further snoop the cache above with the
1511 // WriteLineReq.
1512 assert(!(is_invalidate &&
1513 pkt->cmd != MemCmd::InvalidateResp &&
1514 !mshr->hasPostInvalidate()));
1515 handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
1516 break;
1517
1518 default:
1519 panic("Illegal target->source enum %d\n", target.source);
1520 }
1521 }
1522

--- 499 unchanged lines hidden (view full) ---

2022 pkt->setHasSharers();
2023 }
2024 // If this request is a prefetch or clean evict and an upper level
2025 // signals block present, make sure to propagate the block
2026 // presence to the requester.
2027 if (snoopPkt.isBlockCached()) {
2028 pkt->setBlockCached();
2029 }
2030 } else {
2031 cpuSidePort->sendAtomicSnoop(pkt);
2032 if (!alreadyResponded && pkt->cacheResponding()) {
2033 // cache-to-cache response from some upper cache:
2034 // forward response to original requester
2035 assert(pkt->isResponse());
2036 }
2037 }
2038 }
2039
2040 if (!blk || !blk->isValid()) {
2041 DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__,
2042 pkt->print());
2043 if (is_deferred) {
2044 // we no longer have the block, and will not respond, but a
2045 // packet was allocated in MSHR::handleSnoop and we have
2046 // to delete it
2047 assert(pkt->needsResponse());
2048
2049 // we have passed the block to a cache upstream; that
2050 // cache should be responding
2051 assert(pkt->cacheResponding());
2052
2053 delete pkt;
2054 }
2055 return snoop_delay;
2056 } else {
2057 DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__,
2058 pkt->print(), blk->print());
2059 }
2060
2061 chatty_assert(!(isReadOnly && blk->isDirty()),
2062 "Should never have a dirty block in a read-only cache %s\n",
2063 name());
2064
2065 // We may end up modifying both the block state and the packet (if
2066 // we respond in atomic mode), so just figure out what to do now
2067 // and then do it later. We respond to all snoops that need
2068 // responses provided we have the block in dirty state. The
2069 // invalidation itself is taken care of below.
2070 bool respond = blk->isDirty() && pkt->needsResponse();
2071 bool have_writable = blk->isWritable();
2072
2073 // Invalidate any prefetches from below that would strip write
2074 // permissions. MemCmd::HardPFReq is only observed by upstream
2075 // caches. After missing above and in its own cache, a new
2076 // MemCmd::ReadReq is created that downstream caches observe.
2077 if (pkt->mustCheckAbove()) {
2078 DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
2079 "from lower cache\n", pkt->getAddr(), pkt->print());

--- 8 unchanged lines hidden (view full) ---

2088
2089 // if the requesting packet is uncacheable, retain the line in
2090 // the current state, otherwise unset the writable flag,
2091 // which means we go from Modified to Owned (and will respond
2092 // below), remain in Owned (and will respond below), from
2093 // Exclusive to Shared, or remain in Shared
2094 if (!pkt->req->isUncacheable())
2095 blk->status &= ~BlkWritable;
2096 }
2097
2098 if (respond) {
2099 // prevent anyone else from responding, cache as well as
2100 // memory, and also prevent any memory from even seeing the
2101 // request
2102 pkt->setCacheResponding();
2103 if (have_writable) {
2104 // inform the cache hierarchy that this cache had the line
2105 // in the Modified state so that we avoid unnecessary
2106 // invalidations (see Packet::setResponderHadWritable)
2107 pkt->setResponderHadWritable();
2108
2109 // in the case of an uncacheable request there is no point
2110 // in setting the responderHadWritable flag, but since the
2111 // recipient does not care there is no harm in doing so

--- 32 unchanged lines hidden (view full) ---
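
The comment above spells out the state transitions caused by clearing the writable flag. The same mapping over an explicit toy MOESI enum (gem5 itself encodes state as valid/dirty/writable bits):

enum class State { Invalid, Shared, Exclusive, Owned, Modified };

// Clearing write permission maps Modified -> Owned and Exclusive ->
// Shared; Owned and Shared stay put. An uncacheable snoop leaves the
// state untouched altogether.
State demoteOnSnoop(State s, bool req_uncacheable)
{
    if (req_uncacheable)
        return s;
    switch (s) {
      case State::Modified:  return State::Owned;
      case State::Exclusive: return State::Shared;
      default:               return s;
    }
}

int main() {
    return demoteOnSnoop(State::Modified, false) == State::Owned ? 0 : 1;
}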

2144 delete pkt->req;
2145 }
2146
2147 delete pkt;
2148 }
2149
2150 // Do this last in case it deallocates block data or something
2151 // like that
2152 if (invalidate) {
2153 invalidateBlock(blk);
2154 }
2155
2156 DPRINTF(Cache, "new state is %s\n", blk->print());
2157
2158 return snoop_delay;
2159}
2160
2161
2162void
2163Cache::recvTimingSnoopReq(PacketPtr pkt)
2164{
2165 DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());

--- 24 unchanged lines hidden (view full) ---

2190 // MSHR hit, call setBlockCached.
2191 if (mshr && pkt->mustCheckAbove()) {
2192 DPRINTF(Cache, "Setting block cached for %s from lower cache on "
2193 "mshr hit\n", pkt->print());
2194 pkt->setBlockCached();
2195 return;
2196 }
2197
2198 // Let the MSHR itself track the snoop and decide whether we want
2199 // to go ahead and do the regular cache snoop
2200 if (mshr && mshr->handleSnoop(pkt, order++)) {
2201 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s). "
2202 "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
2203 mshr->print());
2204
2205 if (mshr->getNumTargets() > numTarget)

--- 48 unchanged lines hidden (view full) ---

2254 if (have_writable) {
2255 pkt->setResponderHadWritable();
2256 }
2257
2258 doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
2259 false, false);
2260 }
2261
2262 if (invalidate) {
2263 // Invalidation trumps our writeback... discard here
2264 // Note: markInService will remove entry from writeback buffer.
2265 markInService(wb_entry);
2266 delete wb_pkt;
2267 }
2268 }
2269
2270 // If this was a shared writeback, there may still be

--- 252 unchanged lines hidden (view full) ---

2523 pkt = new Packet(tgt_pkt, false, true);
2524 assert(!pkt->isWrite());
2525 }
2526
2527 // play it safe and append (rather than set) the sender state,
2528 // as forwarded packets may already have existing state
2529 pkt->pushSenderState(mshr);
2530
2531 if (!memSidePort->sendTimingReq(pkt)) {
2532 // we are awaiting a retry, but we
2533 // delete the packet and will be creating a new packet
2534 // when we get the opportunity
2535 delete pkt;
2536
2537 // note that we have now masked any requestBus and
2538 // schedSendEvent (we will wait for a retry before

--- 7 unchanged lines hidden (view full) ---
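
The failed-send path above deletes the packet and rebuilds it on retry, since the MSHR retains everything needed. A sketch of that ownership pattern with a hypothetical port type:

#include <memory>

// Hypothetical port: rejects the request while busy.
struct Port {
    bool busy = false;
    bool sendTimingReq(void *) { return !busy; }
};

// On a refused send the local copy of the packet is freed; the entry
// it was built from still holds everything needed to create a fresh
// packet when the retry arrives.
template <typename Pkt>
bool trySend(Port &port, std::unique_ptr<Pkt> pkt)
{
    if (!port.sendTimingReq(pkt.get()))
        return false;     // pkt destroyed here; wait for recvReqRetry()
    pkt.release();        // on success, ownership passes downstream
    return true;
}

int main() {
    Port port; port.busy = true;
    return trySend(port, std::make_unique<int>(0)) ? 1 : 0;  // refused -> 0
}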

2546 // above them) as a snoop. Thus at this point we know if
2547 // any of the neighbouring caches are responding, and if
2548 // so, we know it is dirty, and we can determine if it is
2549 // being passed as Modified, making our MSHR the ordering
2550 // point
2551 bool pending_modified_resp = !pkt->hasSharers() &&
2552 pkt->cacheResponding();
2553 markInService(mshr, pending_modified_resp);
2554 return false;
2555 }
2556}
2557
2558bool
2559Cache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
2560{
2561 assert(wq_entry);

--- 203 unchanged lines hidden ---
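
Finally, the ordering-point test from sendMSHRQueuePacket() above ("!pkt->hasSharers() && pkt->cacheResponding()"), as a stand-alone predicate with stand-in names:

struct SnoopedReq {
    bool has_sharers;        // some cache kept a shared copy
    bool cache_responding;   // some cache is supplying the data
};

// A responding cache with no surviving sharers must be passing the
// line along as Modified, which makes this MSHR the ordering point
// for a pending modified response.
bool pendingModifiedResp(const SnoopedReq &r)
{
    return !r.has_sharers && r.cache_responding;
}

int main() {
    return pendingModifiedResp({false, true}) ? 0 : 1;
}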