mem/cache/cache.cc: old (12723:530dc4bf1a00) new (12724:4f6fac3191d2)
/*
 * Copyright (c) 2010-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software

--- 40 unchanged lines hidden ---

/**
 * @file
 * Cache definitions.
 */

#include "mem/cache/cache.hh"

#include <cassert>

#include "base/compiler.hh"
#include "base/logging.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CacheTags.hh"
#include "debug/CacheVerbose.hh"
#include "enums/Clusivity.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/tags/base.hh"
#include "mem/cache/write_queue_entry.hh"
#include "mem/request.hh"
#include "params/Cache.hh"

Cache::Cache(const CacheParams *p)
    : BaseCache(p, p->system->cacheLineSize()),
      doFastWrites(true)
{
}

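// Satisfy a CPU-side request out of an already valid block. The base
// class does the actual data transfer; on top of that we work out the
// coherence state handed back when the requester is itself a
// (coherent) cache, e.g. whether it gets a writable or a shared copy.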
void
Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                      bool deferred_response, bool pending_downgrade)
{
    BaseCache::satisfyRequest(pkt, blk);

    if (pkt->isRead()) {
        // determine if this read is from a (coherent) cache or not
        if (pkt->fromCache()) {
            assert(pkt->getSize() == blkSize);
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsWritable()) {
                // sanity check
                assert(pkt->cmd == MemCmd::ReadExReq ||

--- 51 unchanged lines hidden ---

                        pkt->setHasSharers();
                    }
                }
            } else {
                // otherwise only respond with a shared copy
                pkt->setHasSharers();
            }
        }
    }
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

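// Look up a block and determine whether the access hits. Uncacheable
// requests are special-cased here: any existing copy of the block is
// flushed and invalidated, and the access is always treated as a miss
// to be sent downstream.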
bool
Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
              PacketList &writebacks)
{

    if (pkt->req->isUncacheable()) {
        assert(pkt->isRequest());

        chatty_assert(!(isReadOnly && pkt->isWrite()),
                      "Should never see a write in a read-only cache %s\n",
                      name());

        DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

        // flush and invalidate any existing block
        CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
        if (old_blk && old_blk->isValid()) {
            evictBlock(old_blk, writebacks);
        }

        blk = nullptr;
        // lookupLatency is the latency in case the request is uncacheable.
        lat = lookupLatency;
        return false;
    }

    return BaseCache::access(pkt, blk, lat, writebacks);
}

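// Timing-mode handling of the writebacks/evictions gathered during an
// access: each packet is either dropped (a CleanEvict whose block is
// still cached above) or placed in the write buffer to be sent below.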
void
Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // We use forwardLatency here because we are copying writebacks to
        // the write buffer.

        // Call isCachedAbove for Writebacks, CleanEvicts and

--- 42 unchanged lines hidden ---

            if (wbPkt->cmd == MemCmd::WritebackDirty ||
                wbPkt->cmd == MemCmd::WriteClean) {
                // Set BLOCK_CACHED flag in Writeback and send below,
                // so that the Writeback does not reset the bit
                // corresponding to this address in the snoop filter
                // below. We can discard CleanEvicts because cached
                // copies exist above. Atomic mode isCachedAbove
                // modifies packet to set BLOCK_CACHED flag
                memSidePort.sendAtomic(wbPkt);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            memSidePort.sendAtomic(wbPkt);
        }
        writebacks.pop_front();
        // In case of CleanEvicts, the packet destructor will delete the
        // request object because this is a non-snoop request packet which
        // does not require a response.
        delete wbPkt;
    }
}

--- 28 unchanged lines hidden ---


    // forwardLatency is set here because there is a response from an
    // upper level cache.
    // To pay the delay that occurs if the packet comes from the bus,
    // we also charge headerDelay.
    Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    memSidePort.schedTimingSnoopResp(pkt, snoop_resp_time);
}

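// Promote an aligned, block-sized WriteReq to a WriteLineReq, so the
// miss path can take ownership of the line without first fetching the
// old data that is about to be overwritten.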
void
Cache::promoteWholeLineWrites(PacketPtr pkt)
{
    // Cache line clearing instructions
    if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) {

--- 5 unchanged lines hidden ---

void
Cache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    // should never be satisfying an uncacheable access as we
    // flush and invalidate any existing block as part of the
    // lookup
    assert(!pkt->req->isUncacheable());

    BaseCache::handleTimingReqHit(pkt, blk, request_time);
}

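// Handle a timing-mode miss. Uncacheable requests bypass the normal
// MSHR path and allocate a write/miss buffer entry directly; software
// prefetches are answered right away with dummy data so the core is
// not kept waiting while the miss is handled asynchronously.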
void
Cache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time,
                           Tick request_time)
{
    if (pkt->req->isUncacheable()) {
        // ignore any existing MSHR if we are dealing with an
        // uncacheable request

        // should have flushed and have no valid block
        assert(!blk || !blk->isValid());

        mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;

        if (pkt->isWrite()) {
            allocateWriteBuffer(pkt, forward_time);
        } else {
            assert(pkt->isRead());

            // uncacheable accesses always allocate a new MSHR

            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }

        return;
    }

    Addr blk_addr = pkt->getBlockAddr(blkSize);

    MSHR *mshr = mshrQueue.findMatch(blk_addr, pkt->isSecure());

    // Software prefetch handling:
    // To keep the core from waiting on data it won't look at
    // anyway, send back a response with dummy data. Miss handling
    // will continue asynchronously. Unfortunately, the core will
    // insist upon freeing original Packet/Request, so we have to
    // create a new pair with a different lifecycle. Note that this
    // processing happens before any MSHR munging on the behalf of

--- 21 unchanged lines hidden ---

            assert(pf->getAddr() == pkt->getAddr());
            assert(pf->getSize() == pkt->getSize());
        }

        pkt->makeTimingResponse();

        // request_time is used here, taking into account lat and the delay
        // charged if the packet comes from the xbar.
        cpuSidePort.schedTimingResp(pkt, request_time, true);

        // If an outstanding request is in progress (we found an
        // MSHR) this is set to null
        pkt = pf;
    }

    BaseCache::handleTimingReqMiss(pkt, mshr, blk, forward_time, request_time);
}

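// Entry point for timing-mode requests arriving on the CPU side.
// Deals with whole-line write promotion and with requests that some
// other cache has already committed to respond to, before deferring
// to the base class for the normal hit/miss handling.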
void
Cache::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(CacheTags, "%s tags:\n%s\n", __func__, tags->print());

    assert(pkt->isRequest());

    // Just forward the packet if caches are disabled.
    if (system->bypassCaches()) {
        // @todo This should really enqueue the packet rather
        bool M5_VAR_USED success = memSidePort.sendTimingReq(pkt);
        assert(success);
        return;
    }

    promoteWholeLineWrites(pkt);

    if (pkt->cacheResponding()) {
        // a cache above us (but not where the packet came from) is
        // responding to the request, in other words it has the line
        // in Modified or Owned state
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if the packet needs the block to be writable, and the cache

--- 30 unchanged lines hidden ---

        // copy (Modified or Owned) that will supply the right
        // data
        snoop_pkt->setExpressSnoop();
        snoop_pkt->setCacheResponding();

        // this express snoop travels towards the memory, and at
        // every crossbar it is snooped upwards thus reaching
        // every cache in the system
        bool M5_VAR_USED success = memSidePort.sendTimingReq(snoop_pkt);
        // express snoops always succeed
        assert(success);

        // main memory will delete the snoop packet

        // queue for deletion, as opposed to immediate deletion, as
        // the sending cache is still relying on the packet
        pendingDelete.reset(pkt);

        // no need to take any further action in this particular cache
        // as an upstream cache has already committed to responding,
        // and we have already sent out any express snoops in the
        // section above to ensure all other copies in the system are
        // invalidated
        return;
    }

    BaseCache::recvTimingReq(pkt);
}

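// Build the packet that is sent towards memory to service a miss,
// choosing the command (e.g. a plain read or a read for ownership)
// based on the state we already hold and whether the requester needs
// a writable copy.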
PacketPtr
Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                        bool needsWritable) const
{
    // should never see evictions here
    assert(!cpu_pkt->isEviction());

--- 78 unchanged lines hidden ---

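// Service an atomic-mode miss: evictions and uncacheable writes are
// simply passed down the write path, while anything else is turned
// into a miss packet whose response, if any, is used to fill the
// cache.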
Cycles
Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *blk,
                           PacketList &writebacks)
{
    // deal with the packets that go through the write path of
    // the cache, i.e. any evictions and writes
    if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
        (pkt->req->isUncacheable() && pkt->isWrite())) {
        Cycles latency = ticksToCycles(memSidePort.sendAtomic(pkt));

        // at this point, if the request was an uncacheable write
        // request, it has been satisfied by a memory below and the
        // packet carries the response back
        assert(!(pkt->req->isUncacheable() && pkt->isWrite()) ||
               pkt->isResponse());

        return latency;

--- 13 unchanged lines hidden ---


    DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__,
            bus_pkt->print());

#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    Cycles latency = ticksToCycles(memSidePort.sendAtomic(bus_pkt));

    bool is_invalidate = bus_pkt->isInvalidate();

    // We are now dealing with the response handling
    DPRINTF(Cache, "%s: Receive response: %s in state %i\n", __func__,
            bus_pkt->print(), old_state);

    // If packet was a forward, the response (if any) is already

--- 38 unchanged lines hidden ---

    }

    return latency;
}

Tick
Cache::recvAtomic(PacketPtr pkt)
{
    // Forward the request if the system is in cache bypass mode.
    if (system->bypassCaches())
        return ticksToCycles(memSidePort.sendAtomic(pkt));

    promoteWholeLineWrites(pkt);

    return BaseCache::recvAtomic(pkt);
}


/////////////////////////////////////////////////////
//
// Response handling: responses from the memory side
//
/////////////////////////////////////////////////////


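// Walk the target list of a satisfied MSHR: CPU targets are answered
// with responses sent back up, deferred snoop targets are serviced
// out of the freshly filled block, and prefetcher targets merely mark
// the block as hardware-prefetched.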
void
Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk,
                          PacketList &writebacks)
{
    MSHR::Target *initial_tgt = mshr->getTarget();
    // First offset for critical word first calculations
    const int initial_offset = initial_tgt->pkt->getOffset(blkSize);

    const bool is_error = pkt->isError();

--- 114 unchanged lines hidden ---

                // propagate that. Response should not have
                // isInvalidate() set otherwise.
                tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
                DPRINTF(Cache, "%s: updated cmd to %s\n", __func__,
                        tgt_pkt->print());
            }
            // Reset the bus additional time as it is now accounted for
            tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
            cpuSidePort.schedTimingResp(tgt_pkt, completion_time, true);
            break;

          case MSHR::Target::FromPrefetcher:
            assert(tgt_pkt->cmd == MemCmd::HardPFReq);
            if (blk)
                blk->status |= BlkHWPrefetched;
            delete tgt_pkt->req;
            delete tgt_pkt;

--- 33 unchanged lines hidden ---

        if (is_invalidate || mshr->hasPostInvalidate()) {
            invalidateBlock(blk);
        } else if (mshr->hasPostDowngrade()) {
            blk->status &= ~BlkWritable;
        }
    }
}

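// Evict a block, producing either a writeback (for dirty blocks, or
// clean ones too if writebackClean is set) or a CleanEvict message
// that keeps the snoop filter below up to date.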
PacketPtr
Cache::evictBlock(CacheBlk *blk)
{
    PacketPtr pkt = (blk->isDirty() || writebackClean) ?
        writebackBlk(blk) : cleanEvictBlk(blk);

    invalidateBlock(blk);


--- 5 unchanged lines hidden ---

{
    PacketPtr pkt = evictBlock(blk);
    if (pkt) {
        writebacks.push_back(pkt);
    }
}

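// Create the CleanEvict for a clean victim: a zero-sized write whose
// only purpose is to tell the snoop filter below that this cache no
// longer holds the block.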
PacketPtr
Cache::cleanEvictBlk(CacheBlk *blk)
{
    assert(!writebackClean);
    assert(blk && blk->isValid() && !blk->isDirty());
    // Creating a zero sized write, a message to the snoop filter
    Request *req =
        new Request(tags->regenerateBlkAddr(blk), blkSize, 0,
                    Request::wbMasterId);

--- 4 unchanged lines hidden ---


    PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
    pkt->allocate();
    DPRINTF(Cache, "Create CleanEvict %s\n", pkt->print());

    return pkt;
}


/////////////////////////////////////////////////////
//
// Snoop path: requests coming in from the memory side
//
/////////////////////////////////////////////////////

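// Supply data for a snoop in timing mode by turning the snooped
// request into a response carrying the block's data and scheduling it
// on the memory-side port.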
void
Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,

--- 32 unchanged lines hidden ---

    // Here we consider forward_time, paying for just forward latency and
    // also charging the delay provided by the xbar.
    // forward_time is used as the send_time in schedTimingSnoopResp below.
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    DPRINTF(CacheVerbose, "%s: created response: %s tick: %lu\n", __func__,
            pkt->print(), forward_time);
    memSidePort.schedTimingSnoopResp(pkt, forward_time, true);
}

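// The workhorse of the snoop path: forward the snoop to the caches
// above (as an express snoop in timing mode), then invalidate,
// downgrade and/or supply data from our own copy as the snoop
// demands. Returns the snoop delay added on top of the lookup.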
uint32_t
Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
                   bool is_deferred, bool pending_inval)
{
    DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());
    // deferred snoops can only happen in timing mode

--- 27 unchanged lines hidden ---

            // forwarding it upwards, we also allocate data (passing
            // the pointer along in case of static data), in case
            // there is a snoop hit in upper levels
            Packet snoopPkt(pkt, true, true);
            snoopPkt.setExpressSnoop();
            // the snoop packet does not need to wait any additional
            // time
            snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
            cpuSidePort.sendTimingSnoopReq(&snoopPkt);

            // add the header delay (including crossbar and snoop
            // delays) of the upward snoop to the snoop delay for this
            // cache
            snoop_delay += snoopPkt.headerDelay;

            if (snoopPkt.cacheResponding()) {
                // cache-to-cache response from some upper cache

--- 12 unchanged lines hidden ---

                pkt->setBlockCached();
            }
            // If the request was satisfied by snooping the cache
            // above, mark the original packet as satisfied too.
            if (snoopPkt.satisfied()) {
                pkt->setSatisfied();
            }
        } else {
            cpuSidePort.sendAtomicSnoop(pkt);
            if (!alreadyResponded && pkt->cacheResponding()) {
                // cache-to-cache response from some upper cache:
                // forward response to original requester
                assert(pkt->isResponse());
            }
        }
    }


--- 263 unchanged lines hidden ---

    uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false);

    // Override what we did when we first saw the snoop, as we now
    // also have the cost of the upwards snoops to account for
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay +
                                         lookupLatency * clockPeriod());
}

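// Atomic-mode snoop entry point: perform the snoop in place and fold
// its delay into the returned latency.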
Tick
Cache::recvAtomicSnoop(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!system->bypassCaches());

    // no need to snoop requests that are not in range.
    if (!inRange(pkt->getAddr())) {
        return 0;
    }

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
    uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
    return snoop_delay + lookupLatency * clockPeriod();
}

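// Snoop upwards on behalf of a writeback or CleanEvict to discover
// whether a cache above still holds the block; in atomic mode the
// BLOCK_CACHED flag is set directly in the snooped packet.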
bool
Cache::isCachedAbove(PacketPtr pkt, bool is_timing)
{
    if (!forwardSnoops)
        return false;
    // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
    // Writeback snoops into upper level caches to check for copies of the
    // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict
    // packet, the cache can inform the crossbar below of presence or absence
    // of the block.
    if (is_timing) {
        Packet snoop_pkt(pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // Assert that packet is either Writeback or CleanEvict and not a
        // prefetch request because prefetch requests need an MSHR and may
        // generate a snoop response.
        assert(pkt->isEviction() || pkt->cmd == MemCmd::WriteClean);
        snoop_pkt.senderState = nullptr;
        cpuSidePort.sendTimingSnoopReq(&snoop_pkt);
        // Writeback/CleanEvict snoops do not generate a snoop response.
        assert(!(snoop_pkt.cacheResponding()));
        return snoop_pkt.isBlockCached();
    } else {
        cpuSidePort.sendAtomicSnoop(pkt);
        return pkt->isBlockCached();
    }
}

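// Send the packet at the head of an MSHR downstream. Hardware
// prefetches first snoop the caches above: if the block turns out to
// be cached (or owned) up there, the prefetch is dropped instead of
// risking a fill with stale data.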
bool
Cache::sendMSHRQueuePacket(MSHR* mshr)
{
    assert(mshr);

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;

    if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
        DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());

        // we should never have hardware prefetches to allocated
        // blocks
        assert(!tags->findBlock(mshr->blkAddr, mshr->isSecure));

        // We need to check the caches above us to verify that
        // they don't have a copy of this block in the dirty state
        // at the moment. Without this check we could get a stale
        // copy from memory that might get used in place of the
        // dirty one.
        Packet snoop_pkt(tgt_pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // We are sending this packet upwards, but if it hits we will
        // get a snoop response that we end up treating just like a
        // normal response, hence it needs the MSHR as its sender
        // state
        snoop_pkt.senderState = mshr;
        cpuSidePort.sendTimingSnoopReq(&snoop_pkt);

        // Check to see if the prefetch was squashed by an upper cache
        // (to prevent us from grabbing the line) or if a writeback
        // arrived between the time the prefetch was placed in the
        // MSHRs and when it was selected to be sent.

        // It is important to check cacheResponding before

--- 32 unchanged lines hidden ---

            // given that no response is expected, delete Request and Packet
            delete tgt_pkt->req;
            delete tgt_pkt;

            return false;
        }
    }

    return BaseCache::sendMSHRQueuePacket(mshr);
}

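// Factory method used by the Python configuration system to
// instantiate a Cache from its generated parameter structure.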
Cache*
CacheParams::create()
{
    assert(tags);
    assert(replacement_policy);

    return new Cache(this);
}