/*
 * Copyright (c) 2010-2019 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Sandberg
 *          Nikos Nikoleris
 */

/**
 * @file
 * Cache definitions.
 */

#include "mem/cache/cache.hh"

#include <cassert>

#include "base/compiler.hh"
#include "base/logging.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CacheTags.hh"
#include "debug/CacheVerbose.hh"
#include "enums/Clusivity.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/tags/base.hh"
#include "mem/cache/write_queue_entry.hh"
#include "mem/request.hh"
#include "params/Cache.hh"

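// The Cache models a coherent cache that takes part in the classic
// memory system's MOESI-style snooping protocol, building on the
// machinery provided by BaseCache. doFastWrites enables the promotion
// of aligned, whole-line WriteReqs to WriteLineReq (see
// promoteWholeLineWrites below).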
Cache::Cache(const CacheParams *p)
    : BaseCache(p, p->system->cacheLineSize()),
      doFastWrites(true)
{
}

void
Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                      bool deferred_response, bool pending_downgrade)
{
    BaseCache::satisfyRequest(pkt, blk);

    if (pkt->isRead()) {
        // determine if this read is from a (coherent) cache or not
        if (pkt->fromCache()) {
            assert(pkt->getSize() == blkSize);
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsWritable()) {
                // sanity check
                assert(pkt->cmd == MemCmd::ReadExReq ||
                       pkt->cmd == MemCmd::SCUpgradeFailReq);
                assert(!pkt->hasSharers());

                // if we have a dirty copy, make sure the recipient
                // keeps it marked dirty (in the modified state)
                if (blk->isDirty()) {
                    pkt->setCacheResponding();
                    blk->status &= ~BlkDirty;
                }
            } else if (blk->isWritable() && !pending_downgrade &&
                       !pkt->hasSharers() &&
                       pkt->cmd != MemCmd::ReadCleanReq) {
                // we can give the requester a writable copy on a read
                // request if:
                // - we have a writable copy at this level (& below)
                // - we don't have a pending snoop from below
                //   signaling another read request
                // - no other cache above has a copy (otherwise it
                //   would have set hasSharers flag when
                //   snooping the packet)
                // - the read has not explicitly asked for a clean
                //   copy of the line
                if (blk->isDirty()) {
                    // special considerations if we're owner:
                    if (!deferred_response) {
                        // respond with the line in Modified state
                        // (cacheResponding set, hasSharers not set)
                        pkt->setCacheResponding();

                        // if this cache is mostly inclusive, we
                        // keep the block in the Exclusive state,
                        // and pass it upwards as Modified
                        // (writable and dirty), hence we have
                        // multiple caches, all on the same path
                        // towards memory, all considering the
                        // same block writable, but only one
                        // considering it Modified

                        // we get away with multiple caches (on
                        // the same path to memory) considering
                        // the block writable as we always enter
                        // the cache hierarchy through a cache,
                        // and first snoop upwards in all other
                        // branches
                        blk->status &= ~BlkDirty;
                    } else {
                        // if we're responding after our own miss,
                        // there's a window where the recipient didn't
                        // know it was getting ownership and may not
                        // have responded to snoops correctly, so we
                        // have to respond with a shared line
                        pkt->setHasSharers();
                    }
                }
            } else {
                // otherwise only respond with a shared copy
                pkt->setHasSharers();
            }
        }
    }
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

bool
Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat)
{
    if (pkt->req->isUncacheable()) {
        assert(pkt->isRequest());

        chatty_assert(!(isReadOnly && pkt->isWrite()),
                      "Should never see a write in a read-only cache %s\n",
                      name());

        DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

        // lookupLatency is the latency in case the request is uncacheable.
        lat = lookupLatency;

        // flush and invalidate any existing block
        CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
        if (old_blk && old_blk->isValid()) {
            BaseCache::evictBlock(old_blk, clockEdge(lat + forwardLatency));
        }

        blk = nullptr;
        return false;
    }

    return BaseCache::access(pkt, blk, lat);
}

void
Cache::doWritebacks(PacketPtr pkt, Tick forward_time)
{
    // We use forwardLatency here because we are copying writebacks to
    // the write buffer.

    // Call isCachedAbove for Writebacks, CleanEvicts and
    // WriteCleans to discover if the block is cached above.
    if (isCachedAbove(pkt)) {
        if (pkt->cmd == MemCmd::CleanEvict) {
            // Delete CleanEvict because cached copies exist above. The
            // packet destructor will delete the request object because
            // this is a non-snoop request packet which does not require a
            // response.
            delete pkt;
        } else if (pkt->cmd == MemCmd::WritebackClean) {
            // clean writeback, do not send since the block is
            // still cached above
            assert(writebackClean);
            delete pkt;
        } else {
            assert(pkt->cmd == MemCmd::WritebackDirty ||
                   pkt->cmd == MemCmd::WriteClean);
            // Set BLOCK_CACHED flag in Writeback and send below, so that
            // the Writeback does not reset the bit corresponding to this
            // address in the snoop filter below.
            pkt->setBlockCached();
            allocateWriteBuffer(pkt, forward_time);
        }
    } else {
        // If the block is not cached above, send packet below. Both
        // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
        // reset the bit corresponding to this address in the snoop filter
        // below.
        allocateWriteBuffer(pkt, forward_time);
    }
}

void
Cache::doWritebacksAtomic(PacketPtr pkt)
{
    // Call isCachedAbove for both Writebacks and CleanEvicts. If
    // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
    // and discard CleanEvicts.
    if (isCachedAbove(pkt, false)) {
        if (pkt->cmd == MemCmd::WritebackDirty ||
            pkt->cmd == MemCmd::WriteClean) {
            // Set BLOCK_CACHED flag in Writeback and send below,
            // so that the Writeback does not reset the bit
            // corresponding to this address in the snoop filter
            // below. We can discard CleanEvicts because cached
            // copies exist above. Atomic mode isCachedAbove
            // modifies packet to set BLOCK_CACHED flag
            memSidePort.sendAtomic(pkt);
        }
    } else {
        // If the block is not cached above, send packet below. Both
        // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
        // reset the bit corresponding to this address in the snoop filter
        // below.
        memSidePort.sendAtomic(pkt);
    }

    // In case of CleanEvicts, the packet destructor will delete the
    // request object because this is a non-snoop request packet which
    // does not require a response.
    delete pkt;
}

void
Cache::recvTimingSnoopResp(PacketPtr pkt)
{
    DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

    // determine if the response is from a snoop request we created
    // (in which case it should be in the outstandingSnoop), or if we
    // merely forwarded someone else's snoop request
    const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
        outstandingSnoop.end();

    if (!forwardAsSnoop) {
        // the packet came from this cache, so sink it here and do not
        // forward it
        assert(pkt->cmd == MemCmd::HardPFResp);

        outstandingSnoop.erase(pkt->req);

        DPRINTF(Cache, "Got prefetch response from above for addr "
                "%#llx (%s)\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
        recvTimingResp(pkt);
        return;
    }

    // forwardLatency is set here because there is a response from an
    // upper level cache.
    // To pay for the delay incurred if the packet came from the bus,
    // we also charge the headerDelay.
    Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    memSidePort.schedTimingSnoopResp(pkt, snoop_resp_time);
}

void
Cache::promoteWholeLineWrites(PacketPtr pkt)
{
    // Cache line clearing instructions
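    // (and any other store covering a full, aligned line) can complete
    // without first reading the line's old contents: on a miss,
    // createMissPacket() turns the promoted WriteLineReq into an
    // InvalidateReq rather than fetching data from below.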
    if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) {
        pkt->cmd = MemCmd::WriteLineReq;
        DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
    }
}

void
Cache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    // should never be satisfying an uncacheable access as we
    // flush and invalidate any existing block as part of the
    // lookup
    assert(!pkt->req->isUncacheable());

    BaseCache::handleTimingReqHit(pkt, blk, request_time);
}

void
Cache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time,
                           Tick request_time)
{
    if (pkt->req->isUncacheable()) {
        // ignore any existing MSHR if we are dealing with an
        // uncacheable request

        // should have flushed and have no valid block
        assert(!blk || !blk->isValid());

        mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;

        if (pkt->isWrite()) {
            allocateWriteBuffer(pkt, forward_time);
        } else {
            assert(pkt->isRead());

            // uncacheable accesses always allocate a new MSHR

            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }

        return;
    }

    Addr blk_addr = pkt->getBlockAddr(blkSize);

    MSHR *mshr = mshrQueue.findMatch(blk_addr, pkt->isSecure());

    // Software prefetch handling:
    // To keep the core from waiting on data it won't look at
    // anyway, send back a response with dummy data. Miss handling
    // will continue asynchronously. Unfortunately, the core will
    // insist upon freeing the original Packet/Request, so we have to
    // create a new pair with a different lifecycle. Note that this
    // processing happens before any MSHR munging on behalf of
    // this request because this new Request will be the one stored
    // into the MSHRs, not the original.
    if (pkt->cmd.isSWPrefetch()) {
        assert(pkt->needsResponse());
        assert(pkt->req->hasPaddr());
        assert(!pkt->req->isUncacheable());

        // There's no reason to add a prefetch as an additional target
        // to an existing MSHR. If an outstanding request is already
        // in progress, there is nothing for the prefetch to do.
        // If this is the case, we don't even create a request at all.
        PacketPtr pf = nullptr;

        if (!mshr) {
            // copy the request and create a new SoftPFReq packet
            RequestPtr req = std::make_shared<Request>(pkt->req->getPaddr(),
                                                       pkt->req->getSize(),
                                                       pkt->req->getFlags(),
                                                       pkt->req->masterId());
            pf = new Packet(req, pkt->cmd);
            pf->allocate();
            assert(pf->matchAddr(pkt));
            assert(pf->getSize() == pkt->getSize());
        }

        pkt->makeTimingResponse();

        // request_time is used here, taking into account lat and the delay
        // charged if the packet comes from the xbar.
        cpuSidePort.schedTimingResp(pkt, request_time);

        // If an outstanding request is in progress (we found an
        // MSHR) this is set to null
        pkt = pf;
    }

    BaseCache::handleTimingReqMiss(pkt, mshr, blk, forward_time, request_time);
}

void
Cache::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(CacheTags, "%s tags:\n%s\n", __func__, tags->print());

    promoteWholeLineWrites(pkt);

    if (pkt->cacheResponding()) {
        // a cache above us (but not where the packet came from) is
        // responding to the request, in other words it has the line
        // in Modified or Owned state
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if the packet needs the block to be writable, and the cache
        // that has promised to respond (setting the cache responding
        // flag) is not providing writable (it is in Owned rather than
        // the Modified state), we know that there may be other Shared
        // copies in the system; go out and invalidate them all
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        // an upstream cache that had the line in Owned state
        // (dirty, but not writable), is responding and thus
        // transferring the dirty line from one branch of the
        // cache hierarchy to another

        // send out an express snoop and invalidate all other
        // copies (snooping a packet that needs writable is the
        // same as an invalidation), thus turning the Owned line
        // into a Modified line, note that we don't invalidate the
        // block in the current cache or any other cache on the
        // path to memory

        // create a downstream express snoop with cleared packet
        // flags, there is no need to allocate any data as the
        // packet is merely used to co-ordinate state transitions
        Packet *snoop_pkt = new Packet(pkt, true, false);

        // also reset the bus time that the original packet has
        // not yet paid for
        snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;

        // make this an instantaneous express snoop, and let the
        // other caches in the system know that another cache
        // is responding, because we have found the authoritative
        // copy (Modified or Owned) that will supply the right
        // data
        snoop_pkt->setExpressSnoop();
        snoop_pkt->setCacheResponding();

        // this express snoop travels towards the memory, and at
        // every crossbar it is snooped upwards thus reaching
        // every cache in the system
        bool M5_VAR_USED success = memSidePort.sendTimingReq(snoop_pkt);
        // express snoops always succeed
        assert(success);

        // main memory will delete the snoop packet

        // queue for deletion, as opposed to immediate deletion, as
        // the sending cache is still relying on the packet
        pendingDelete.reset(pkt);

        // no need to take any further action in this particular cache
        // as an upstream cache has already committed to responding,
        // and we have already sent out any express snoops in the
        // section above to ensure all other copies in the system are
        // invalidated
        return;
    }

    BaseCache::recvTimingReq(pkt);
}

PacketPtr
Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                        bool needsWritable,
                        bool is_whole_line_write) const
{
    // should never see evictions here
    assert(!cpu_pkt->isEviction());

    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable() ||
        (!blkValid && cpu_pkt->isUpgrade()) ||
        cpu_pkt->cmd == MemCmd::InvalidateReq || cpu_pkt->isClean()) {
        // uncacheable requests and upgrades from upper-level caches
        // that missed completely just go through as is
        return nullptr;
    }

    assert(cpu_pkt->needsResponse());

    MemCmd cmd;
    // @TODO make useUpgrades a parameter.
    // Note that ownership protocols require upgrade, otherwise a
    // write miss on a shared owned block will generate a ReadExcl,
    // which will clobber the owned copy.
    const bool useUpgrades = true;
    assert(cpu_pkt->cmd != MemCmd::WriteLineReq || is_whole_line_write);
    if (is_whole_line_write) {
        assert(!blkValid || !blk->isWritable());
        // forward as invalidate to all other caches, this gives us
        // the line in Exclusive state, and invalidates all other
        // copies
        cmd = MemCmd::InvalidateReq;
    } else if (blkValid && useUpgrades) {
        // only reason to be here is that blk is read only and we need
        // it to be writable
        assert(needsWritable);
        assert(!blk->isWritable());
        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
               cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
        // Even though this SC will fail, we still need to send out the
        // request and get the data to supply it to other snoopers in the
        // case where the determination that the StoreCond fails is
        // delayed because not all caches are on the same local bus.
        cmd = MemCmd::SCUpgradeFailReq;
    } else {
        // block is invalid

        // If the request does not need a writable there are two cases
        // where we need to ensure the response will not fetch the
        // block in dirty state:
        // * this cache is read only and it does not perform
        //   writebacks,
        // * this cache is mostly exclusive and will not fill (since
        //   it does not fill it will have to writeback the dirty data
        //   immediately which generates unnecessary writebacks).
        bool force_clean_rsp = isReadOnly || clusivity == Enums::mostly_excl;
        cmd = needsWritable ? MemCmd::ReadExReq :
            (force_clean_rsp ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
    }
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);

    // if there are upstream caches that have already marked the
    // packet as having sharers (not passing writable), pass that info
    // downstream
    if (cpu_pkt->hasSharers() && !needsWritable) {
        // note that cpu_pkt may have spent a considerable time in the
        // MSHR queue and that the information could possibly be out
        // of date, however, there is no harm in conservatively
        // assuming the block has sharers
        pkt->setHasSharers();
        DPRINTF(Cache, "%s: passing hasSharers from %s to %s\n",
                __func__, cpu_pkt->print(), pkt->print());
    }

    // the packet should be block aligned
    assert(pkt->getAddr() == pkt->getBlockAddr(blkSize));

    pkt->allocate();
    DPRINTF(Cache, "%s: created %s from %s\n", __func__, pkt->print(),
            cpu_pkt->print());
    return pkt;
}


Cycles
Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk)
{
    // deal with the packets that go through the write path of
    // the cache, i.e. any evictions and writes
    if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
        (pkt->req->isUncacheable() && pkt->isWrite())) {
        Cycles latency = ticksToCycles(memSidePort.sendAtomic(pkt));

        // at this point, if the request was an uncacheable write
        // request, it has been satisfied by a memory below and the
        // packet carries the response back
        assert(!(pkt->req->isUncacheable() && pkt->isWrite()) ||
               pkt->isResponse());

        return latency;
    }

    // only misses left

    PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable(),
                                         pkt->isWholeLineWrite(blkSize));

    bool is_forward = (bus_pkt == nullptr);

    if (is_forward) {
        // just forwarding the same request to the next level
        // no local cache operation involved
        bus_pkt = pkt;
    }

    DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__,
            bus_pkt->print());

#if TRACING_ON
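    // capture the pre-request block state; it is only used by the
    // "Receive response" DPRINTF below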
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    Cycles latency = ticksToCycles(memSidePort.sendAtomic(bus_pkt));

    bool is_invalidate = bus_pkt->isInvalidate();

    // We are now dealing with the response handling
    DPRINTF(Cache, "%s: Receive response: %s in state %i\n", __func__,
            bus_pkt->print(), old_state);

    // If packet was a forward, the response (if any) is already
    // in place in the bus_pkt == pkt structure, so we don't need
    // to do anything. Otherwise, use the separate bus_pkt to
    // generate response to pkt and then delete it.
    if (!is_forward) {
        if (pkt->needsResponse()) {
            assert(bus_pkt->isResponse());
            if (bus_pkt->isError()) {
                pkt->makeAtomicResponse();
                pkt->copyError(bus_pkt);
            } else if (pkt->isWholeLineWrite(blkSize)) {
                // note the use of pkt, not bus_pkt here.

                // write-line request to the cache that promoted
                // the write to a whole line
                const bool allocate = allocOnFill(pkt->cmd) &&
                    (!writeAllocator || writeAllocator->allocate());
                blk = handleFill(bus_pkt, blk, allocate);
                assert(blk != nullptr);
                is_invalidate = false;
                satisfyRequest(pkt, blk);
            } else if (bus_pkt->isRead() ||
                       bus_pkt->cmd == MemCmd::UpgradeResp) {
                // we're updating cache state to allow us to
                // satisfy the upstream request from the cache
                blk = handleFill(bus_pkt, blk, allocOnFill(pkt->cmd));
                satisfyRequest(pkt, blk);
                maintainClusivity(pkt->fromCache(), blk);
            } else {
                // we're satisfying the upstream request without
                // modifying cache state, e.g., a write-through
                pkt->makeAtomicResponse();
            }
        }
        delete bus_pkt;
    }

    if (is_invalidate && blk && blk->isValid()) {
        invalidateBlock(blk);
    }

    return latency;
}

Tick
Cache::recvAtomic(PacketPtr pkt)
{
    promoteWholeLineWrites(pkt);

    // follow the same flow as in recvTimingReq, and check if a cache
    // above us is responding
    if (pkt->cacheResponding()) {
        assert(!pkt->req->isCacheInvalidate());
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if a cache is responding, and it had the line in Owned
        // rather than Modified state, we need to invalidate any
        // copies that are not on the same path to memory
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        return memSidePort.sendAtomic(pkt);
    }

    return BaseCache::recvAtomic(pkt);
}


/////////////////////////////////////////////////////
//
// Response handling: responses from the memory side
//
/////////////////////////////////////////////////////


void
Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk)
{
    QueueEntry::Target *initial_tgt = mshr->getTarget();
    // First offset for critical word first calculations
    const int initial_offset = initial_tgt->pkt->getOffset(blkSize);

    const bool is_error = pkt->isError();
    // allow invalidation responses originating from write-line
    // requests to be discarded
    bool is_invalidate = pkt->isInvalidate() &&
        !mshr->wasWholeLineWrite;

    MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
    for (auto &target: targets) {
        Packet *tgt_pkt = target.pkt;
        switch (target.source) {
          case MSHR::Target::FromCPU:
            Tick completion_time;
            // Here we charge the xbar delay, carried in headerDelay, on
            // completion_time if the packet came through the xbar.
            completion_time = pkt->headerDelay;

            // Software prefetch handling for cache closest to core
            if (tgt_pkt->cmd.isSWPrefetch()) {
                // a software prefetch would have already been ack'd
                // immediately with dummy data so the core would be able to
                // retire it. This request completes right here, so we
                // deallocate it.
                delete tgt_pkt;
                break; // skip response
            }

            // unlike the other packet flows, where data is found in other
            // caches or memory and brought back, write-line requests always
            // have the data right away, so the above check for "is fill?"
            // cannot actually be determined until examining the stored MSHR
            // state. We "catch up" with that logic here, which is duplicated
            // from above.
            if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
                assert(!is_error);
                assert(blk);
                assert(blk->isWritable());
            }

            if (blk && blk->isValid() && !mshr->isForward) {
                satisfyRequest(tgt_pkt, blk, true, mshr->hasPostDowngrade());

                // How many bytes past the first request is this one
                int transfer_offset =
                    tgt_pkt->getOffset(blkSize) - initial_offset;
                if (transfer_offset < 0) {
                    transfer_offset += blkSize;
                }

                // If this is not the critical word, also charge
                // payloadDelay. responseLatency is the latency of the
                // return path from lower level caches/memory to an upper
                // level cache or the core.
                completion_time += clockEdge(responseLatency) +
                    (transfer_offset ? pkt->payloadDelay : 0);

                assert(!tgt_pkt->req->isUncacheable());

                assert(tgt_pkt->req->masterId() < system->maxMasters());
                missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] +=
                    completion_time - target.recvTime;
            } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
                // failed StoreCond upgrade
                assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
                       tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
                       tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache or
                // the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                tgt_pkt->req->setExtraData(0);
            } else {
                // We are about to send a response to a cache above
                // that asked for an invalidation; we need to
                // invalidate our copy immediately as the most
                // up-to-date copy of the block will now be in the
                // cache above. It will also prevent this cache from
                // responding (if the block was previously dirty) to
                // snoops as they should snoop the caches above where
                // they will get the response from.
                if (is_invalidate && blk && blk->isValid()) {
                    invalidateBlock(blk);
                }
                // not a cache fill, just forwarding response
                // responseLatency is the latency of the return path
                // from lower level caches/memory to the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                if (pkt->isRead() && !is_error) {
                    // sanity check
                    assert(pkt->matchAddr(tgt_pkt));
                    assert(pkt->getSize() >= tgt_pkt->getSize());

                    tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
                }

                // this response did not allocate here and therefore
                // it was not consumed, make sure that any flags are
                // carried over to the cache above
                tgt_pkt->copyResponderFlags(pkt);
            }
            tgt_pkt->makeTimingResponse();
            // if this packet is an error copy that to the new packet
            if (is_error)
                tgt_pkt->copyError(pkt);
            if (tgt_pkt->cmd == MemCmd::ReadResp &&
                (is_invalidate || mshr->hasPostInvalidate())) {
                // If an intermediate cache got ReadRespWithInvalidate,
                // propagate that. The response should not have
                // isInvalidate() set otherwise.
                tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
                DPRINTF(Cache, "%s: updated cmd to %s\n", __func__,
                        tgt_pkt->print());
            }
            // Reset the bus additional time as it is now accounted for
            tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
            cpuSidePort.schedTimingResp(tgt_pkt, completion_time);
            break;

          case MSHR::Target::FromPrefetcher:
            assert(tgt_pkt->cmd == MemCmd::HardPFReq);
            if (blk)
                blk->status |= BlkHWPrefetched;
            delete tgt_pkt;
            break;

          case MSHR::Target::FromSnoop:
            // I don't believe that a snoop can be in an error state
            assert(!is_error);
            // response to snoop request
            DPRINTF(Cache, "processing deferred snoop...\n");
            // If the response is invalidating, a snooping target can
            // be satisfied if it is also invalidating. If the response is
            // not only invalidating, but more specifically an
            // InvalidateResp, and the MSHR was created due to an
            // InvalidateReq, then a cache above is waiting to satisfy a
            // WriteLineReq. In this case even a non-invalidating snoop is
            // added as a target here, since this is the ordering point.
            // When the InvalidateResp reaches this cache, the snooping
            // target will snoop further the cache above with the
            // WriteLineReq.
            assert(!is_invalidate || pkt->cmd == MemCmd::InvalidateResp ||
                   pkt->req->isCacheMaintenance() ||
                   mshr->hasPostInvalidate());
            handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
            break;

          default:
            panic("Illegal target->source enum %d\n", target.source);
        }
    }

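    // if this cache is mostly exclusive and a cache above was involved
    // in the request, drop our (clean) copy rather than keeping a
    // redundant one at this level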
    maintainClusivity(targets.hasFromCache, blk);

    if (blk && blk->isValid()) {
        // an invalidate response stemming from a write line request
        // should not invalidate the block, so check if the
        // invalidation should be discarded
        if (is_invalidate || mshr->hasPostInvalidate()) {
            invalidateBlock(blk);
        } else if (mshr->hasPostDowngrade()) {
            blk->status &= ~BlkWritable;
        }
    }
}

PacketPtr
Cache::evictBlock(CacheBlk *blk)
{
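    // a dirty block must be written back; a clean block only triggers a
    // writeback if this cache forwards clean writebacks (writebackClean),
    // otherwise a zero-sized CleanEvict is enough to keep the snoop
    // filter below up to date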
    PacketPtr pkt = (blk->isDirty() || writebackClean) ?
        writebackBlk(blk) : cleanEvictBlk(blk);

    invalidateBlock(blk);

    return pkt;
}

PacketPtr
Cache::cleanEvictBlk(CacheBlk *blk)
{
    assert(!writebackClean);
    assert(blk && blk->isValid() && !blk->isDirty());

    // Creating a zero sized write, a message to the snoop filter
    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);

    PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
    pkt->allocate();
    DPRINTF(Cache, "Create CleanEvict %s\n", pkt->print());

    return pkt;
}

/////////////////////////////////////////////////////
//
// Snoop path: requests coming in from the memory side
//
/////////////////////////////////////////////////////

void
Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
                              bool already_copied, bool pending_inval)
{
    // sanity check
    assert(req_pkt->isRequest());
    assert(req_pkt->needsResponse());

    DPRINTF(Cache, "%s: for %s\n", __func__, req_pkt->print());
    // timing-mode snoop responses require a new packet, unless we
    // already made a copy...
    PacketPtr pkt = req_pkt;
    if (!already_copied)
        // do not clear flags, and allocate space for data if the
        // packet needs it (the only packets that carry data are read
        // responses)
        pkt = new Packet(req_pkt, false, req_pkt->isRead());

    assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
           pkt->hasSharers());
    pkt->makeTimingResponse();
    if (pkt->isRead()) {
        pkt->setDataFromBlock(blk_data, blkSize);
    }
    if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
        // Assume we defer a response to a read from a far-away cache
        // A, then later defer a ReadExcl from a cache B on the same
        // bus as us. We'll assert cacheResponding in both cases, but
        // in the latter case cacheResponding will keep the
        // invalidation from reaching cache A. This special response
        // tells cache A that it gets the block to satisfy its read,
        // but must immediately invalidate it.
        pkt->cmd = MemCmd::ReadRespWithInvalidate;
    }
    // Here we consider forward_time, paying for just the forward latency
    // and also charging the delay provided by the xbar; forward_time is
    // used as the send time of the snoop response below.
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    DPRINTF(CacheVerbose, "%s: created response: %s tick: %lu\n", __func__,
            pkt->print(), forward_time);
    memSidePort.schedTimingSnoopResp(pkt, forward_time);
}

uint32_t
Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
                   bool is_deferred, bool pending_inval)
{
    DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());
    // deferred snoops can only happen in timing mode
    assert(!(is_deferred && !is_timing));
    // pending_inval only makes sense on deferred snoops
    assert(!(pending_inval && !is_deferred));
    assert(pkt->isRequest());

    // the packet may get modified if we or a forwarded snooper
    // responds in atomic mode, so remember a few things about the
    // original packet up front
    bool invalidate = pkt->isInvalidate();
    bool M5_VAR_USED needs_writable = pkt->needsWritable();

    // at the moment we could get an uncacheable write which does not
    // have the invalidate flag, and we need a suitable way of dealing
    // with this case
    panic_if(invalidate && pkt->req->isUncacheable(),
             "%s got an invalidating uncacheable snoop request %s",
             name(), pkt->print());

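    // latency accumulated by this snoop, i.e. the header delay of any
    // upward snoop plus, potentially, a decompression delay; it is
    // returned to the caller so the crossbar can account for it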
    uint32_t snoop_delay = 0;

    if (forwardSnoops) {
        // first propagate snoop upward to see if anyone above us wants to
        // handle it. save & restore packet src since it will get
        // rewritten to be relative to cpu-side bus (if any)
        if (is_timing) {
            // copy the packet so that we can clear any flags before
            // forwarding it upwards, we also allocate data (passing
            // the pointer along in case of static data), in case
            // there is a snoop hit in upper levels
            Packet snoopPkt(pkt, true, true);
            snoopPkt.setExpressSnoop();
            // the snoop packet does not need to wait any additional
            // time
            snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
            cpuSidePort.sendTimingSnoopReq(&snoopPkt);

            // add the header delay (including crossbar and snoop
            // delays) of the upward snoop to the snoop delay for this
            // cache
            snoop_delay += snoopPkt.headerDelay;

            // If this request is a prefetch or clean evict and an upper level
            // signals block present, make sure to propagate the block
            // presence to the requester.
            if (snoopPkt.isBlockCached()) {
                pkt->setBlockCached();
            }
            // If the request was satisfied by snooping the cache
            // above, mark the original packet as satisfied too.
            if (snoopPkt.satisfied()) {
                pkt->setSatisfied();
            }

            // Copy over flags from the snoop response to make sure we
            // inform the final destination
            pkt->copyResponderFlags(&snoopPkt);
        } else {
            bool already_responded = pkt->cacheResponding();
            cpuSidePort.sendAtomicSnoop(pkt);
            if (!already_responded && pkt->cacheResponding()) {
                // cache-to-cache response from some upper cache:
                // forward response to original requester
                assert(pkt->isResponse());
            }
        }
    }

    bool respond = false;
    bool blk_valid = blk && blk->isValid();
    if (pkt->isClean()) {
        if (blk_valid && blk->isDirty()) {
            DPRINTF(CacheVerbose, "%s: packet (snoop) %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);

            if (is_timing) {
                // anything that is merely forwarded pays for the forward
                // latency and the delay provided by the crossbar
                Tick forward_time = clockEdge(forwardLatency) +
                    pkt->headerDelay;
                doWritebacks(wb_pkt, forward_time);
            } else {
                doWritebacksAtomic(wb_pkt);
            }
            pkt->setSatisfied();
        }
    } else if (!blk_valid) {
        DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__,
                pkt->print());
        if (is_deferred) {
            // we no longer have the block, and will not respond, but a
            // packet was allocated in MSHR::handleSnoop and we have
            // to delete it
            assert(pkt->needsResponse());

            // we have passed the block to a cache upstream, that
            // cache should be responding
            assert(pkt->cacheResponding());

            delete pkt;
        }
        return snoop_delay;
    } else {
        DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__,
                pkt->print(), blk->print());

        // We may end up modifying both the block state and the packet (if
        // we respond in atomic mode), so just figure out what to do now
        // and then do it later. We respond to all snoops that need
        // responses provided we have the block in dirty state. The
        // invalidation itself is taken care of below. We don't respond to
        // cache maintenance operations as this is done by the destination
        // xbar.
        respond = blk->isDirty() && pkt->needsResponse();

        chatty_assert(!(isReadOnly && blk->isDirty()), "Should never have "
                      "a dirty block in a read-only cache %s\n", name());
    }

    // Invalidate any prefetches from below that would strip write permissions
    // MemCmd::HardPFReq is only observed by upstream caches. After missing
    // above and in its own cache, a new MemCmd::ReadReq is created that
    // downstream caches observe.
    if (pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
                "from lower cache\n", pkt->getAddr(), pkt->print());
        pkt->setBlockCached();
        return snoop_delay;
    }

    if (pkt->isRead() && !invalidate) {
        // reading without requiring the line in a writable state
        assert(!needs_writable);
        pkt->setHasSharers();

        // if the requesting packet is uncacheable, retain the line in
        // the current state, otherwise unset the writable flag,
        // which means we go from Modified to Owned (and will respond
        // below), remain in Owned (and will respond below), from
        // Exclusive to Shared, or remain in Shared
        if (!pkt->req->isUncacheable())
            blk->status &= ~BlkWritable;
        DPRINTF(Cache, "new state is %s\n", blk->print());
    }

    if (respond) {
        // prevent anyone else from responding, cache as well as
        // memory, and also prevent any memory from even seeing the
        // request
        pkt->setCacheResponding();
        if (!pkt->isClean() && blk->isWritable()) {
            // inform the cache hierarchy that this cache had the line
            // in the Modified state so that we avoid unnecessary
            // invalidations (see Packet::setResponderHadWritable)
            pkt->setResponderHadWritable();

            // in the case of an uncacheable request there is no point
            // in setting the responderHadWritable flag, but since the
            // recipient does not care there is no harm in doing so
        } else {
            // if the packet has needsWritable set we invalidate our
            // copy below and all other copies will be invalidated
            // through express snoops, and if needsWritable is not set
            // we already called setHasSharers above
        }

        // if we are returning a writable and dirty (Modified) line,
        // we should be invalidating the line
        panic_if(!invalidate && !pkt->hasSharers(),
                 "%s is passing a Modified line through %s, "
                 "but keeping the block", name(), pkt->print());

        if (is_timing) {
            doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
        } else {
            pkt->makeAtomicResponse();
            // packets such as upgrades do not actually have any data
            // payload
            if (pkt->hasData())
                pkt->setDataFromBlock(blk->data, blkSize);
        }

        // When a block is compressed, it must first be decompressed before
        // being read, and this increases the snoop delay.
        if (compressor && pkt->isRead()) {
            snoop_delay += compressor->getDecompressionLatency(blk);
        }
    }

    if (!respond && is_deferred) {
        assert(pkt->needsResponse());
        delete pkt;
    }

    // Do this last in case it deallocates block data or something
    // like that
    if (blk_valid && invalidate) {
        invalidateBlock(blk);
        DPRINTF(Cache, "new state is %s\n", blk->print());
    }

    return snoop_delay;
}


void
Cache::recvTimingSnoopReq(PacketPtr pkt)
{
    DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());

    // no need to snoop requests that are not in range
    if (!inRange(pkt->getAddr())) {
        return;
    }

    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);

    Addr blk_addr = pkt->getBlockAddr(blkSize);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    // Update the latency cost of the snoop so that the crossbar can
    // account for it. Do not overwrite what other neighbouring caches
    // have already done, rather take the maximum. The update is
    // tentative, for cases where we return before an upward snoop
    // happens below.
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
                                         lookupLatency * clockPeriod());

    // Inform requests from below (Prefetch, CleanEvict or Writeback) of
    // an MSHR hit by setting BLOCK_CACHED.
    if (mshr && pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Setting block cached for %s from lower cache on "
                "mshr hit\n", pkt->print());
        pkt->setBlockCached();
        return;
    }

    // Let the MSHR itself track the snoop and decide whether we want
    // to go ahead and do the regular cache snoop
    if (mshr && mshr->handleSnoop(pkt, order++)) {
        DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx "
                "(%s). mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
                mshr->print());

        if (mshr->getNumTargets() > numTarget)
            warn("allocating bonus target for snoop"); //handle later
        return;
    }

    // We also need to check the writeback buffers and handle those
    WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure);
    if (wb_entry) {
        DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
                pkt->getAddr(), is_secure ? "s" : "ns");
        // Expect to see only Writebacks and/or CleanEvicts here, both of
        // which should not be generated for uncacheable data.
        assert(!wb_entry->isUncacheable());
        // There should only be a single request responsible for generating
        // Writebacks/CleanEvicts.
        assert(wb_entry->getNumTargets() == 1);
        PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
        assert(wb_pkt->isEviction() || wb_pkt->cmd == MemCmd::WriteClean);

        if (pkt->isEviction()) {
            // if the block is found in the write queue, set the BLOCK_CACHED
            // flag for Writeback/CleanEvict snoop. On return the snoop will
            // propagate the BLOCK_CACHED flag in Writeback packets and prevent
            // any CleanEvicts from travelling down the memory hierarchy.
            pkt->setBlockCached();
            DPRINTF(Cache, "%s: Squashing %s from lower cache on writequeue "
                    "hit\n", __func__, pkt->print());
            return;
        }

        // conceptually writebacks are no different to other blocks in
        // this cache, so the behaviour is modelled after handleSnoop,
        // the difference being that instead of querying the block
        // state to determine if it is dirty and writable, we use the
        // command and fields of the writeback packet
        bool respond = wb_pkt->cmd == MemCmd::WritebackDirty &&
            pkt->needsResponse();
        bool have_writable = !wb_pkt->hasSharers();
        bool invalidate = pkt->isInvalidate();

        if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
            assert(!pkt->needsWritable());
            pkt->setHasSharers();
            wb_pkt->setHasSharers();
        }

        if (respond) {
            pkt->setCacheResponding();

            if (have_writable) {
                pkt->setResponderHadWritable();
            }

            doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
                                   false, false);
        }

        if (invalidate && wb_pkt->cmd != MemCmd::WriteClean) {
            // Invalidation trumps our writeback... discard here
            // Note: markInService will remove entry from writeback buffer.
            markInService(wb_entry);
            delete wb_pkt;
        }
    }

    // If this was a shared writeback, there may still be
    // other shared copies above that require invalidation.
    // We could be more selective and return here if the
    // request is non-exclusive or if the writeback is
    // exclusive.
    uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false);

    // Override what we did when we first saw the snoop, as we now
    // also have the cost of the upwards snoops to account for
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay +
                                         lookupLatency * clockPeriod());
}

Tick
Cache::recvAtomicSnoop(PacketPtr pkt)
{
    // no need to snoop requests that are not in range.
    if (!inRange(pkt->getAddr())) {
        return 0;
    }

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
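    // snoop the block, and charge this cache's lookup latency on top of
    // whatever delay the snoop itself accumulated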
    uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
    return snoop_delay + lookupLatency * clockPeriod();
}

bool
Cache::isCachedAbove(PacketPtr pkt, bool is_timing)
{
    if (!forwardSnoops)
        return false;
    // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
    // Writeback snoops into upper level caches to check for copies of the
    // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict
    // packet, the cache can inform the crossbar below of presence or absence
    // of the block.
    if (is_timing) {
        Packet snoop_pkt(pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // Assert that packet is either Writeback or CleanEvict and not a
        // prefetch request because prefetch requests need an MSHR and may
        // generate a snoop response.
        assert(pkt->isEviction() || pkt->cmd == MemCmd::WriteClean);
        snoop_pkt.senderState = nullptr;
        cpuSidePort.sendTimingSnoopReq(&snoop_pkt);
        // Writeback/CleanEvict snoops do not generate a snoop response.
        assert(!(snoop_pkt.cacheResponding()));
        return snoop_pkt.isBlockCached();
    } else {
        cpuSidePort.sendAtomicSnoop(pkt);
        return pkt->isBlockCached();
    }
}

bool
Cache::sendMSHRQueuePacket(MSHR* mshr)
{
    assert(mshr);

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;

    if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
        DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());

        // we should never have hardware prefetches to allocated
        // blocks
        assert(!tags->findBlock(mshr->blkAddr, mshr->isSecure));

        // We need to check the caches above us to verify that
        // they don't have a copy of this block in the dirty state
        // at the moment. Without this check we could get a stale
        // copy from memory that might get used in place of the
        // dirty one.
        Packet snoop_pkt(tgt_pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // We are sending this packet upwards, but if it hits we will
        // get a snoop response that we end up treating just like a
        // normal response, hence it needs the MSHR as its sender
        // state
        snoop_pkt.senderState = mshr;
        cpuSidePort.sendTimingSnoopReq(&snoop_pkt);

        // Check to see if the prefetch was squashed by an upper cache
        // (to prevent us from grabbing the line), or if a writeback
        // arrived between the time the prefetch was placed in the
        // MSHRs and when it was selected to be sent.

        // It is important to check cacheResponding before
        // prefetchSquashed. If another cache has committed to
        // responding, it will be sending a dirty response which will
        // arrive at the MSHR allocated for this request. Checking the
        // prefetchSquash first may result in the MSHR being
        // prematurely deallocated.
        if (snoop_pkt.cacheResponding()) {
            auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
            assert(r.second);

            // if we are getting a snoop response with no sharers it
            // will be allocated as Modified
            bool pending_modified_resp = !snoop_pkt.hasSharers();
            markInService(mshr, pending_modified_resp);

            DPRINTF(Cache, "Upward snoop of prefetch for addr"
                    " %#x (%s) hit\n",
                    tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
            return false;
        }

        if (snoop_pkt.isBlockCached()) {
            DPRINTF(Cache, "Block present, prefetch squashed by cache. "
                    "Deallocating mshr target %#x.\n",
                    mshr->blkAddr);

            // Deallocate the mshr target
            if (mshrQueue.forceDeallocateTarget(mshr)) {
                // Clear the blocked state if this deallocation freed an
                // MSHR when all had previously been utilized
                clearBlocked(Blocked_NoMSHRs);
            }

            // given that no response is expected, delete Request and Packet
            delete tgt_pkt;

            return false;
        }
    }

    return BaseCache::sendMSHRQueuePacket(mshr);
}

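// Factory method called when the SimObject is instantiated from the
// Python configuration; the generated CacheParams object carries the
// parameter values set in the config scripts.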
Cache*
CacheParams::create()
{
    assert(tags);
    assert(replacement_policy);

    return new Cache(this);
}