cache.cc revision 13478
/*
 * Copyright (c) 2010-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Sandberg
 *          Nikos Nikoleris
 */

/**
 * @file
 * Cache definitions.
 */

#include "mem/cache/cache.hh"

#include <cassert>

#include "base/compiler.hh"
#include "base/logging.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CacheTags.hh"
#include "debug/CacheVerbose.hh"
#include "enums/Clusivity.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/tags/base.hh"
#include "mem/cache/write_queue_entry.hh"
#include "mem/request.hh"
#include "params/Cache.hh"

Cache::Cache(const CacheParams *p)
    : BaseCache(p, p->system->cacheLineSize()),
      doFastWrites(true)
{
}

void
Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                      bool deferred_response, bool pending_downgrade)
{
    BaseCache::satisfyRequest(pkt, blk);

    if (pkt->isRead()) {
        // determine if this read is from a (coherent) cache or not
        if (pkt->fromCache()) {
            assert(pkt->getSize() == blkSize);
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsWritable()) {
                // sanity check
                assert(pkt->cmd == MemCmd::ReadExReq ||
                       pkt->cmd == MemCmd::SCUpgradeFailReq);
                assert(!pkt->hasSharers());

                // if we have a dirty copy, make sure the recipient
                // keeps it marked dirty (in the modified state)
                if (blk->isDirty()) {
                    pkt->setCacheResponding();
                    blk->status &= ~BlkDirty;
                }
            } else if (blk->isWritable() && !pending_downgrade &&
                       !pkt->hasSharers() &&
                       pkt->cmd != MemCmd::ReadCleanReq) {
                // we can give the requester a writable copy on a read
                // request if:
                // - we have a writable copy at this level (& below)
                // - we don't have a pending snoop from below
                //   signaling another read request
                // - no other cache above has a copy (otherwise it
                //   would have set the hasSharers flag when
                //   snooping the packet)
                // - the read has not explicitly asked for a clean
                //   copy of the line
                if (blk->isDirty()) {
                    // special considerations if we're owner:
                    if (!deferred_response) {
                        // respond with the line in Modified state
                        // (cacheResponding set, hasSharers not set)
                        pkt->setCacheResponding();

                        // if this cache is mostly inclusive, we
                        // keep the block in the Exclusive state,
                        // and pass it upwards as Modified
                        // (writable and dirty), hence we have
                        // multiple caches, all on the same path
                        // towards memory, all considering the
                        // same block writable, but only one
                        // considering it Modified

                        // we get away with multiple caches (on
                        // the same path to memory) considering
                        // the block writable as we always enter
                        // the cache hierarchy through a cache,
                        // and first snoop upwards in all other
                        // branches
                        blk->status &= ~BlkDirty;
                    } else {
                        // if we're responding after our own miss,
                        // there's a window where the recipient didn't
                        // know it was getting ownership and may not
                        // have responded to snoops correctly, so we
                        // have to respond with a shared line
                        pkt->setHasSharers();
                    }
                }
            } else {
                // otherwise only respond with a shared copy
                pkt->setHasSharers();
            }
        }
    }
}
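
// Illustrative walk-through of the read cases handled above (assuming
// a coherent cache above us as the requester):
// - ReadExReq while our copy is dirty: we respond, clear BlkDirty
//   locally, and the requester installs the line in Modified;
// - ReadSharedReq while our copy is writable, with no sharers and no
//   pending downgrade: the requester gets a writable copy, passed as
//   Modified if we were dirty and can respond directly (Exclusive
//   otherwise), while a deferred response falls back to a shared line;
// - any other case (e.g. ReadCleanReq, or hasSharers already set): we
//   set hasSharers and the requester gets a Shared copy.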

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

bool
Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
              PacketList &writebacks)
{

    if (pkt->req->isUncacheable()) {
        assert(pkt->isRequest());

        chatty_assert(!(isReadOnly && pkt->isWrite()),
                      "Should never see a write in a read-only cache %s\n",
                      name());

        DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

        // flush and invalidate any existing block
        CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
        if (old_blk && old_blk->isValid()) {
            BaseCache::evictBlock(old_blk, writebacks);
        }

        blk = nullptr;
        // lookupLatency is the latency in case the request is uncacheable.
        lat = lookupLatency;
        return false;
    }

    return BaseCache::access(pkt, blk, lat, writebacks);
}

void
Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // We use forwardLatency here because we are copying writebacks to
        // the write buffer.

        // Call isCachedAbove for Writebacks, CleanEvicts and
        // WriteCleans to discover if the block is cached above.
        if (isCachedAbove(wbPkt)) {
            if (wbPkt->cmd == MemCmd::CleanEvict) {
                // Delete CleanEvict because cached copies exist above. The
                // packet destructor will delete the request object because
                // this is a non-snoop request packet which does not require a
                // response.
                delete wbPkt;
            } else if (wbPkt->cmd == MemCmd::WritebackClean) {
                // clean writeback, do not send since the block is
                // still cached above
                assert(writebackClean);
                delete wbPkt;
            } else {
                assert(wbPkt->cmd == MemCmd::WritebackDirty ||
                       wbPkt->cmd == MemCmd::WriteClean);
                // Set BLOCK_CACHED flag in Writeback and send below, so that
                // the Writeback does not reset the bit corresponding to this
                // address in the snoop filter below.
                wbPkt->setBlockCached();
                allocateWriteBuffer(wbPkt, forward_time);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            allocateWriteBuffer(wbPkt, forward_time);
        }
        writebacks.pop_front();
    }
}

void
Cache::doWritebacksAtomic(PacketList& writebacks)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // Call isCachedAbove for both Writebacks and CleanEvicts. If
        // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
        // and discard CleanEvicts.
        if (isCachedAbove(wbPkt, false)) {
            if (wbPkt->cmd == MemCmd::WritebackDirty ||
                wbPkt->cmd == MemCmd::WriteClean) {
                // Set BLOCK_CACHED flag in Writeback and send below,
                // so that the Writeback does not reset the bit
                // corresponding to this address in the snoop filter
                // below. We can discard CleanEvicts because cached
                // copies exist above. Atomic mode isCachedAbove
                // modifies packet to set BLOCK_CACHED flag
                memSidePort.sendAtomic(wbPkt);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            memSidePort.sendAtomic(wbPkt);
        }
        writebacks.pop_front();
        // In case of CleanEvicts, the packet destructor will delete the
        // request object because this is a non-snoop request packet which
        // does not require a response.
        delete wbPkt;
    }
}


void
Cache::recvTimingSnoopResp(PacketPtr pkt)
{
    DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

    // determine if the response is from a snoop request we created
    // (in which case it should be in the outstandingSnoop), or if we
    // merely forwarded someone else's snoop request
    const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
        outstandingSnoop.end();

    if (!forwardAsSnoop) {
        // the packet came from this cache, so sink it here and do not
        // forward it
        assert(pkt->cmd == MemCmd::HardPFResp);

        outstandingSnoop.erase(pkt->req);

        DPRINTF(Cache, "Got prefetch response from above for addr "
                "%#llx (%s)\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
        recvTimingResp(pkt);
        return;
    }

    // forwardLatency is set here because there is a response from an
    // upper level cache.
    // To pay the delay that occurs if the packet comes from the bus,
    // we also charge headerDelay.
    Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    memSidePort.schedTimingSnoopResp(pkt, snoop_resp_time);
}

void
Cache::promoteWholeLineWrites(PacketPtr pkt)
{
    // Cache line clearing instructions
    if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) {
        pkt->cmd = MemCmd::WriteLineReq;
        DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
    }
}
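
// Illustrative example of the promotion condition above: with an
// assumed 64-byte block size, a WriteReq covering [0x100, 0x140) has
// size 64 and offset 0 within its block, so it becomes a
// WriteLineReq; a 64-byte WriteReq starting at 0x120, or any write
// smaller than the block, is left untouched.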

void
Cache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    // should never be satisfying an uncacheable access as we
    // flush and invalidate any existing block as part of the
    // lookup
    assert(!pkt->req->isUncacheable());

    BaseCache::handleTimingReqHit(pkt, blk, request_time);
}

void
Cache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time,
                           Tick request_time)
{
    if (pkt->req->isUncacheable()) {
        // ignore any existing MSHR if we are dealing with an
        // uncacheable request

        // should have flushed and have no valid block
        assert(!blk || !blk->isValid());

        mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;

        if (pkt->isWrite()) {
            allocateWriteBuffer(pkt, forward_time);
        } else {
            assert(pkt->isRead());

            // uncacheable accesses always allocate a new MSHR

            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }

        return;
    }

    Addr blk_addr = pkt->getBlockAddr(blkSize);

    MSHR *mshr = mshrQueue.findMatch(blk_addr, pkt->isSecure());

    // Software prefetch handling:
    // To keep the core from waiting on data it won't look at
    // anyway, send back a response with dummy data. Miss handling
    // will continue asynchronously. Unfortunately, the core will
    // insist upon freeing the original Packet/Request, so we have to
    // create a new pair with a different lifecycle. Note that this
    // processing happens before any MSHR munging on behalf of
    // this request because this new Request will be the one stored
    // into the MSHRs, not the original.
    if (pkt->cmd.isSWPrefetch()) {
        assert(pkt->needsResponse());
        assert(pkt->req->hasPaddr());
        assert(!pkt->req->isUncacheable());

        // There's no reason to add a prefetch as an additional target
        // to an existing MSHR. If an outstanding request is already
        // in progress, there is nothing for the prefetch to do.
        // If this is the case, we don't even create a request at all.
        PacketPtr pf = nullptr;

        if (!mshr) {
            // copy the request and create a new SoftPFReq packet
            RequestPtr req = std::make_shared<Request>(pkt->req->getPaddr(),
                                                       pkt->req->getSize(),
                                                       pkt->req->getFlags(),
                                                       pkt->req->masterId());
            pf = new Packet(req, pkt->cmd);
            pf->allocate();
            assert(pf->getAddr() == pkt->getAddr());
            assert(pf->getSize() == pkt->getSize());
        }

        pkt->makeTimingResponse();

        // request_time is used here, taking into account lat and the delay
        // charged if the packet comes from the xbar.
        cpuSidePort.schedTimingResp(pkt, request_time, true);

        // If an outstanding request is in progress (we found an
        // MSHR) this is set to null
        pkt = pf;
    }

    BaseCache::handleTimingReqMiss(pkt, mshr, blk, forward_time, request_time);
}

void
Cache::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(CacheTags, "%s tags:\n%s\n", __func__, tags->print());

    promoteWholeLineWrites(pkt);

    if (pkt->cacheResponding()) {
        // a cache above us (but not where the packet came from) is
        // responding to the request, in other words it has the line
        // in Modified or Owned state
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if the packet needs the block to be writable, and the cache
        // that has promised to respond (setting the cache responding
        // flag) is not providing writable (it is in Owned rather than
        // the Modified state), we know that there may be other Shared
        // copies in the system; go out and invalidate them all
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        // an upstream cache that had the line in Owned state
        // (dirty, but not writable), is responding and thus
        // transferring the dirty line from one branch of the
        // cache hierarchy to another

        // send out an express snoop and invalidate all other
        // copies (snooping a packet that needs writable is the
        // same as an invalidation), thus turning the Owned line
        // into a Modified line, note that we don't invalidate the
        // block in the current cache or any other cache on the
        // path to memory

        // create a downstream express snoop with cleared packet
        // flags, there is no need to allocate any data as the
        // packet is merely used to co-ordinate state transitions
        Packet *snoop_pkt = new Packet(pkt, true, false);

        // also reset the bus time that the original packet has
        // not yet paid for
        snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;

        // make this an instantaneous express snoop, and let the
        // other caches in the system know that another cache
        // is responding, because we have found the authoritative
        // copy (Modified or Owned) that will supply the right
        // data
        snoop_pkt->setExpressSnoop();
        snoop_pkt->setCacheResponding();

        // this express snoop travels towards the memory, and at
        // every crossbar it is snooped upwards thus reaching
        // every cache in the system
        bool M5_VAR_USED success = memSidePort.sendTimingReq(snoop_pkt);
        // express snoops always succeed
        assert(success);

        // main memory will delete the snoop packet

        // queue for deletion, as opposed to immediate deletion, as
        // the sending cache is still relying on the packet
        pendingDelete.reset(pkt);

        // no need to take any further action in this particular cache
        // as an upstream cache has already committed to responding,
        // and we have already sent out any express snoops in the
        // section above to ensure all other copies in the system are
        // invalidated
        return;
    }

    BaseCache::recvTimingReq(pkt);
}

PacketPtr
Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                        bool needsWritable,
                        bool is_whole_line_write) const
{
    // should never see evictions here
    assert(!cpu_pkt->isEviction());

    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable() ||
        (!blkValid && cpu_pkt->isUpgrade()) ||
        cpu_pkt->cmd == MemCmd::InvalidateReq || cpu_pkt->isClean()) {
        // uncacheable requests and upgrades from upper-level caches
        // that missed completely just go through as is
        return nullptr;
    }

    assert(cpu_pkt->needsResponse());

    MemCmd cmd;
    // @TODO make useUpgrades a parameter.
    // Note that ownership protocols require upgrade, otherwise a
    // write miss on a shared owned block will generate a ReadExcl,
    // which will clobber the owned copy.
    const bool useUpgrades = true;
    assert(cpu_pkt->cmd != MemCmd::WriteLineReq || is_whole_line_write);
    if (is_whole_line_write) {
        assert(!blkValid || !blk->isWritable());
        // forward as invalidate to all other caches, this gives us
        // the line in Exclusive state, and invalidates all other
        // copies
        cmd = MemCmd::InvalidateReq;
    } else if (blkValid && useUpgrades) {
        // only reason to be here is that blk is read only and we need
        // it to be writable
        assert(needsWritable);
        assert(!blk->isWritable());
        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
               cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
        // Even though this SC will fail, we still need to send out the
        // request and get the data to supply it to other snoopers in the case
        // where the determination that the StoreCond fails is delayed due to
        // all caches not being on the same local bus.
        cmd = MemCmd::SCUpgradeFailReq;
    } else {
        // block is invalid

        // If the request does not need a writable copy there are two
        // cases where we need to ensure the response will not fetch
        // the block in dirty state:
        // * this cache is read only and it does not perform
        //   writebacks,
        // * this cache is mostly exclusive and will not fill (since
        //   it does not fill it will have to writeback the dirty data
        //   immediately which generates unnecessary writebacks).
        bool force_clean_rsp = isReadOnly || clusivity == Enums::mostly_excl;
        cmd = needsWritable ? MemCmd::ReadExReq :
            (force_clean_rsp ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
    }
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);

    // if there are upstream caches that have already marked the
    // packet as having sharers (not passing writable), pass that info
    // downstream
    if (cpu_pkt->hasSharers() && !needsWritable) {
        // note that cpu_pkt may have spent a considerable time in the
        // MSHR queue and that the information could possibly be out
        // of date, however, there is no harm in conservatively
        // assuming the block has sharers
        pkt->setHasSharers();
        DPRINTF(Cache, "%s: passing hasSharers from %s to %s\n",
                __func__, cpu_pkt->print(), pkt->print());
    }

    // the packet should be block aligned
    assert(pkt->getAddr() == pkt->getBlockAddr(blkSize));

    pkt->allocate();
    DPRINTF(Cache, "%s: created %s from %s\n", __func__, pkt->print(),
            cpu_pkt->print());
    return pkt;
}
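
// For reference, a summary of the command selection above (requests
// forwarded as-is return nullptr earlier and never reach this point):
//
//   miss situation                                bus command
//   -------------------------------------------  ------------------------
//   whole-line write                              InvalidateReq
//   valid but read-only block, writable needed    UpgradeReq/SCUpgradeReq
//   failed StoreCond / SCUpgrade                  SCUpgradeFailReq
//   invalid block, writable needed                ReadExReq
//   invalid block, clean response forced          ReadCleanReq
//   invalid block, otherwise                      ReadSharedReq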


Cycles
Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
                           PacketList &writebacks)
{
    // deal with the packets that go through the write path of
    // the cache, i.e. any evictions and writes
    if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
        (pkt->req->isUncacheable() && pkt->isWrite())) {
        Cycles latency = ticksToCycles(memSidePort.sendAtomic(pkt));

        // at this point, if the request was an uncacheable write
        // request, it has been satisfied by a memory below and the
        // packet carries the response back
        assert(!(pkt->req->isUncacheable() && pkt->isWrite()) ||
               pkt->isResponse());

        return latency;
    }

    // only misses left

    PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable(),
                                         pkt->isWholeLineWrite(blkSize));

    bool is_forward = (bus_pkt == nullptr);

    if (is_forward) {
        // just forwarding the same request to the next level
        // no local cache operation involved
        bus_pkt = pkt;
    }

    DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__,
            bus_pkt->print());

#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    Cycles latency = ticksToCycles(memSidePort.sendAtomic(bus_pkt));

    bool is_invalidate = bus_pkt->isInvalidate();

    // We are now dealing with the response handling
    DPRINTF(Cache, "%s: Receive response: %s in state %i\n", __func__,
            bus_pkt->print(), old_state);

    // If packet was a forward, the response (if any) is already
    // in place in the bus_pkt == pkt structure, so we don't need
    // to do anything.  Otherwise, use the separate bus_pkt to
    // generate response to pkt and then delete it.
    if (!is_forward) {
        if (pkt->needsResponse()) {
            assert(bus_pkt->isResponse());
            if (bus_pkt->isError()) {
                pkt->makeAtomicResponse();
                pkt->copyError(bus_pkt);
            } else if (pkt->isWholeLineWrite(blkSize)) {
                // note the use of pkt, not bus_pkt here.

                // write-line request to the cache that promoted
                // the write to a whole line
                const bool allocate = allocOnFill(pkt->cmd) &&
                    (!writeAllocator || writeAllocator->allocate());
                blk = handleFill(bus_pkt, blk, writebacks, allocate);
                assert(blk);
                is_invalidate = false;
                satisfyRequest(pkt, blk);
            } else if (bus_pkt->isRead() ||
                       bus_pkt->cmd == MemCmd::UpgradeResp) {
                // we're updating cache state to allow us to
                // satisfy the upstream request from the cache
                blk = handleFill(bus_pkt, blk, writebacks,
                                 allocOnFill(pkt->cmd));
                satisfyRequest(pkt, blk);
                maintainClusivity(pkt->fromCache(), blk);
            } else {
                // we're satisfying the upstream request without
                // modifying cache state, e.g., a write-through
                pkt->makeAtomicResponse();
            }
        }
        delete bus_pkt;
    }

    if (is_invalidate && blk && blk->isValid()) {
        invalidateBlock(blk);
    }

    return latency;
}

Tick
Cache::recvAtomic(PacketPtr pkt)
{
    promoteWholeLineWrites(pkt);

    // follow the same flow as in recvTimingReq, and check if a cache
    // above us is responding
    if (pkt->cacheResponding()) {
        assert(!pkt->req->isCacheInvalidate());
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if a cache is responding, and it had the line in Owned
        // rather than Modified state, we need to invalidate any
        // copies that are not on the same path to memory
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        return memSidePort.sendAtomic(pkt);
    }

    return BaseCache::recvAtomic(pkt);
}


/////////////////////////////////////////////////////
//
// Response handling: responses from the memory side
//
/////////////////////////////////////////////////////

void
Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk)
{
    MSHR::Target *initial_tgt = mshr->getTarget();
    // First offset for critical word first calculations
    const int initial_offset = initial_tgt->pkt->getOffset(blkSize);

    const bool is_error = pkt->isError();
    // allow invalidation responses originating from write-line
    // requests to be discarded
    bool is_invalidate = pkt->isInvalidate() &&
        !mshr->wasWholeLineWrite;

    MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
    for (auto &target: targets) {
        Packet *tgt_pkt = target.pkt;
        switch (target.source) {
          case MSHR::Target::FromCPU:
            Tick completion_time;
            // Here we charge on completion_time the delay of the xbar if the
            // packet comes from it, charged on headerDelay.
            completion_time = pkt->headerDelay;

            // Software prefetch handling for cache closest to core
            if (tgt_pkt->cmd.isSWPrefetch()) {
                // a software prefetch would have already been ack'd
                // immediately with dummy data so the core would be able to
                // retire it. This request completes right here, so we
                // deallocate it.
                delete tgt_pkt;
                break; // skip response
            }

            // unlike the other packet flows, where data is found in other
            // caches or memory and brought back, write-line requests always
            // have the data right away, so the above check for "is fill?"
            // cannot actually be determined until examining the stored MSHR
            // state. We "catch up" with that logic here, which is duplicated
            // from above.
            if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
                assert(!is_error);
                assert(blk);
                assert(blk->isWritable());
            }

            if (blk && blk->isValid() && !mshr->isForward) {
                satisfyRequest(tgt_pkt, blk, true, mshr->hasPostDowngrade());

                // How many bytes past the first request is this one
                int transfer_offset =
                    tgt_pkt->getOffset(blkSize) - initial_offset;
                if (transfer_offset < 0) {
                    transfer_offset += blkSize;
                }

                // If not the critical word (non-zero offset), also pay
                // payloadDelay. responseLatency is the latency of the
                // return path from lower level caches/memory to an upper
                // level cache or the core.
                completion_time += clockEdge(responseLatency) +
                    (transfer_offset ? pkt->payloadDelay : 0);

                assert(!tgt_pkt->req->isUncacheable());

                assert(tgt_pkt->req->masterId() < system->maxMasters());
                missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] +=
                    completion_time - target.recvTime;
            } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
                // failed StoreCond upgrade
                assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
                       tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
                       tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache or
                // the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                tgt_pkt->req->setExtraData(0);
            } else {
                // We are about to send a response to a cache above
                // that asked for an invalidation; we need to
                // invalidate our copy immediately as the most
                // up-to-date copy of the block will now be in the
                // cache above. It will also prevent this cache from
                // responding (if the block was previously dirty) to
                // snoops as they should snoop the caches above where
                // they will get the response from.
                if (is_invalidate && blk && blk->isValid()) {
                    invalidateBlock(blk);
                }
                // not a cache fill, just forwarding response
                // responseLatency is the latency of the return path
                // from lower level caches/memory to the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                if (pkt->isRead() && !is_error) {
                    // sanity check
                    assert(pkt->getAddr() == tgt_pkt->getAddr());
                    assert(pkt->getSize() >= tgt_pkt->getSize());

                    tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
                }
            }
            tgt_pkt->makeTimingResponse();
            // if this packet is an error copy that to the new packet
            if (is_error)
                tgt_pkt->copyError(pkt);
            if (tgt_pkt->cmd == MemCmd::ReadResp &&
                (is_invalidate || mshr->hasPostInvalidate())) {
                // If intermediate cache got ReadRespWithInvalidate,
                // propagate that.  Response should not have
                // isInvalidate() set otherwise.
                tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
                DPRINTF(Cache, "%s: updated cmd to %s\n", __func__,
                        tgt_pkt->print());
            }
            // Reset the bus additional time as it is now accounted for
            tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
            cpuSidePort.schedTimingResp(tgt_pkt, completion_time, true);
            break;

          case MSHR::Target::FromPrefetcher:
            assert(tgt_pkt->cmd == MemCmd::HardPFReq);
            if (blk)
                blk->status |= BlkHWPrefetched;
            delete tgt_pkt;
            break;

          case MSHR::Target::FromSnoop:
            // I don't believe that a snoop can be in an error state
            assert(!is_error);
            // response to snoop request
            DPRINTF(Cache, "processing deferred snoop...\n");
            // If the response is invalidating, a snooping target can
            // be satisfied if it is also invalidating. If the response is
            // not only invalidating, but more specifically an InvalidateResp,
            // and the MSHR was created due to an InvalidateReq, then a cache
            // above is waiting to satisfy a WriteLineReq. In this case even a
            // non-invalidating snoop is added as a target here since this is
            // the ordering point. When the InvalidateResp reaches this cache,
            // the snooping target will further snoop the cache above with the
            // WriteLineReq.
            assert(!is_invalidate || pkt->cmd == MemCmd::InvalidateResp ||
                   pkt->req->isCacheMaintenance() ||
                   mshr->hasPostInvalidate());
            handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
            break;

          default:
            panic("Illegal target->source enum %d\n", target.source);
        }
    }

    maintainClusivity(targets.hasFromCache, blk);

    if (blk && blk->isValid()) {
        // an invalidate response stemming from a write line request
        // should not invalidate the block, so check if the
        // invalidation should be discarded
        if (is_invalidate || mshr->hasPostInvalidate()) {
            invalidateBlock(blk);
        } else if (mshr->hasPostDowngrade()) {
            blk->status &= ~BlkWritable;
        }
    }
}
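
// Illustrative example of the critical-word-first offset arithmetic
// above: with an assumed 64-byte block, if the initial target asked
// for offset 0x20 and a later target asks for offset 0x10, then
// transfer_offset = 0x10 - 0x20 = -0x10, which wraps to 0x30; being
// non-zero, that target also pays pkt->payloadDelay on top of
// responseLatency.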

PacketPtr
Cache::evictBlock(CacheBlk *blk)
{
    PacketPtr pkt = (blk->isDirty() || writebackClean) ?
        writebackBlk(blk) : cleanEvictBlk(blk);

    invalidateBlock(blk);

    return pkt;
}
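
// The decision above, spelled out: a dirty block always leaves as a
// writeback; a clean block leaves as a (clean) writeback when the
// writebackClean option is set, and as a zero-sized CleanEvict
// message otherwise (see cleanEvictBlk below).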

PacketPtr
Cache::cleanEvictBlk(CacheBlk *blk)
{
    assert(!writebackClean);
    assert(blk && blk->isValid() && !blk->isDirty());

    // Creating a zero-sized write, a message to the snoop filter
    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);

    PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
    pkt->allocate();
    DPRINTF(Cache, "Create CleanEvict %s\n", pkt->print());

    return pkt;
}

/////////////////////////////////////////////////////
//
// Snoop path: requests coming in from the memory side
//
/////////////////////////////////////////////////////

void
Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
                              bool already_copied, bool pending_inval)
{
    // sanity check
    assert(req_pkt->isRequest());
    assert(req_pkt->needsResponse());

    DPRINTF(Cache, "%s: for %s\n", __func__, req_pkt->print());
    // timing-mode snoop responses require a new packet, unless we
    // already made a copy...
    PacketPtr pkt = req_pkt;
    if (!already_copied)
        // do not clear flags, and allocate space for data if the
        // packet needs it (the only packets that carry data are read
        // responses)
        pkt = new Packet(req_pkt, false, req_pkt->isRead());

    assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
           pkt->hasSharers());
    pkt->makeTimingResponse();
    if (pkt->isRead()) {
        pkt->setDataFromBlock(blk_data, blkSize);
    }
    if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
        // Assume we defer a response to a read from a far-away cache
        // A, then later defer a ReadExcl from a cache B on the same
        // bus as us. We'll assert cacheResponding in both cases, but
        // in the latter case cacheResponding will keep the
        // invalidation from reaching cache A. This special response
        // tells cache A that it gets the block to satisfy its read,
        // but must immediately invalidate it.
        pkt->cmd = MemCmd::ReadRespWithInvalidate;
    }
    // Here we consider forward_time, paying for just forward latency and
    // also charging the delay provided by the xbar.
    // forward_time is used as send_time in next allocateWriteBuffer().
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    DPRINTF(CacheVerbose, "%s: created response: %s tick: %lu\n", __func__,
            pkt->print(), forward_time);
    memSidePort.schedTimingSnoopResp(pkt, forward_time, true);
}

uint32_t
Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
                   bool is_deferred, bool pending_inval)
{
    DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());
    // deferred snoops can only happen in timing mode
    assert(!(is_deferred && !is_timing));
    // pending_inval only makes sense on deferred snoops
    assert(!(pending_inval && !is_deferred));
    assert(pkt->isRequest());

    // the packet may get modified if we or a forwarded snooper
    // responds in atomic mode, so remember a few things about the
    // original packet up front
    bool invalidate = pkt->isInvalidate();
    bool M5_VAR_USED needs_writable = pkt->needsWritable();

    // at the moment we could get an uncacheable write which does not
    // have the invalidate flag, and we need a suitable way of dealing
    // with this case
    panic_if(invalidate && pkt->req->isUncacheable(),
             "%s got an invalidating uncacheable snoop request %s",
             name(), pkt->print());

    uint32_t snoop_delay = 0;

    if (forwardSnoops) {
        // first propagate snoop upward to see if anyone above us wants to
        // handle it.  save & restore packet src since it will get
        // rewritten to be relative to cpu-side bus (if any)
        bool alreadyResponded = pkt->cacheResponding();
        if (is_timing) {
            // copy the packet so that we can clear any flags before
            // forwarding it upwards, we also allocate data (passing
            // the pointer along in case of static data), in case
            // there is a snoop hit in upper levels
            Packet snoopPkt(pkt, true, true);
            snoopPkt.setExpressSnoop();
            // the snoop packet does not need to wait any additional
            // time
            snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
            cpuSidePort.sendTimingSnoopReq(&snoopPkt);

            // add the header delay (including crossbar and snoop
            // delays) of the upward snoop to the snoop delay for this
            // cache
            snoop_delay += snoopPkt.headerDelay;

            if (snoopPkt.cacheResponding()) {
                // cache-to-cache response from some upper cache
                assert(!alreadyResponded);
                pkt->setCacheResponding();
            }
            // upstream cache has the block, or has an outstanding
            // MSHR, pass the flag on
            if (snoopPkt.hasSharers()) {
                pkt->setHasSharers();
            }
            // If this request is a prefetch or clean evict and an upper level
            // signals block present, make sure to propagate the block
            // presence to the requester.
            if (snoopPkt.isBlockCached()) {
                pkt->setBlockCached();
            }
            // If the request was satisfied by snooping the cache
            // above, mark the original packet as satisfied too.
            if (snoopPkt.satisfied()) {
                pkt->setSatisfied();
            }
        } else {
            cpuSidePort.sendAtomicSnoop(pkt);
            if (!alreadyResponded && pkt->cacheResponding()) {
                // cache-to-cache response from some upper cache:
                // forward response to original requester
                assert(pkt->isResponse());
            }
        }
    }

    bool respond = false;
    bool blk_valid = blk && blk->isValid();
    if (pkt->isClean()) {
        if (blk_valid && blk->isDirty()) {
            DPRINTF(CacheVerbose, "%s: packet (snoop) %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
            PacketList writebacks;
            writebacks.push_back(wb_pkt);

            if (is_timing) {
                // anything that is merely forwarded pays for the forward
                // latency and the delay provided by the crossbar
                Tick forward_time = clockEdge(forwardLatency) +
                    pkt->headerDelay;
                doWritebacks(writebacks, forward_time);
            } else {
                doWritebacksAtomic(writebacks);
            }
            pkt->setSatisfied();
        }
    } else if (!blk_valid) {
        DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__,
                pkt->print());
        if (is_deferred) {
            // we no longer have the block, and will not respond, but a
            // packet was allocated in MSHR::handleSnoop and we have
            // to delete it
            assert(pkt->needsResponse());

            // we have passed the block to a cache upstream, that
            // cache should be responding
            assert(pkt->cacheResponding());

            delete pkt;
        }
        return snoop_delay;
    } else {
        DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__,
                pkt->print(), blk->print());

        // We may end up modifying both the block state and the packet (if
        // we respond in atomic mode), so just figure out what to do now
        // and then do it later. We respond to all snoops that need
        // responses provided we have the block in dirty state. The
        // invalidation itself is taken care of below. We don't respond to
        // cache maintenance operations as this is done by the destination
        // xbar.
        respond = blk->isDirty() && pkt->needsResponse();

        chatty_assert(!(isReadOnly && blk->isDirty()), "Should never have "
                      "a dirty block in a read-only cache %s\n", name());
    }

    // Invalidate any prefetches from below that would strip write permissions
    // MemCmd::HardPFReq is only observed by upstream caches.  After missing
    // above and in its own cache, a new MemCmd::ReadReq is created that
    // downstream caches observe.
    if (pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
                "from lower cache\n", pkt->getAddr(), pkt->print());
        pkt->setBlockCached();
        return snoop_delay;
    }

    if (pkt->isRead() && !invalidate) {
        // reading without requiring the line in a writable state
        assert(!needs_writable);
        pkt->setHasSharers();

        // if the requesting packet is uncacheable, retain the line in
        // the current state, otherwise unset the writable flag,
        // which means we go from Modified to Owned (and will respond
        // below), remain in Owned (and will respond below), from
        // Exclusive to Shared, or remain in Shared
        if (!pkt->req->isUncacheable())
            blk->status &= ~BlkWritable;
        DPRINTF(Cache, "new state is %s\n", blk->print());
    }

    if (respond) {
        // prevent anyone else from responding, cache as well as
        // memory, and also prevent any memory from even seeing the
        // request
        pkt->setCacheResponding();
        if (!pkt->isClean() && blk->isWritable()) {
            // inform the cache hierarchy that this cache had the line
            // in the Modified state so that we avoid unnecessary
            // invalidations (see Packet::setResponderHadWritable)
            pkt->setResponderHadWritable();

            // in the case of an uncacheable request there is no point
            // in setting the responderHadWritable flag, but since the
            // recipient does not care there is no harm in doing so
        } else {
            // if the packet has needsWritable set we invalidate our
            // copy below and all other copies will be invalidated
            // through express snoops, and if needsWritable is not set
            // we already called setHasSharers above
        }

        // if we are returning a writable and dirty (Modified) line,
        // we should be invalidating the line
        panic_if(!invalidate && !pkt->hasSharers(),
                 "%s is passing a Modified line through %s, "
                 "but keeping the block", name(), pkt->print());

        if (is_timing) {
            doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
        } else {
            pkt->makeAtomicResponse();
            // packets such as upgrades do not actually have any data
            // payload
            if (pkt->hasData())
                pkt->setDataFromBlock(blk->data, blkSize);
        }
    }

    if (!respond && is_deferred) {
        assert(pkt->needsResponse());
        delete pkt;
    }

    // Do this last in case it deallocates block data or something
    // like that
    if (blk_valid && invalidate) {
        invalidateBlock(blk);
        DPRINTF(Cache, "new state is %s\n", blk->print());
    }

    return snoop_delay;
}
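
// Illustrative outcome of the snoop handling above: a cacheable read
// snoop that hits a Modified line first downgrades the local copy
// (hasSharers set, BlkWritable cleared, i.e. Modified -> Owned) and
// then responds with the data; an invalidating snoop (e.g. ReadExReq)
// responds if the line is dirty and invalidates the local copy at the
// end.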


void
Cache::recvTimingSnoopReq(PacketPtr pkt)
{
    DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());

    // no need to snoop requests that are not in range
    if (!inRange(pkt->getAddr())) {
        return;
    }

    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);

    Addr blk_addr = pkt->getBlockAddr(blkSize);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    // Update the latency cost of the snoop so that the crossbar can
    // account for it. Do not overwrite what other neighbouring caches
    // have already done, rather take the maximum. The update is
    // tentative, for cases where we return before an upward snoop
    // happens below.
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
                                         lookupLatency * clockPeriod());

    // Inform requests from below (Prefetch, CleanEvict or Writeback) of
    // an MSHR hit by setting BLOCK_CACHED.
    if (mshr && pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Setting block cached for %s from lower cache on "
                "mshr hit\n", pkt->print());
        pkt->setBlockCached();
        return;
    }

    // Bypass any existing cache maintenance requests if the request
    // has been satisfied already (i.e., the dirty block has been
    // found).
    if (mshr && pkt->req->isCacheMaintenance() && pkt->satisfied()) {
        return;
    }

    // Let the MSHR itself track the snoop and decide whether we want
    // to go ahead and do the regular cache snoop
    if (mshr && mshr->handleSnoop(pkt, order++)) {
        DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s)."
                " mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
                mshr->print());

        if (mshr->getNumTargets() > numTarget)
            warn("allocating bonus target for snoop"); // handle later
        return;
    }

    // We also need to check the writeback buffers and handle those
    WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure);
    if (wb_entry) {
        DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
                pkt->getAddr(), is_secure ? "s" : "ns");
        // Expect to see only Writebacks and/or CleanEvicts here, both of
        // which should not be generated for uncacheable data.
        assert(!wb_entry->isUncacheable());
        // There should only be a single request responsible for generating
        // Writebacks/CleanEvicts.
        assert(wb_entry->getNumTargets() == 1);
        PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
        assert(wb_pkt->isEviction() || wb_pkt->cmd == MemCmd::WriteClean);

        if (pkt->isEviction()) {
            // if the block is found in the write queue, set the BLOCK_CACHED
            // flag for Writeback/CleanEvict snoop. On return the snoop will
            // propagate the BLOCK_CACHED flag in Writeback packets and prevent
            // any CleanEvicts from travelling down the memory hierarchy.
            pkt->setBlockCached();
            DPRINTF(Cache, "%s: Squashing %s from lower cache on writequeue "
                    "hit\n", __func__, pkt->print());
            return;
        }

        // conceptually writebacks are no different to other blocks in
        // this cache, so the behaviour is modelled after handleSnoop,
        // the difference being that instead of querying the block
        // state to determine if it is dirty and writable, we use the
        // command and fields of the writeback packet
        bool respond = wb_pkt->cmd == MemCmd::WritebackDirty &&
            pkt->needsResponse();
        bool have_writable = !wb_pkt->hasSharers();
        bool invalidate = pkt->isInvalidate();

        if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
            assert(!pkt->needsWritable());
            pkt->setHasSharers();
            wb_pkt->setHasSharers();
        }

        if (respond) {
            pkt->setCacheResponding();

            if (have_writable) {
                pkt->setResponderHadWritable();
            }

            doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
                                   false, false);
        }

        if (invalidate && wb_pkt->cmd != MemCmd::WriteClean) {
            // Invalidation trumps our writeback... discard here
            // Note: markInService will remove entry from writeback buffer.
            markInService(wb_entry);
            delete wb_pkt;
        }
    }

    // If this was a shared writeback, there may still be
    // other shared copies above that require invalidation.
    // We could be more selective and return here if the
    // request is non-exclusive or if the writeback is
    // exclusive.
    uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false);

    // Override what we did when we first saw the snoop, as we now
    // also have the cost of the upwards snoops to account for
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay +
                                         lookupLatency * clockPeriod());
}
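
// Note on the snoopDelay accounting above: lookupLatency is in Cycles
// while clockPeriod() returns Ticks per cycle, so snoopDelay is
// accumulated in Ticks, consistent with the headerDelay that
// handleSnoop adds for the upward snoop.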

Tick
Cache::recvAtomicSnoop(PacketPtr pkt)
{
    // no need to snoop requests that are not in range.
    if (!inRange(pkt->getAddr())) {
        return 0;
    }

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
    uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
    return snoop_delay + lookupLatency * clockPeriod();
}

bool
Cache::isCachedAbove(PacketPtr pkt, bool is_timing)
{
    if (!forwardSnoops)
        return false;
    // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
    // Writeback snoops into upper level caches to check for copies of the
    // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict
    // packet, the cache can inform the crossbar below of presence or absence
    // of the block.
    if (is_timing) {
        Packet snoop_pkt(pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // Assert that packet is either Writeback or CleanEvict and not a
        // prefetch request because prefetch requests need an MSHR and may
        // generate a snoop response.
        assert(pkt->isEviction() || pkt->cmd == MemCmd::WriteClean);
        snoop_pkt.senderState = nullptr;
        cpuSidePort.sendTimingSnoopReq(&snoop_pkt);
        // Writeback/CleanEvict snoops do not generate a snoop response.
        assert(!(snoop_pkt.cacheResponding()));
        return snoop_pkt.isBlockCached();
    } else {
        cpuSidePort.sendAtomicSnoop(pkt);
        return pkt->isBlockCached();
    }
}

bool
Cache::sendMSHRQueuePacket(MSHR* mshr)
{
    assert(mshr);

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;

    if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
        DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());

        // we should never have hardware prefetches to allocated
        // blocks
        assert(!tags->findBlock(mshr->blkAddr, mshr->isSecure));

        // We need to check the caches above us to verify that
        // they don't have a copy of this block in the dirty state
        // at the moment. Without this check we could get a stale
        // copy from memory that might get used in place of the
        // dirty one.
        Packet snoop_pkt(tgt_pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // We are sending this packet upwards, but if it hits we will
        // get a snoop response that we end up treating just like a
        // normal response, hence it needs the MSHR as its sender
        // state
        snoop_pkt.senderState = mshr;
        cpuSidePort.sendTimingSnoopReq(&snoop_pkt);

        // Check to see if a writeback arrived between the time the
        // prefetch was placed in the MSHRs and when it was selected
        // to be sent, or if the prefetch was squashed by an upper
        // cache (to prevent us from grabbing the line).

        // It is important to check cacheResponding before
        // prefetchSquashed. If another cache has committed to
        // responding, it will be sending a dirty response which will
        // arrive at the MSHR allocated for this request. Checking the
        // prefetchSquash first may result in the MSHR being
        // prematurely deallocated.
        if (snoop_pkt.cacheResponding()) {
            auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
            assert(r.second);

            // if we are getting a snoop response with no sharers it
            // will be allocated as Modified
            bool pending_modified_resp = !snoop_pkt.hasSharers();
            markInService(mshr, pending_modified_resp);

            DPRINTF(Cache, "Upward snoop of prefetch for addr"
                    " %#x (%s) hit\n",
                    tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
            return false;
        }

        if (snoop_pkt.isBlockCached()) {
            DPRINTF(Cache, "Block present, prefetch squashed by cache.  "
                    "Deallocating mshr target %#x.\n",
                    mshr->blkAddr);

            // Deallocate the mshr target
            if (mshrQueue.forceDeallocateTarget(mshr)) {
                // Clear the blocked flag if this deallocation freed an
                // MSHR when all had previously been in use
                clearBlocked(Blocked_NoMSHRs);
            }

            // given that no response is expected, delete Request and Packet
            delete tgt_pkt;

            return false;
        }
    }

    return BaseCache::sendMSHRQueuePacket(mshr);
}

Cache*
CacheParams::create()
{
    assert(tags);
    assert(replacement_policy);

    return new Cache(this);
}