cache.cc revision 13954:2f400a5f2627
/*
 * Copyright (c) 2010-2019 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Sandberg
 *          Nikos Nikoleris
 */

/**
 * @file
 * Cache definitions.
 */

#include "mem/cache/cache.hh"

#include <cassert>

#include "base/compiler.hh"
#include "base/logging.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CacheTags.hh"
#include "debug/CacheVerbose.hh"
#include "enums/Clusivity.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/tags/base.hh"
#include "mem/cache/write_queue_entry.hh"
#include "mem/request.hh"
#include "params/Cache.hh"

Cache::Cache(const CacheParams *p)
    : BaseCache(p, p->system->cacheLineSize()),
      doFastWrites(true)
{
}
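
// Note that doFastWrites enables the promotion of suitably aligned,
// whole-line WriteReqs to WriteLineReqs (see promoteWholeLineWrites
// below); such writes can then allocate a line without first fetching
// its old contents.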

void
Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                      bool deferred_response, bool pending_downgrade)
{
    BaseCache::satisfyRequest(pkt, blk);

    if (pkt->isRead()) {
        // determine if this read is from a (coherent) cache or not
        if (pkt->fromCache()) {
            assert(pkt->getSize() == blkSize);
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsWritable()) {
                // sanity check
                assert(pkt->cmd == MemCmd::ReadExReq ||
                       pkt->cmd == MemCmd::SCUpgradeFailReq);
                assert(!pkt->hasSharers());

                // if we have a dirty copy, make sure the recipient
                // keeps it marked dirty (in the modified state)
                if (blk->isDirty()) {
                    pkt->setCacheResponding();
                    blk->status &= ~BlkDirty;
                }
            } else if (blk->isWritable() && !pending_downgrade &&
                       !pkt->hasSharers() &&
                       pkt->cmd != MemCmd::ReadCleanReq) {
                // we can give the requester a writable copy on a read
                // request if:
                // - we have a writable copy at this level (& below)
                // - we don't have a pending snoop from below
                //   signaling another read request
                // - no other cache above has a copy (otherwise it
                //   would have set hasSharers flag when
                //   snooping the packet)
                // - the read has not explicitly asked for a clean
                //   copy of the line
                if (blk->isDirty()) {
                    // special considerations if we're owner:
                    if (!deferred_response) {
                        // respond with the line in Modified state
                        // (cacheResponding set, hasSharers not set)
                        pkt->setCacheResponding();

                        // if this cache is mostly inclusive, we
                        // keep the block in the Exclusive state,
                        // and pass it upwards as Modified
                        // (writable and dirty), hence we have
                        // multiple caches, all on the same path
                        // towards memory, all considering the
                        // same block writable, but only one
                        // considering it Modified

                        // we get away with multiple caches (on
                        // the same path to memory) considering
                        // the block writable as we always enter
                        // the cache hierarchy through a cache,
                        // and first snoop upwards in all other
                        // branches
                        blk->status &= ~BlkDirty;
                    } else {
                        // if we're responding after our own miss,
                        // there's a window where the recipient didn't
                        // know it was getting ownership and may not
                        // have responded to snoops correctly, so we
                        // have to respond with a shared line
                        pkt->setHasSharers();
                    }
                }
            } else {
                // otherwise only respond with a shared copy
                pkt->setHasSharers();
            }
        }
    }
}
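
// In outline, the permissions granted above for a read arriving from an
// upstream cache are roughly:
//
//   requester needsWritable, block dirty     -> respond Modified and
//                                               clear BlkDirty here
//   writable here, no pending downgrade, no
//   sharers, and not a ReadCleanReq:
//     block clean                            -> pass the line writable
//                                               (Exclusive upstream)
//     block dirty, immediate response        -> respond Modified, keep
//                                               an Exclusive copy here
//     block dirty, deferred response         -> respond Shared
//                                               (hasSharers set)
//   otherwise                                -> respond Shared
//                                               (hasSharers set)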

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

bool
Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat)
{

    if (pkt->req->isUncacheable()) {
        assert(pkt->isRequest());

        chatty_assert(!(isReadOnly && pkt->isWrite()),
                      "Should never see a write in a read-only cache %s\n",
                      name());

        DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

        // lookupLatency is the latency in case the request is uncacheable.
        lat = lookupLatency;

        // flush and invalidate any existing block
        CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
        if (old_blk && old_blk->isValid()) {
            BaseCache::evictBlock(old_blk, clockEdge(lat + forwardLatency));
        }

        blk = nullptr;
        return false;
    }

    return BaseCache::access(pkt, blk, lat);
}

void
Cache::doWritebacks(PacketPtr pkt, Tick forward_time)
{
    // We use forwardLatency here because we are copying writebacks to
    // write buffer.

    // Call isCachedAbove for Writebacks, CleanEvicts and
    // WriteCleans to discover if the block is cached above.
    if (isCachedAbove(pkt)) {
        if (pkt->cmd == MemCmd::CleanEvict) {
            // Delete CleanEvict because cached copies exist above. The
            // packet destructor will delete the request object because
            // this is a non-snoop request packet which does not require a
            // response.
            delete pkt;
        } else if (pkt->cmd == MemCmd::WritebackClean) {
            // clean writeback, do not send since the block is
            // still cached above
            assert(writebackClean);
            delete pkt;
        } else {
            assert(pkt->cmd == MemCmd::WritebackDirty ||
                   pkt->cmd == MemCmd::WriteClean);
            // Set BLOCK_CACHED flag in Writeback and send below, so that
            // the Writeback does not reset the bit corresponding to this
            // address in the snoop filter below.
            pkt->setBlockCached();
            allocateWriteBuffer(pkt, forward_time);
        }
    } else {
        // If the block is not cached above, send packet below. Both
        // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
        // reset the bit corresponding to this address in the snoop filter
        // below.
        allocateWriteBuffer(pkt, forward_time);
    }
}

void
Cache::doWritebacksAtomic(PacketPtr pkt)
{
    // Call isCachedAbove for both Writebacks and CleanEvicts. If
    // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
    // and discard CleanEvicts.
    if (isCachedAbove(pkt, false)) {
        if (pkt->cmd == MemCmd::WritebackDirty ||
            pkt->cmd == MemCmd::WriteClean) {
            // Set BLOCK_CACHED flag in Writeback and send below,
            // so that the Writeback does not reset the bit
            // corresponding to this address in the snoop filter
            // below. We can discard CleanEvicts because cached
            // copies exist above. Atomic mode isCachedAbove
            // modifies packet to set BLOCK_CACHED flag
            memSidePort.sendAtomic(pkt);
        }
    } else {
        // If the block is not cached above, send packet below. Both
        // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
        // reset the bit corresponding to this address in the snoop filter
        // below.
        memSidePort.sendAtomic(pkt);
    }

    // In case of CleanEvicts, the packet destructor will delete the
    // request object because this is a non-snoop request packet which
    // does not require a response.
    delete pkt;
}

void
Cache::recvTimingSnoopResp(PacketPtr pkt)
{
    DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

    // determine if the response is from a snoop request we created
    // (in which case it should be in the outstandingSnoop), or if we
    // merely forwarded someone else's snoop request
    const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
        outstandingSnoop.end();

    if (!forwardAsSnoop) {
        // the packet came from this cache, so sink it here and do not
        // forward it
        assert(pkt->cmd == MemCmd::HardPFResp);

        outstandingSnoop.erase(pkt->req);

        DPRINTF(Cache, "Got prefetch response from above for addr "
                "%#llx (%s)\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
        recvTimingResp(pkt);
        return;
    }

    // forwardLatency is set here because there is a response from an
    // upper level cache.
    // To account for the delay incurred if the packet came from the bus,
    // we also charge headerDelay.
    Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    memSidePort.schedTimingSnoopResp(pkt, snoop_resp_time);
}

void
Cache::promoteWholeLineWrites(PacketPtr pkt)
{
    // Cache line clearing instructions
    if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0) &&
        !pkt->isMaskedWrite()) {
        pkt->cmd = MemCmd::WriteLineReq;
        DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
    }
}
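
// As an illustrative sketch (addr and master_id are hypothetical), with
// blkSize == 64 a block-aligned, full-line, unmasked write such as
//
//     RequestPtr req = std::make_shared<Request>(addr, 64, 0, master_id);
//     PacketPtr pkt = new Packet(req, MemCmd::WriteReq);
//
// satisfies all the conditions above (getSize() == blkSize,
// getOffset(blkSize) == 0, !isMaskedWrite()) and is promoted to
// MemCmd::WriteLineReq.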

void
Cache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    // should never be satisfying an uncacheable access as we
    // flush and invalidate any existing block as part of the
    // lookup
    assert(!pkt->req->isUncacheable());

    BaseCache::handleTimingReqHit(pkt, blk, request_time);
}

void
Cache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time,
                           Tick request_time)
{
    if (pkt->req->isUncacheable()) {
        // ignore any existing MSHR if we are dealing with an
        // uncacheable request

        // should have flushed and have no valid block
        assert(!blk || !blk->isValid());

        mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;

        if (pkt->isWrite()) {
            allocateWriteBuffer(pkt, forward_time);
        } else {
            assert(pkt->isRead());

            // uncacheable accesses always allocate a new MSHR

            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }

        return;
    }

    Addr blk_addr = pkt->getBlockAddr(blkSize);

    MSHR *mshr = mshrQueue.findMatch(blk_addr, pkt->isSecure());

    // Software prefetch handling:
    // To keep the core from waiting on data it won't look at
    // anyway, send back a response with dummy data. Miss handling
    // will continue asynchronously. Unfortunately, the core will
    // insist upon freeing the original Packet/Request, so we have to
    // create a new pair with a different lifecycle. Note that this
    // processing happens before any MSHR munging on the behalf of
    // this request because this new Request will be the one stored
    // into the MSHRs, not the original.
    if (pkt->cmd.isSWPrefetch()) {
        assert(pkt->needsResponse());
        assert(pkt->req->hasPaddr());
        assert(!pkt->req->isUncacheable());

        // There's no reason to add a prefetch as an additional target
        // to an existing MSHR. If an outstanding request is already
        // in progress, there is nothing for the prefetch to do.
        // If this is the case, we don't even create a request at all.
        PacketPtr pf = nullptr;

        if (!mshr) {
            // copy the request and create a new SoftPFReq packet
            RequestPtr req = std::make_shared<Request>(pkt->req->getPaddr(),
                                                       pkt->req->getSize(),
                                                       pkt->req->getFlags(),
                                                       pkt->req->masterId());
            pf = new Packet(req, pkt->cmd);
            pf->allocate();
            assert(pf->matchAddr(pkt));
            assert(pf->getSize() == pkt->getSize());
        }

        pkt->makeTimingResponse();

        // request_time is used here, taking into account lat and the delay
        // charged if the packet comes from the xbar.
        cpuSidePort.schedTimingResp(pkt, request_time);

        // If an outstanding request is in progress (we found an
        // MSHR) this is set to null
        pkt = pf;
    }

    BaseCache::handleTimingReqMiss(pkt, mshr, blk, forward_time, request_time);
}

void
Cache::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(CacheTags, "%s tags:\n%s\n", __func__, tags->print());

    promoteWholeLineWrites(pkt);

    if (pkt->cacheResponding()) {
        // a cache above us (but not where the packet came from) is
        // responding to the request, in other words it has the line
        // in Modified or Owned state
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if the packet needs the block to be writable, and the cache
        // that has promised to respond (setting the cache responding
        // flag) is not providing writable (it is in Owned rather than
        // the Modified state), we know that there may be other Shared
        // copies in the system; go out and invalidate them all
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        // an upstream cache that had the line in Owned state
        // (dirty, but not writable), is responding and thus
        // transferring the dirty line from one branch of the
        // cache hierarchy to another

        // send out an express snoop and invalidate all other
        // copies (snooping a packet that needs writable is the
        // same as an invalidation), thus turning the Owned line
        // into a Modified line, note that we don't invalidate the
        // block in the current cache or any other cache on the
        // path to memory

        // create a downstream express snoop with cleared packet
        // flags, there is no need to allocate any data as the
        // packet is merely used to co-ordinate state transitions
        Packet *snoop_pkt = new Packet(pkt, true, false);

        // also reset the bus time that the original packet has
        // not yet paid for
        snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;

        // make this an instantaneous express snoop, and let the
        // other caches in the system know that another cache
        // is responding, because we have found the authoritative
        // copy (Modified or Owned) that will supply the right
        // data
        snoop_pkt->setExpressSnoop();
        snoop_pkt->setCacheResponding();

        // this express snoop travels towards the memory, and at
        // every crossbar it is snooped upwards thus reaching
        // every cache in the system
        bool M5_VAR_USED success = memSidePort.sendTimingReq(snoop_pkt);
        // express snoops always succeed
        assert(success);

        // main memory will delete the snoop packet

        // queue for deletion, as opposed to immediate deletion, as
        // the sending cache is still relying on the packet
        pendingDelete.reset(pkt);

        // no need to take any further action in this particular cache
        // as an upstream cache has already committed to responding,
        // and we have already sent out any express snoops in the
        // section above to ensure all other copies in the system are
        // invalidated
        return;
    }

    BaseCache::recvTimingReq(pkt);
}
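
// To summarise the cacheResponding() path above: an upstream cache in
// Owned state has committed to supply the data, so this cache only has
// to make sure every other Shared copy gets invalidated. It does so with
// a zero-time downstream express snoop, which is snooped upwards at
// every crossbar and thus reaches every other cache, turning the
// responder's Owned line into a Modified one.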

PacketPtr
Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                        bool needsWritable,
                        bool is_whole_line_write) const
{
    // should never see evictions here
    assert(!cpu_pkt->isEviction());

    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable() ||
        (!blkValid && cpu_pkt->isUpgrade()) ||
        cpu_pkt->cmd == MemCmd::InvalidateReq || cpu_pkt->isClean()) {
        // uncacheable requests and upgrades from upper-level caches
        // that missed completely just go through as is
        return nullptr;
    }

    assert(cpu_pkt->needsResponse());

    MemCmd cmd;
    // @TODO make useUpgrades a parameter.
    // Note that ownership protocols require upgrade, otherwise a
    // write miss on a shared owned block will generate a ReadExcl,
    // which will clobber the owned copy.
    const bool useUpgrades = true;
    assert(cpu_pkt->cmd != MemCmd::WriteLineReq || is_whole_line_write);
    if (is_whole_line_write) {
        assert(!blkValid || !blk->isWritable());
        // forward as invalidate to all other caches, this gives us
        // the line in Exclusive state, and invalidates all other
        // copies
        cmd = MemCmd::InvalidateReq;
    } else if (blkValid && useUpgrades) {
        // only reason to be here is that blk is read only and we need
        // it to be writable
        assert(needsWritable);
        assert(!blk->isWritable());
        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
               cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
        // Even though this SC will fail, we still need to send out the
        // request and get the data to supply it to other snoopers in the case
        // where the determination that the StoreCond fails is delayed due to
        // all caches not being on the same local bus.
        cmd = MemCmd::SCUpgradeFailReq;
    } else {
        // block is invalid

        // If the request does not need a writable copy there are two
        // cases where we need to ensure the response will not fetch
        // the block in dirty state:
        // * this cache is read only and it does not perform
        //   writebacks,
        // * this cache is mostly exclusive and will not fill (since
        //   it does not fill it will have to writeback the dirty data
        //   immediately which generates unnecessary writebacks).
        bool force_clean_rsp = isReadOnly || clusivity == Enums::mostly_excl;
        cmd = needsWritable ? MemCmd::ReadExReq :
            (force_clean_rsp ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
    }
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);

    // if there are upstream caches that have already marked the
    // packet as having sharers (not passing writable), pass that info
    // downstream
    if (cpu_pkt->hasSharers() && !needsWritable) {
        // note that cpu_pkt may have spent a considerable time in the
        // MSHR queue and that the information could possibly be out
        // of date, however, there is no harm in conservatively
        // assuming the block has sharers
        pkt->setHasSharers();
        DPRINTF(Cache, "%s: passing hasSharers from %s to %s\n",
                __func__, cpu_pkt->print(), pkt->print());
    }

    // the packet should be block aligned
    assert(pkt->getAddr() == pkt->getBlockAddr(blkSize));

    pkt->allocate();
    DPRINTF(Cache, "%s: created %s from %s\n", __func__, pkt->print(),
            cpu_pkt->print());
    return pkt;
}
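
// A rough decision table for the bus command chosen above:
//
//   cpu_pkt                                      bus command
//   -------------------------------------------  -------------------
//   uncacheable, clean, InvalidateReq, or an
//   upgrade that missed completely               none (forward as is)
//   whole-line write                             InvalidateReq
//   valid (read-only) block, needs writable      UpgradeReq, or
//                                                SCUpgradeReq for LLSC
//   SCUpgradeFailReq / StoreCondFailReq          SCUpgradeFailReq
//   invalid block, needs writable                ReadExReq
//   invalid block, clean response forced         ReadCleanReq
//   invalid block, otherwise                     ReadSharedReq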


Cycles
Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk)
{
    // deal with the packets that go through the write path of
    // the cache, i.e. any evictions and writes
    if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
        (pkt->req->isUncacheable() && pkt->isWrite())) {
        Cycles latency = ticksToCycles(memSidePort.sendAtomic(pkt));

        // at this point, if the request was an uncacheable write
        // request, it has been satisfied by a memory below and the
        // packet carries the response back
        assert(!(pkt->req->isUncacheable() && pkt->isWrite()) ||
               pkt->isResponse());

        return latency;
    }

    // only misses left

    PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable(),
                                         pkt->isWholeLineWrite(blkSize));

    bool is_forward = (bus_pkt == nullptr);

    if (is_forward) {
        // just forwarding the same request to the next level
        // no local cache operation involved
        bus_pkt = pkt;
    }

    DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__,
            bus_pkt->print());

#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    Cycles latency = ticksToCycles(memSidePort.sendAtomic(bus_pkt));

    bool is_invalidate = bus_pkt->isInvalidate();

    // We are now dealing with the response handling
    DPRINTF(Cache, "%s: Receive response: %s in state %i\n", __func__,
            bus_pkt->print(), old_state);

    // If packet was a forward, the response (if any) is already
    // in place in the bus_pkt == pkt structure, so we don't need
    // to do anything.  Otherwise, use the separate bus_pkt to
    // generate response to pkt and then delete it.
    if (!is_forward) {
        if (pkt->needsResponse()) {
            assert(bus_pkt->isResponse());
            if (bus_pkt->isError()) {
                pkt->makeAtomicResponse();
                pkt->copyError(bus_pkt);
            } else if (pkt->isWholeLineWrite(blkSize)) {
                // note the use of pkt, not bus_pkt here.

                // write-line request to the cache that promoted
                // the write to a whole line
                const bool allocate = allocOnFill(pkt->cmd) &&
                    (!writeAllocator || writeAllocator->allocate());
                blk = handleFill(bus_pkt, blk, allocate);
                assert(blk != nullptr);
                is_invalidate = false;
                satisfyRequest(pkt, blk);
            } else if (bus_pkt->isRead() ||
                       bus_pkt->cmd == MemCmd::UpgradeResp) {
                // we're updating cache state to allow us to
                // satisfy the upstream request from the cache
                blk = handleFill(bus_pkt, blk, allocOnFill(pkt->cmd));
                satisfyRequest(pkt, blk);
                maintainClusivity(pkt->fromCache(), blk);
            } else {
                // we're satisfying the upstream request without
                // modifying cache state, e.g., a write-through
                pkt->makeAtomicResponse();
            }
        }
        delete bus_pkt;
    }

    if (is_invalidate && blk && blk->isValid()) {
        invalidateBlock(blk);
    }

    return latency;
}
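
// Note that in the atomic path above the miss and its response are
// handled back to back: the single sendAtomic() call returns with the
// response already in bus_pkt (or in pkt itself when the request was
// merely forwarded), so the fill, the satisfyRequest() and any
// invalidation all complete before control returns to recvAtomic().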

Tick
Cache::recvAtomic(PacketPtr pkt)
{
    promoteWholeLineWrites(pkt);

    // follow the same flow as in recvTimingReq, and check if a cache
    // above us is responding
    if (pkt->cacheResponding()) {
        assert(!pkt->req->isCacheInvalidate());
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if a cache is responding, and it had the line in Owned
        // rather than Modified state, we need to invalidate any
        // copies that are not on the same path to memory
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        return memSidePort.sendAtomic(pkt);
    }

    return BaseCache::recvAtomic(pkt);
}


/////////////////////////////////////////////////////
//
// Response handling: responses from the memory side
//
/////////////////////////////////////////////////////


void
Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk)
{
    QueueEntry::Target *initial_tgt = mshr->getTarget();
    // First offset for critical word first calculations
    const int initial_offset = initial_tgt->pkt->getOffset(blkSize);

    const bool is_error = pkt->isError();
    // allow invalidation responses originating from write-line
    // requests to be discarded
    bool is_invalidate = pkt->isInvalidate() &&
        !mshr->wasWholeLineWrite;

    MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
    for (auto &target: targets) {
        Packet *tgt_pkt = target.pkt;
        switch (target.source) {
          case MSHR::Target::FromCPU:
            Tick completion_time;
            // Here we charge to completion_time the delay of the xbar if
            // the packet came from it, carried in headerDelay.
            completion_time = pkt->headerDelay;

            // Software prefetch handling for cache closest to core
            if (tgt_pkt->cmd.isSWPrefetch()) {
                // a software prefetch would have already been ack'd
                // immediately with dummy data so the core would be able to
                // retire it. This request completes right here, so we
                // deallocate it.
                delete tgt_pkt;
                break; // skip response
            }

            // unlike the other packet flows, where data is found in other
            // caches or memory and brought back, write-line requests always
            // have the data right away, so whether this is a fill cannot
            // actually be determined until the stored MSHR state is
            // examined. We "catch up" with that logic here, which is
            // duplicated from above.
            if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
                assert(!is_error);
                assert(blk);
                assert(blk->isWritable());
            }

            if (blk && blk->isValid() && !mshr->isForward) {
                satisfyRequest(tgt_pkt, blk, true, mshr->hasPostDowngrade());

                // How many bytes past the first request is this one
                int transfer_offset =
                    tgt_pkt->getOffset(blkSize) - initial_offset;
                if (transfer_offset < 0) {
                    transfer_offset += blkSize;
                }

                // If this is not the critical word, also charge payloadDelay.
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache or
                // the core.
                completion_time += clockEdge(responseLatency) +
                    (transfer_offset ? pkt->payloadDelay : 0);

                assert(!tgt_pkt->req->isUncacheable());

                assert(tgt_pkt->req->masterId() < system->maxMasters());
                missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] +=
                    completion_time - target.recvTime;
            } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
                // failed StoreCond upgrade
                assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
                       tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
                       tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache or
                // the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                tgt_pkt->req->setExtraData(0);
            } else {
                // We are about to send a response to a cache above
                // that asked for an invalidation; we need to
                // invalidate our copy immediately as the most
                // up-to-date copy of the block will now be in the
                // cache above. It will also prevent this cache from
                // responding (if the block was previously dirty) to
                // snoops as they should snoop the caches above where
                // they will get the response from.
                if (is_invalidate && blk && blk->isValid()) {
                    invalidateBlock(blk);
                }
                // not a cache fill, just forwarding response
                // responseLatency is the latency of the return path
                // from lower level caches/memory to the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                if (pkt->isRead() && !is_error) {
                    // sanity check
                    assert(pkt->matchAddr(tgt_pkt));
                    assert(pkt->getSize() >= tgt_pkt->getSize());

                    tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
                }

                // this response did not allocate here and therefore
                // it was not consumed, make sure that any flags are
                // carried over to cache above
                tgt_pkt->copyResponderFlags(pkt);
            }
            tgt_pkt->makeTimingResponse();
            // if this packet is an error copy that to the new packet
            if (is_error)
                tgt_pkt->copyError(pkt);
            if (tgt_pkt->cmd == MemCmd::ReadResp &&
                (is_invalidate || mshr->hasPostInvalidate())) {
                // If intermediate cache got ReadRespWithInvalidate,
                // propagate that.  Response should not have
                // isInvalidate() set otherwise.
                tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
                DPRINTF(Cache, "%s: updated cmd to %s\n", __func__,
                        tgt_pkt->print());
            }
            // Reset the bus additional time as it is now accounted for
            tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
            cpuSidePort.schedTimingResp(tgt_pkt, completion_time);
            break;

          case MSHR::Target::FromPrefetcher:
            assert(tgt_pkt->cmd == MemCmd::HardPFReq);
            if (blk)
                blk->status |= BlkHWPrefetched;
            delete tgt_pkt;
            break;

          case MSHR::Target::FromSnoop:
            // I don't believe that a snoop can be in an error state
            assert(!is_error);
            // response to snoop request
            DPRINTF(Cache, "processing deferred snoop...\n");
            // If the response is invalidating, a snooping target can
            // be satisfied if it is also invalidating. If the response is
            // not only invalidating, but more specifically an InvalidateResp,
            // and the MSHR was created due to an InvalidateReq, then a cache
            // above is waiting to satisfy a WriteLineReq. In this case even a
            // non-invalidating snoop is added as a target here since this is
            // the ordering point. When the InvalidateResp reaches this cache,
            // the snooping target will further snoop the cache above with the
            // WriteLineReq.
            assert(!is_invalidate || pkt->cmd == MemCmd::InvalidateResp ||
                   pkt->req->isCacheMaintenance() ||
                   mshr->hasPostInvalidate());
            handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
            break;

          default:
            panic("Illegal target->source enum %d\n", target.source);
        }
    }

    maintainClusivity(targets.hasFromCache, blk);

    if (blk && blk->isValid()) {
        // an invalidate response stemming from a write line request
        // should not invalidate the block, so check if the
        // invalidation should be discarded
        if (is_invalidate || mshr->hasPostInvalidate()) {
            invalidateBlock(blk);
        } else if (mshr->hasPostDowngrade()) {
            blk->status &= ~BlkWritable;
        }
    }
}

PacketPtr
Cache::evictBlock(CacheBlk *blk)
{
    PacketPtr pkt = (blk->isDirty() || writebackClean) ?
        writebackBlk(blk) : cleanEvictBlk(blk);

    invalidateBlock(blk);

    return pkt;
}
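
// An eviction thus takes one of two forms: a writeback packet (dirty
// data, or clean data if writebackClean is set) created by
// writebackBlk(), or the zero-sized CleanEvict message built below,
// which exists only so that a snoop filter downstream can drop this
// cache from its sharer list.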

PacketPtr
Cache::cleanEvictBlk(CacheBlk *blk)
{
    assert(!writebackClean);
    assert(blk && blk->isValid() && !blk->isDirty());

    // Creating a zero sized write, a message to the snoop filter
    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);

    PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
    pkt->allocate();
    DPRINTF(Cache, "Create CleanEvict %s\n", pkt->print());

    return pkt;
}

/////////////////////////////////////////////////////
//
// Snoop path: requests coming in from the memory side
//
/////////////////////////////////////////////////////

void
Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
                              bool already_copied, bool pending_inval)
{
    // sanity check
    assert(req_pkt->isRequest());
    assert(req_pkt->needsResponse());

    DPRINTF(Cache, "%s: for %s\n", __func__, req_pkt->print());
    // timing-mode snoop responses require a new packet, unless we
    // already made a copy...
    PacketPtr pkt = req_pkt;
    if (!already_copied)
        // do not clear flags, and allocate space for data if the
        // packet needs it (the only packets that carry data are read
        // responses)
        pkt = new Packet(req_pkt, false, req_pkt->isRead());

    assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
           pkt->hasSharers());
    pkt->makeTimingResponse();
    if (pkt->isRead()) {
        pkt->setDataFromBlock(blk_data, blkSize);
    }
    if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
        // Assume we defer a response to a read from a far-away cache
        // A, then later defer a ReadExcl from a cache B on the same
        // bus as us. We'll assert cacheResponding in both cases, but
        // in the latter case cacheResponding will keep the
        // invalidation from reaching cache A. This special response
        // tells cache A that it gets the block to satisfy its read,
        // but must immediately invalidate it.
        pkt->cmd = MemCmd::ReadRespWithInvalidate;
    }
    // Here we consider forward_time, paying for just forward latency and
    // also charging the delay provided by the xbar.
    // forward_time is used as send_time in next allocateWriteBuffer().
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    DPRINTF(CacheVerbose, "%s: created response: %s tick: %lu\n", __func__,
            pkt->print(), forward_time);
    memSidePort.schedTimingSnoopResp(pkt, forward_time);
}

uint32_t
Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
                   bool is_deferred, bool pending_inval)
{
    DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());
    // deferred snoops can only happen in timing mode
    assert(!(is_deferred && !is_timing));
    // pending_inval only makes sense on deferred snoops
    assert(!(pending_inval && !is_deferred));
    assert(pkt->isRequest());

    // the packet may get modified if we or a forwarded snooper
    // responds in atomic mode, so remember a few things about the
    // original packet up front
    bool invalidate = pkt->isInvalidate();
    bool M5_VAR_USED needs_writable = pkt->needsWritable();

    // at the moment we could get an uncacheable write which does not
    // have the invalidate flag, and we need a suitable way of dealing
    // with this case
    panic_if(invalidate && pkt->req->isUncacheable(),
             "%s got an invalidating uncacheable snoop request %s",
             name(), pkt->print());

    uint32_t snoop_delay = 0;

    if (forwardSnoops) {
        // first propagate snoop upward to see if anyone above us wants to
        // handle it.  save & restore packet src since it will get
        // rewritten to be relative to cpu-side bus (if any)
        if (is_timing) {
            // copy the packet so that we can clear any flags before
            // forwarding it upwards, we also allocate data (passing
            // the pointer along in case of static data), in case
            // there is a snoop hit in upper levels
            Packet snoopPkt(pkt, true, true);
            snoopPkt.setExpressSnoop();
            // the snoop packet does not need to wait any additional
            // time
            snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
            cpuSidePort.sendTimingSnoopReq(&snoopPkt);

            // add the header delay (including crossbar and snoop
            // delays) of the upward snoop to the snoop delay for this
            // cache
            snoop_delay += snoopPkt.headerDelay;

            // If this request is a prefetch or clean evict and an upper level
            // signals block present, make sure to propagate the block
            // presence to the requester.
            if (snoopPkt.isBlockCached()) {
                pkt->setBlockCached();
            }
            // If the request was satisfied by snooping the cache
            // above, mark the original packet as satisfied too.
            if (snoopPkt.satisfied()) {
                pkt->setSatisfied();
            }

            // Copy over flags from the snoop response to make sure we
            // inform the final destination
            pkt->copyResponderFlags(&snoopPkt);
        } else {
            bool already_responded = pkt->cacheResponding();
            cpuSidePort.sendAtomicSnoop(pkt);
            if (!already_responded && pkt->cacheResponding()) {
                // cache-to-cache response from some upper cache:
                // forward response to original requester
                assert(pkt->isResponse());
            }
        }
    }

    bool respond = false;
    bool blk_valid = blk && blk->isValid();
    if (pkt->isClean()) {
        if (blk_valid && blk->isDirty()) {
            DPRINTF(CacheVerbose, "%s: packet (snoop) %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);

            if (is_timing) {
                // anything that is merely forwarded pays for the forward
                // latency and the delay provided by the crossbar
                Tick forward_time = clockEdge(forwardLatency) +
                    pkt->headerDelay;
                doWritebacks(wb_pkt, forward_time);
            } else {
                doWritebacksAtomic(wb_pkt);
            }
            pkt->setSatisfied();
        }
    } else if (!blk_valid) {
        DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__,
                pkt->print());
        if (is_deferred) {
            // we no longer have the block, and will not respond, but a
            // packet was allocated in MSHR::handleSnoop and we have
            // to delete it
            assert(pkt->needsResponse());

            // we have passed the block to a cache upstream, that
            // cache should be responding
            assert(pkt->cacheResponding());

            delete pkt;
        }
        return snoop_delay;
    } else {
        DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__,
                pkt->print(), blk->print());

        // We may end up modifying both the block state and the packet (if
        // we respond in atomic mode), so just figure out what to do now
        // and then do it later. We respond to all snoops that need
        // responses provided we have the block in dirty state. The
        // invalidation itself is taken care of below. We don't respond to
        // cache maintenance operations as this is done by the destination
        // xbar.
        respond = blk->isDirty() && pkt->needsResponse();

        chatty_assert(!(isReadOnly && blk->isDirty()), "Should never have "
                      "a dirty block in a read-only cache %s\n", name());
    }

    // Invalidate any prefetches from below that would strip write permissions
    // MemCmd::HardPFReq is only observed by upstream caches.  After missing
    // above and in its own cache, a new MemCmd::ReadReq is created that
    // downstream caches observe.
    if (pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
                "from lower cache\n", pkt->getAddr(), pkt->print());
        pkt->setBlockCached();
        return snoop_delay;
    }

    if (pkt->isRead() && !invalidate) {
        // reading without requiring the line in a writable state
        assert(!needs_writable);
        pkt->setHasSharers();

        // if the requesting packet is uncacheable, retain the line in
        // the current state, otherwise unset the writable flag,
        // which means we go from Modified to Owned (and will respond
        // below), remain in Owned (and will respond below), from
        // Exclusive to Shared, or remain in Shared
        if (!pkt->req->isUncacheable())
            blk->status &= ~BlkWritable;
        DPRINTF(Cache, "new state is %s\n", blk->print());
    }

    if (respond) {
        // prevent anyone else from responding, cache as well as
        // memory, and also prevent any memory from even seeing the
        // request
        pkt->setCacheResponding();
        if (!pkt->isClean() && blk->isWritable()) {
            // inform the cache hierarchy that this cache had the line
            // in the Modified state so that we avoid unnecessary
            // invalidations (see Packet::setResponderHadWritable)
            pkt->setResponderHadWritable();

            // in the case of an uncacheable request there is no point
            // in setting the responderHadWritable flag, but since the
            // recipient does not care there is no harm in doing so
        } else {
            // if the packet has needsWritable set we invalidate our
            // copy below and all other copies will be invalidated
            // through express snoops, and if needsWritable is not set
            // we already called setHasSharers above
        }

        // if we are returning a writable and dirty (Modified) line,
        // we should be invalidating the line
        panic_if(!invalidate && !pkt->hasSharers(),
                 "%s is passing a Modified line through %s, "
                 "but keeping the block", name(), pkt->print());

        if (is_timing) {
            doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
        } else {
            pkt->makeAtomicResponse();
            // packets such as upgrades do not actually have any data
            // payload
            if (pkt->hasData())
                pkt->setDataFromBlock(blk->data, blkSize);
        }

        // When a block is compressed, it must first be decompressed before
        // being read, and this increases the snoop delay.
        if (compressor && pkt->isRead()) {
            snoop_delay += compressor->getDecompressionLatency(blk);
        }
    }

    if (!respond && is_deferred) {
        assert(pkt->needsResponse());
        delete pkt;
    }

    // Do this last in case it deallocates block data or something
    // like that
    if (blk_valid && invalidate) {
        invalidateBlock(blk);
        DPRINTF(Cache, "new state is %s\n", blk->print());
    }

    return snoop_delay;
}
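
// In outline, handleSnoop() above proceeds roughly as follows: forward
// the snoop upstream (timing or atomic), write back and mark the packet
// satisfied on a dirty hit for a clean request, return early on a miss
// or when the line is known to be cached above, downgrade our copy and
// set hasSharers on a plain read, supply the data ourselves if the line
// is dirty and a response is needed, and finally invalidate our copy if
// the snoop demands it.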


void
Cache::recvTimingSnoopReq(PacketPtr pkt)
{
    DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());

    // no need to snoop requests that are not in range
    if (!inRange(pkt->getAddr())) {
        return;
    }

    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);

    Addr blk_addr = pkt->getBlockAddr(blkSize);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    // Update the latency cost of the snoop so that the crossbar can
    // account for it. Do not overwrite what other neighbouring caches
    // have already done, rather take the maximum. The update is
    // tentative, for cases where we return before an upward snoop
    // happens below.
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
                                         lookupLatency * clockPeriod());

    // Inform a request (Prefetch, CleanEvict or Writeback) from below
    // of an MSHR hit by calling setBlockCached.
    if (mshr && pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Setting block cached for %s from lower cache on "
                "mshr hit\n", pkt->print());
        pkt->setBlockCached();
        return;
    }

    // Let the MSHR itself track the snoop and decide whether we want
    // to go ahead and do the regular cache snoop
    if (mshr && mshr->handleSnoop(pkt, order++)) {
        DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s). "
                "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
                mshr->print());

        if (mshr->getNumTargets() > numTarget)
            warn("allocating bonus target for snoop"); // handle later
        return;
    }

    // We also need to check the writeback buffers and handle those
    WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure);
    if (wb_entry) {
        DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
                pkt->getAddr(), is_secure ? "s" : "ns");
        // Expect to see only Writebacks and/or CleanEvicts here, both of
        // which should not be generated for uncacheable data.
        assert(!wb_entry->isUncacheable());
        // There should only be a single request responsible for generating
        // Writebacks/CleanEvicts.
        assert(wb_entry->getNumTargets() == 1);
        PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
        assert(wb_pkt->isEviction() || wb_pkt->cmd == MemCmd::WriteClean);

        if (pkt->isEviction()) {
            // if the block is found in the write queue, set the BLOCK_CACHED
            // flag for Writeback/CleanEvict snoop. On return the snoop will
            // propagate the BLOCK_CACHED flag in Writeback packets and prevent
            // any CleanEvicts from travelling down the memory hierarchy.
            pkt->setBlockCached();
            DPRINTF(Cache, "%s: Squashing %s from lower cache on writequeue "
                    "hit\n", __func__, pkt->print());
            return;
        }

        // conceptually writebacks are no different to other blocks in
        // this cache, so the behaviour is modelled after handleSnoop,
        // the difference being that instead of querying the block
        // state to determine if it is dirty and writable, we use the
        // command and fields of the writeback packet
        bool respond = wb_pkt->cmd == MemCmd::WritebackDirty &&
            pkt->needsResponse();
        bool have_writable = !wb_pkt->hasSharers();
        bool invalidate = pkt->isInvalidate();

        if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
            assert(!pkt->needsWritable());
            pkt->setHasSharers();
            wb_pkt->setHasSharers();
        }

        if (respond) {
            pkt->setCacheResponding();

            if (have_writable) {
                pkt->setResponderHadWritable();
            }

            doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
                                   false, false);
        }

        if (invalidate && wb_pkt->cmd != MemCmd::WriteClean) {
            // Invalidation trumps our writeback... discard here
            // Note: markInService will remove entry from writeback buffer.
            markInService(wb_entry);
            delete wb_pkt;
        }
    }

    // If this was a shared writeback, there may still be
    // other shared copies above that require invalidation.
    // We could be more selective and return here if the
    // request is non-exclusive or if the writeback is
    // exclusive.
    uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false);

    // Override what we did when we first saw the snoop, as we now
    // also have the cost of the upwards snoops to account for
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay +
                                         lookupLatency * clockPeriod());
}

Tick
Cache::recvAtomicSnoop(PacketPtr pkt)
{
    // no need to snoop requests that are not in range.
    if (!inRange(pkt->getAddr())) {
        return 0;
    }

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
    uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
    return snoop_delay + lookupLatency * clockPeriod();
}

bool
Cache::isCachedAbove(PacketPtr pkt, bool is_timing)
{
    if (!forwardSnoops)
        return false;
    // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
    // Writeback snoops into upper level caches to check for copies of the
    // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict
    // packet, the cache can inform the crossbar below of presence or absence
    // of the block.
    if (is_timing) {
        Packet snoop_pkt(pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // Assert that packet is either Writeback or CleanEvict and not a
        // prefetch request because prefetch requests need an MSHR and may
        // generate a snoop response.
        assert(pkt->isEviction() || pkt->cmd == MemCmd::WriteClean);
        snoop_pkt.senderState = nullptr;
        cpuSidePort.sendTimingSnoopReq(&snoop_pkt);
        // Writeback/CleanEvict snoops do not generate a snoop response.
        assert(!(snoop_pkt.cacheResponding()));
        return snoop_pkt.isBlockCached();
    } else {
        cpuSidePort.sendAtomicSnoop(pkt);
        return pkt->isBlockCached();
    }
}
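
// The probe above deliberately leaves snoop_pkt.senderState as nullptr:
// unlike the prefetch snoop in sendMSHRQueuePacket() below, no snoop
// response can come back for a Writeback/CleanEvict, so there is no
// MSHR to attach.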

bool
Cache::sendMSHRQueuePacket(MSHR* mshr)
{
    assert(mshr);

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;

    if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
        DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());

        // we should never have hardware prefetches to allocated
        // blocks
        assert(!tags->findBlock(mshr->blkAddr, mshr->isSecure));

        // We need to check the caches above us to verify that
        // they don't have a copy of this block in the dirty state
        // at the moment. Without this check we could get a stale
        // copy from memory that might get used in place of the
        // dirty one.
        Packet snoop_pkt(tgt_pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // We are sending this packet upwards, but if it hits we will
        // get a snoop response that we end up treating just like a
        // normal response, hence it needs the MSHR as its sender
        // state
        snoop_pkt.senderState = mshr;
        cpuSidePort.sendTimingSnoopReq(&snoop_pkt);

        // Check to see if the prefetch was squashed by an upper cache
        // (to prevent us from grabbing the line), or if a writeback
        // arrived between the time the prefetch was placed in the
        // MSHRs and when it was selected to be sent.

        // It is important to check cacheResponding before
        // prefetchSquashed. If another cache has committed to
        // responding, it will be sending a dirty response which will
        // arrive at the MSHR allocated for this request. Checking the
        // prefetchSquash first may result in the MSHR being
        // prematurely deallocated.
        if (snoop_pkt.cacheResponding()) {
            auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
            assert(r.second);

            // if we are getting a snoop response with no sharers it
            // will be allocated as Modified
            bool pending_modified_resp = !snoop_pkt.hasSharers();
            markInService(mshr, pending_modified_resp);

            DPRINTF(Cache, "Upward snoop of prefetch for addr"
                    " %#x (%s) hit\n",
                    tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
            return false;
        }

        if (snoop_pkt.isBlockCached()) {
            DPRINTF(Cache, "Block present, prefetch squashed by cache.  "
                    "Deallocating mshr target %#x.\n",
                    mshr->blkAddr);

            // Deallocate the mshr target
            if (mshrQueue.forceDeallocateTarget(mshr)) {
                // Clear the blocked flag if this deallocation freed an
                // MSHR when all had previously been utilized
                clearBlocked(Blocked_NoMSHRs);
            }

            // given that no response is expected, delete Request and Packet
            delete tgt_pkt;

            return false;
        }
    }

    return BaseCache::sendMSHRQueuePacket(mshr);
}

Cache*
CacheParams::create()
{
    assert(tags);
    assert(replacement_policy);

    return new Cache(this);
}
