cache.cc revision 12724:4f6fac3191d2
/*
 * Copyright (c) 2010-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Sandberg
 *          Nikos Nikoleris
 */

/**
 * @file
 * Cache definitions.
 */

#include "mem/cache/cache.hh"

#include <cassert>

#include "base/compiler.hh"
#include "base/logging.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CacheTags.hh"
#include "debug/CacheVerbose.hh"
#include "enums/Clusivity.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/tags/base.hh"
#include "mem/cache/write_queue_entry.hh"
#include "mem/request.hh"
#include "params/Cache.hh"

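// doFastWrites guards the promotion of whole-line WriteReqs to
// WriteLineReq (see promoteWholeLineWrites below); it is currently
// always enabled.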
Cache::Cache(const CacheParams *p)
    : BaseCache(p, p->system->cacheLineSize()),
      doFastWrites(true)
{
}

void
Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                      bool deferred_response, bool pending_downgrade)
{
    BaseCache::satisfyRequest(pkt, blk);

    if (pkt->isRead()) {
        // determine if this read is from a (coherent) cache or not
        if (pkt->fromCache()) {
            assert(pkt->getSize() == blkSize);
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsWritable()) {
                // sanity check
                assert(pkt->cmd == MemCmd::ReadExReq ||
                       pkt->cmd == MemCmd::SCUpgradeFailReq);
                assert(!pkt->hasSharers());

                // if we have a dirty copy, make sure the recipient
                // keeps it marked dirty (in the modified state)
                if (blk->isDirty()) {
                    pkt->setCacheResponding();
                    blk->status &= ~BlkDirty;
                }
            } else if (blk->isWritable() && !pending_downgrade &&
                       !pkt->hasSharers() &&
                       pkt->cmd != MemCmd::ReadCleanReq) {
                // we can give the requester a writable copy on a read
                // request if:
                // - we have a writable copy at this level (& below)
                // - we don't have a pending snoop from below
                //   signaling another read request
                // - no other cache above has a copy (otherwise it
                //   would have set hasSharers flag when
                //   snooping the packet)
                // - the read has not explicitly asked for a clean
                //   copy of the line
                if (blk->isDirty()) {
                    // special considerations if we're owner:
                    if (!deferred_response) {
                        // respond with the line in Modified state
                        // (cacheResponding set, hasSharers not set)
                        pkt->setCacheResponding();

                        // if this cache is mostly inclusive, we
                        // keep the block in the Exclusive state,
                        // and pass it upwards as Modified
                        // (writable and dirty), hence we have
                        // multiple caches, all on the same path
                        // towards memory, all considering the
                        // same block writable, but only one
                        // considering it Modified

                        // we get away with multiple caches (on
                        // the same path to memory) considering
                        // the block writable as we always enter
                        // the cache hierarchy through a cache,
                        // and first snoop upwards in all other
                        // branches
                        blk->status &= ~BlkDirty;
                    } else {
                        // if we're responding after our own miss,
                        // there's a window where the recipient didn't
                        // know it was getting ownership and may not
                        // have responded to snoops correctly, so we
                        // have to respond with a shared line
                        pkt->setHasSharers();
                    }
                }
            } else {
                // otherwise only respond with a shared copy
                pkt->setHasSharers();
            }
        }
    }
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

bool
Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
              PacketList &writebacks)
{

    if (pkt->req->isUncacheable()) {
        assert(pkt->isRequest());

        chatty_assert(!(isReadOnly && pkt->isWrite()),
                      "Should never see a write in a read-only cache %s\n",
                      name());

        DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

        // flush and invalidate any existing block
        CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
        if (old_blk && old_blk->isValid()) {
            evictBlock(old_blk, writebacks);
        }

        blk = nullptr;
        // lookupLatency is the latency in case the request is uncacheable.
        lat = lookupLatency;
        return false;
    }

    return BaseCache::access(pkt, blk, lat, writebacks);
}

void
Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // We use forwardLatency here because we are copying writebacks to
        // write buffer.

        // Call isCachedAbove for Writebacks, CleanEvicts and
        // WriteCleans to discover if the block is cached above.
        if (isCachedAbove(wbPkt)) {
            if (wbPkt->cmd == MemCmd::CleanEvict) {
                // Delete CleanEvict because cached copies exist above. The
                // packet destructor will delete the request object because
                // this is a non-snoop request packet which does not require a
                // response.
                delete wbPkt;
            } else if (wbPkt->cmd == MemCmd::WritebackClean) {
                // clean writeback, do not send since the block is
                // still cached above
                assert(writebackClean);
                delete wbPkt;
            } else {
                assert(wbPkt->cmd == MemCmd::WritebackDirty ||
                       wbPkt->cmd == MemCmd::WriteClean);
                // Set BLOCK_CACHED flag in Writeback and send below, so that
                // the Writeback does not reset the bit corresponding to this
                // address in the snoop filter below.
                wbPkt->setBlockCached();
                allocateWriteBuffer(wbPkt, forward_time);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            allocateWriteBuffer(wbPkt, forward_time);
        }
        writebacks.pop_front();
    }
}

void
Cache::doWritebacksAtomic(PacketList& writebacks)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // Call isCachedAbove for both Writebacks and CleanEvicts. If
        // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
        // and discard CleanEvicts.
        if (isCachedAbove(wbPkt, false)) {
            if (wbPkt->cmd == MemCmd::WritebackDirty ||
                wbPkt->cmd == MemCmd::WriteClean) {
                // Set BLOCK_CACHED flag in Writeback and send below,
                // so that the Writeback does not reset the bit
                // corresponding to this address in the snoop filter
                // below. We can discard CleanEvicts because cached
                // copies exist above. Atomic mode isCachedAbove
                // modifies packet to set BLOCK_CACHED flag
                memSidePort.sendAtomic(wbPkt);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            memSidePort.sendAtomic(wbPkt);
        }
        writebacks.pop_front();
        // In case of CleanEvicts, the packet destructor will delete the
        // request object because this is a non-snoop request packet which
        // does not require a response.
        delete wbPkt;
    }
}


void
Cache::recvTimingSnoopResp(PacketPtr pkt)
{
    DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

    assert(pkt->isResponse());
    assert(!system->bypassCaches());

    // determine if the response is from a snoop request we created
    // (in which case it should be in the outstandingSnoop), or if we
    // merely forwarded someone else's snoop request
    const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
        outstandingSnoop.end();

    if (!forwardAsSnoop) {
        // the packet came from this cache, so sink it here and do not
        // forward it
        assert(pkt->cmd == MemCmd::HardPFResp);

        outstandingSnoop.erase(pkt->req);

        DPRINTF(Cache, "Got prefetch response from above for addr "
                "%#llx (%s)\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
        recvTimingResp(pkt);
        return;
    }

    // forwardLatency is set here because there is a response from an
    // upper level cache.
    // To pay the delay that occurs if the packet comes from the bus,
    // we also charge the headerDelay.
    Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    memSidePort.schedTimingSnoopResp(pkt, snoop_resp_time);
}

void
Cache::promoteWholeLineWrites(PacketPtr pkt)
{
    // Cache line clearing instructions
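    // A WriteReq that covers a full, aligned block can safely be
    // promoted: every byte of the old line is overwritten, so on a
    // miss the line can be allocated and other copies invalidated
    // without first fetching the data from below.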
    if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) {
        pkt->cmd = MemCmd::WriteLineReq;
        DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
    }
}

void
Cache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    // should never be satisfying an uncacheable access as we
    // flush and invalidate any existing block as part of the
    // lookup
    assert(!pkt->req->isUncacheable());

    BaseCache::handleTimingReqHit(pkt, blk, request_time);
}

void
Cache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time,
                           Tick request_time)
{
    if (pkt->req->isUncacheable()) {
        // ignore any existing MSHR if we are dealing with an
        // uncacheable request

        // should have flushed and have no valid block
        assert(!blk || !blk->isValid());

        mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;

        if (pkt->isWrite()) {
            allocateWriteBuffer(pkt, forward_time);
        } else {
            assert(pkt->isRead());

            // uncacheable accesses always allocate a new MSHR

            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }

        return;
    }

    Addr blk_addr = pkt->getBlockAddr(blkSize);

    MSHR *mshr = mshrQueue.findMatch(blk_addr, pkt->isSecure());

    // Software prefetch handling:
    // To keep the core from waiting on data it won't look at
    // anyway, send back a response with dummy data. Miss handling
    // will continue asynchronously. Unfortunately, the core will
    // insist upon freeing original Packet/Request, so we have to
    // create a new pair with a different lifecycle. Note that this
    // processing happens before any MSHR munging on the behalf of
    // this request because this new Request will be the one stored
    // into the MSHRs, not the original.
    if (pkt->cmd.isSWPrefetch()) {
        assert(pkt->needsResponse());
        assert(pkt->req->hasPaddr());
        assert(!pkt->req->isUncacheable());

        // There's no reason to add a prefetch as an additional target
        // to an existing MSHR. If an outstanding request is already
        // in progress, there is nothing for the prefetch to do.
        // If this is the case, we don't even create a request at all.
        PacketPtr pf = nullptr;

        if (!mshr) {
            // copy the request and create a new SoftPFReq packet
            RequestPtr req = new Request(pkt->req->getPaddr(),
                                         pkt->req->getSize(),
                                         pkt->req->getFlags(),
                                         pkt->req->masterId());
            pf = new Packet(req, pkt->cmd);
            pf->allocate();
            assert(pf->getAddr() == pkt->getAddr());
            assert(pf->getSize() == pkt->getSize());
        }

        pkt->makeTimingResponse();

        // request_time is used here, taking into account lat and the delay
        // charged if the packet comes from the xbar.
        cpuSidePort.schedTimingResp(pkt, request_time, true);

        // If an outstanding request is in progress (we found an
        // MSHR), pf is nullptr here
        pkt = pf;
    }

    BaseCache::handleTimingReqMiss(pkt, mshr, blk, forward_time, request_time);
}

408Cache::recvTimingReq(PacketPtr pkt)
409{
410    DPRINTF(CacheTags, "%s tags:\n%s\n", __func__, tags->print());
411
412    assert(pkt->isRequest());
413
414    // Just forward the packet if caches are disabled.
415    if (system->bypassCaches()) {
416        // @todo This should really enqueue the packet rather
417        bool M5_VAR_USED success = memSidePort.sendTimingReq(pkt);
418        assert(success);
419        return;
420    }
421
422    promoteWholeLineWrites(pkt);
423
    if (pkt->cacheResponding()) {
        // a cache above us (but not where the packet came from) is
        // responding to the request, in other words it has the line
        // in Modified or Owned state
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if the packet needs the block to be writable, and the cache
        // that has promised to respond (setting the cache responding
        // flag) is not providing writable (it is in Owned rather than
        // the Modified state), we know that there may be other Shared
        // copies in the system; go out and invalidate them all
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        // an upstream cache that had the line in Owned state
        // (dirty, but not writable), is responding and thus
        // transferring the dirty line from one branch of the
        // cache hierarchy to another

        // send out an express snoop and invalidate all other
        // copies (snooping a packet that needs writable is the
        // same as an invalidation), thus turning the Owned line
        // into a Modified line, note that we don't invalidate the
        // block in the current cache or any other cache on the
        // path to memory

        // create a downstream express snoop with cleared packet
        // flags, there is no need to allocate any data as the
        // packet is merely used to co-ordinate state transitions
        Packet *snoop_pkt = new Packet(pkt, true, false);

        // also reset the bus time that the original packet has
        // not yet paid for
        snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;

        // make this an instantaneous express snoop, and let the
        // other caches in the system know that another cache
        // is responding, because we have found the authoritative
        // copy (Modified or Owned) that will supply the right
        // data
        snoop_pkt->setExpressSnoop();
        snoop_pkt->setCacheResponding();

        // this express snoop travels towards the memory, and at
        // every crossbar it is snooped upwards thus reaching
        // every cache in the system
        bool M5_VAR_USED success = memSidePort.sendTimingReq(snoop_pkt);
        // express snoops always succeed
        assert(success);

        // main memory will delete the snoop packet

        // queue for deletion, as opposed to immediate deletion, as
        // the sending cache is still relying on the packet
        pendingDelete.reset(pkt);

        // no need to take any further action in this particular cache
        // as an upstream cache has already committed to responding,
        // and we have already sent out any express snoops in the
        // section above to ensure all other copies in the system are
        // invalidated
        return;
    }

    BaseCache::recvTimingReq(pkt);
}

PacketPtr
Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                        bool needsWritable) const
{
    // should never see evictions here
    assert(!cpu_pkt->isEviction());

    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable() ||
        (!blkValid && cpu_pkt->isUpgrade()) ||
        cpu_pkt->cmd == MemCmd::InvalidateReq || cpu_pkt->isClean()) {
        // uncacheable requests and upgrades from upper-level caches
        // that missed completely just go through as is
        return nullptr;
    }

    assert(cpu_pkt->needsResponse());

    MemCmd cmd;
    // @TODO make useUpgrades a parameter.
    // Note that ownership protocols require upgrade, otherwise a
    // write miss on a shared owned block will generate a ReadExcl,
    // which will clobber the owned copy.
    const bool useUpgrades = true;
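    // The chain below selects the bus command, roughly:
    //   WriteLineReq                     -> InvalidateReq
    //   valid but not writable block     -> (SC)UpgradeReq
    //   failed StoreCond                 -> SCUpgradeFailReq
    //   no block, needs writable         -> ReadExReq
    //   no block, forced clean response  -> ReadCleanReq
    //   no block, otherwise              -> ReadSharedReq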
    if (cpu_pkt->cmd == MemCmd::WriteLineReq) {
        assert(!blkValid || !blk->isWritable());
        // forward as invalidate to all other caches, this gives us
        // the line in Exclusive state, and invalidates all other
        // copies
        cmd = MemCmd::InvalidateReq;
    } else if (blkValid && useUpgrades) {
        // only reason to be here is that blk is read only and we need
        // it to be writable
        assert(needsWritable);
        assert(!blk->isWritable());
        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
               cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
        // Even though this SC will fail, we still need to send out the
        // request and get the data to supply it to other snoopers in the case
        // where the determination that the StoreCond fails is delayed due to
        // all caches not being on the same local bus.
        cmd = MemCmd::SCUpgradeFailReq;
    } else {
        // block is invalid

        // If the request does not need a writable copy, there are two
        // cases where we need to ensure the response will not fetch the
        // block in dirty state:
        // * this cache is read only and it does not perform
        //   writebacks,
        // * this cache is mostly exclusive and will not fill (since
        //   it does not fill it will have to writeback the dirty data
        //   immediately which generates unnecessary writebacks).
        bool force_clean_rsp = isReadOnly || clusivity == Enums::mostly_excl;
        cmd = needsWritable ? MemCmd::ReadExReq :
            (force_clean_rsp ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
    }
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);

    // if there are upstream caches that have already marked the
    // packet as having sharers (not passing writable), pass that info
    // downstream
    if (cpu_pkt->hasSharers() && !needsWritable) {
        // note that cpu_pkt may have spent a considerable time in the
        // MSHR queue and that the information could possibly be out
        // of date, however, there is no harm in conservatively
        // assuming the block has sharers
        pkt->setHasSharers();
        DPRINTF(Cache, "%s: passing hasSharers from %s to %s\n",
                __func__, cpu_pkt->print(), pkt->print());
    }

    // the packet should be block aligned
    assert(pkt->getAddr() == pkt->getBlockAddr(blkSize));

    pkt->allocate();
    DPRINTF(Cache, "%s: created %s from %s\n", __func__, pkt->print(),
            cpu_pkt->print());
    return pkt;
}


Cycles
Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *blk,
                           PacketList &writebacks)
{
    // deal with the packets that go through the write path of
    // the cache, i.e. any evictions and writes
    if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
        (pkt->req->isUncacheable() && pkt->isWrite())) {
        Cycles latency = ticksToCycles(memSidePort.sendAtomic(pkt));

        // at this point, if the request was an uncacheable write
        // request, it has been satisfied by a memory below and the
        // packet carries the response back
        assert(!(pkt->req->isUncacheable() && pkt->isWrite()) ||
               pkt->isResponse());

        return latency;
    }

    // only misses left

    PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable());

    bool is_forward = (bus_pkt == nullptr);

    if (is_forward) {
        // just forwarding the same request to the next level
        // no local cache operation involved
        bus_pkt = pkt;
    }

    DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__,
            bus_pkt->print());

#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    Cycles latency = ticksToCycles(memSidePort.sendAtomic(bus_pkt));

    bool is_invalidate = bus_pkt->isInvalidate();

    // We are now dealing with the response handling
    DPRINTF(Cache, "%s: Receive response: %s in state %i\n", __func__,
            bus_pkt->print(), old_state);

    // If packet was a forward, the response (if any) is already
    // in place in the bus_pkt == pkt structure, so we don't need
    // to do anything.  Otherwise, use the separate bus_pkt to
    // generate response to pkt and then delete it.
    if (!is_forward) {
        if (pkt->needsResponse()) {
            assert(bus_pkt->isResponse());
            if (bus_pkt->isError()) {
                pkt->makeAtomicResponse();
                pkt->copyError(bus_pkt);
            } else if (pkt->cmd == MemCmd::WriteLineReq) {
                // note the use of pkt, not bus_pkt here.

                // write-line request to the cache that promoted
                // the write to a whole line
                blk = handleFill(pkt, blk, writebacks,
                                 allocOnFill(pkt->cmd));
                assert(blk != nullptr);
                is_invalidate = false;
                satisfyRequest(pkt, blk);
            } else if (bus_pkt->isRead() ||
                       bus_pkt->cmd == MemCmd::UpgradeResp) {
                // we're updating cache state to allow us to
                // satisfy the upstream request from the cache
                blk = handleFill(bus_pkt, blk, writebacks,
                                 allocOnFill(pkt->cmd));
                satisfyRequest(pkt, blk);
                maintainClusivity(pkt->fromCache(), blk);
            } else {
                // we're satisfying the upstream request without
                // modifying cache state, e.g., a write-through
                pkt->makeAtomicResponse();
            }
        }
        delete bus_pkt;
    }

    if (is_invalidate && blk && blk->isValid()) {
        invalidateBlock(blk);
    }

    return latency;
}

Tick
Cache::recvAtomic(PacketPtr pkt)
{
    // Forward the request if the system is in cache bypass mode.
    if (system->bypassCaches())
        return ticksToCycles(memSidePort.sendAtomic(pkt));

    promoteWholeLineWrites(pkt);

    return BaseCache::recvAtomic(pkt);
}


/////////////////////////////////////////////////////
//
// Response handling: responses from the memory side
//
/////////////////////////////////////////////////////


void
Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk,
                          PacketList &writebacks)
{
    MSHR::Target *initial_tgt = mshr->getTarget();
    // First offset for critical word first calculations
    const int initial_offset = initial_tgt->pkt->getOffset(blkSize);

    const bool is_error = pkt->isError();
    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
    // allow invalidation responses originating from write-line
    // requests to be discarded
    bool is_invalidate = pkt->isInvalidate();

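    // Service the targets that this response can satisfy, in order.
    // Targets may stem from the CPU side, the prefetcher, or deferred
    // snoops, and each computes its own completion time so that the
    // critical word can be returned before the rest of the line.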
    MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
    for (auto &target: targets) {
        Packet *tgt_pkt = target.pkt;
        switch (target.source) {
          case MSHR::Target::FromCPU:
            Tick completion_time;
            // Here we charge on completion_time the delay of the xbar if the
            // packet comes from it, charged on headerDelay.
            completion_time = pkt->headerDelay;

            // Software prefetch handling for cache closest to core
            if (tgt_pkt->cmd.isSWPrefetch()) {
                // a software prefetch would have already been ack'd
                // immediately with dummy data so the core would be able to
                // retire it. This request completes right here, so we
                // deallocate it.
                delete tgt_pkt->req;
                delete tgt_pkt;
                break; // skip response
            }

            // unlike the other packet flows, where data is found in other
            // caches or memory and brought back, write-line requests always
            // have the data right away, so the "is fill?" check above
            // cannot actually be resolved until examining the stored MSHR
            // state. We "catch up" with that logic here, which is duplicated
            // from above.
            if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
                assert(!is_error);
                // we got the block in a writable state, so promote
                // any deferred targets if possible
                mshr->promoteWritable();
                // NB: we use the original packet here and not the response!
                blk = handleFill(tgt_pkt, blk, writebacks,
                                 targets.allocOnFill);
                assert(blk);

                // treat as a fill, and discard the invalidation
                // response
                is_fill = true;
                is_invalidate = false;
            }

            if (is_fill) {
                satisfyRequest(tgt_pkt, blk, true, mshr->hasPostDowngrade());

                // How many bytes past the first request is this one
                int transfer_offset =
                    tgt_pkt->getOffset(blkSize) - initial_offset;
                if (transfer_offset < 0) {
                    transfer_offset += blkSize;
                }
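                // For example, with blkSize = 64 and a first target at
                // offset 48, a target at offset 16 yields
                // transfer_offset = 16 - 48 + 64 = 32; it is not the
                // critical word and thus pays payloadDelay below.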

                // If this is not the critical word (non-zero offset),
                // charge payloadDelay.
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache or
                // the core.
                completion_time += clockEdge(responseLatency) +
                    (transfer_offset ? pkt->payloadDelay : 0);

                assert(!tgt_pkt->req->isUncacheable());

                assert(tgt_pkt->req->masterId() < system->maxMasters());
                missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] +=
                    completion_time - target.recvTime;
            } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
                // failed StoreCond upgrade
                assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
                       tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
                       tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache or
                // the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                tgt_pkt->req->setExtraData(0);
            } else {
                // We are about to send a response to a cache above
                // that asked for an invalidation; we need to
                // invalidate our copy immediately as the most
                // up-to-date copy of the block will now be in the
                // cache above. It will also prevent this cache from
                // responding (if the block was previously dirty) to
                // snoops as they should snoop the caches above where
                // they will get the response from.
                if (is_invalidate && blk && blk->isValid()) {
                    invalidateBlock(blk);
                }
                // not a cache fill, just forwarding response
                // responseLatency is the latency of the return path
                // from lower level caches/memory to the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                if (pkt->isRead() && !is_error) {
                    // sanity check
                    assert(pkt->getAddr() == tgt_pkt->getAddr());
                    assert(pkt->getSize() >= tgt_pkt->getSize());

                    tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
                }
            }
            tgt_pkt->makeTimingResponse();
            // if this packet is an error copy that to the new packet
            if (is_error)
                tgt_pkt->copyError(pkt);
            if (tgt_pkt->cmd == MemCmd::ReadResp &&
                (is_invalidate || mshr->hasPostInvalidate())) {
                // If intermediate cache got ReadRespWithInvalidate,
                // propagate that.  Response should not have
                // isInvalidate() set otherwise.
                tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
                DPRINTF(Cache, "%s: updated cmd to %s\n", __func__,
                        tgt_pkt->print());
            }
            // Reset the bus additional time as it is now accounted for
            tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
            cpuSidePort.schedTimingResp(tgt_pkt, completion_time, true);
            break;

          case MSHR::Target::FromPrefetcher:
            assert(tgt_pkt->cmd == MemCmd::HardPFReq);
            if (blk)
                blk->status |= BlkHWPrefetched;
            delete tgt_pkt->req;
            delete tgt_pkt;
            break;

          case MSHR::Target::FromSnoop:
            // I don't believe that a snoop can be in an error state
            assert(!is_error);
            // response to snoop request
            DPRINTF(Cache, "processing deferred snoop...\n");
            // If the response is invalidating, a snooping target can
            // be satisfied if it is also invalidating. If the response is
            // not only invalidating, but more specifically an InvalidateResp
            // and the MSHR was created due to an InvalidateReq, then a cache
            // above is waiting to satisfy a WriteLineReq. In this case even a
            // non-invalidating snoop is added as a target here since this is
            // the ordering point. When the InvalidateResp reaches this cache,
            // the snooping target will further snoop the cache above with the
            // WriteLineReq.
            assert(!is_invalidate || pkt->cmd == MemCmd::InvalidateResp ||
                   pkt->req->isCacheMaintenance() ||
                   mshr->hasPostInvalidate());
            handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
            break;

          default:
            panic("Illegal target->source enum %d\n", target.source);
        }
    }

    maintainClusivity(targets.hasFromCache, blk);

    if (blk && blk->isValid()) {
        // an invalidate response stemming from a write line request
        // should not invalidate the block, so check if the
        // invalidation should be discarded
        if (is_invalidate || mshr->hasPostInvalidate()) {
            invalidateBlock(blk);
        } else if (mshr->hasPostDowngrade()) {
            blk->status &= ~BlkWritable;
        }
    }
}

PacketPtr
Cache::evictBlock(CacheBlk *blk)
{
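    // A dirty block always requires a writeback; a clean block is
    // written back only if this cache writes back clean lines
    // (writebackClean), and otherwise generates a CleanEvict message
    // for the snoop filter below (see cleanEvictBlk).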
    PacketPtr pkt = (blk->isDirty() || writebackClean) ?
        writebackBlk(blk) : cleanEvictBlk(blk);

    invalidateBlock(blk);

    return pkt;
}

void
Cache::evictBlock(CacheBlk *blk, PacketList &writebacks)
{
    PacketPtr pkt = evictBlock(blk);
    if (pkt) {
        writebacks.push_back(pkt);
    }
}

PacketPtr
Cache::cleanEvictBlk(CacheBlk *blk)
{
    assert(!writebackClean);
    assert(blk && blk->isValid() && !blk->isDirty());
    // Creating a zero sized write, a message to the snoop filter
    Request *req =
        new Request(tags->regenerateBlkAddr(blk), blkSize, 0,
                    Request::wbMasterId);
    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);

    PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
    pkt->allocate();
    DPRINTF(Cache, "Create CleanEvict %s\n", pkt->print());

    return pkt;
}


/////////////////////////////////////////////////////
//
// Snoop path: requests coming in from the memory side
//
/////////////////////////////////////////////////////

void
Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
                              bool already_copied, bool pending_inval)
{
    // sanity check
    assert(req_pkt->isRequest());
    assert(req_pkt->needsResponse());

    DPRINTF(Cache, "%s: for %s\n", __func__, req_pkt->print());
    // timing-mode snoop responses require a new packet, unless we
    // already made a copy...
    PacketPtr pkt = req_pkt;
    if (!already_copied)
        // do not clear flags, and allocate space for data if the
        // packet needs it (the only packets that carry data are read
        // responses)
        pkt = new Packet(req_pkt, false, req_pkt->isRead());

    assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
           pkt->hasSharers());
    pkt->makeTimingResponse();
    if (pkt->isRead()) {
        pkt->setDataFromBlock(blk_data, blkSize);
    }
    if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
        // Assume we defer a response to a read from a far-away cache
        // A, then later defer a ReadExcl from a cache B on the same
        // bus as us. We'll assert cacheResponding in both cases, but
        // in the latter case cacheResponding will keep the
        // invalidation from reaching cache A. This special response
        // tells cache A that it gets the block to satisfy its read,
        // but must immediately invalidate it.
        pkt->cmd = MemCmd::ReadRespWithInvalidate;
    }
    // Here we consider forward_time, paying for just forward latency and
    // also charging the delay provided by the xbar.
    // forward_time is used as the send time of the snoop response below.
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    DPRINTF(CacheVerbose, "%s: created response: %s tick: %lu\n", __func__,
            pkt->print(), forward_time);
    memSidePort.schedTimingSnoopResp(pkt, forward_time, true);
}

uint32_t
Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
                   bool is_deferred, bool pending_inval)
{
    DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());
    // deferred snoops can only happen in timing mode
    assert(!(is_deferred && !is_timing));
    // pending_inval only makes sense on deferred snoops
    assert(!(pending_inval && !is_deferred));
    assert(pkt->isRequest());

    // the packet may get modified if we or a forwarded snooper
    // responds in atomic mode, so remember a few things about the
    // original packet up front
    bool invalidate = pkt->isInvalidate();
    bool M5_VAR_USED needs_writable = pkt->needsWritable();

    // at the moment we could get an uncacheable write which does not
    // have the invalidate flag, and we need a suitable way of dealing
    // with this case
    panic_if(invalidate && pkt->req->isUncacheable(),
             "%s got an invalidating uncacheable snoop request %s",
             name(), pkt->print());

    uint32_t snoop_delay = 0;

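    // snoop_delay accumulates the extra latency the snoop incurs in
    // this cache and in the caches above; the callers add it to the
    // packet's snoopDelay so that the crossbar can account for it.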
    if (forwardSnoops) {
        // first propagate snoop upward to see if anyone above us wants to
        // handle it.  save & restore packet src since it will get
        // rewritten to be relative to cpu-side bus (if any)
        bool alreadyResponded = pkt->cacheResponding();
        if (is_timing) {
            // copy the packet so that we can clear any flags before
            // forwarding it upwards, we also allocate data (passing
            // the pointer along in case of static data), in case
            // there is a snoop hit in upper levels
            Packet snoopPkt(pkt, true, true);
            snoopPkt.setExpressSnoop();
            // the snoop packet does not need to wait any additional
            // time
            snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
            cpuSidePort.sendTimingSnoopReq(&snoopPkt);

            // add the header delay (including crossbar and snoop
            // delays) of the upward snoop to the snoop delay for this
            // cache
            snoop_delay += snoopPkt.headerDelay;

            if (snoopPkt.cacheResponding()) {
                // cache-to-cache response from some upper cache
                assert(!alreadyResponded);
                pkt->setCacheResponding();
            }
            // upstream cache has the block, or has an outstanding
            // MSHR, pass the flag on
            if (snoopPkt.hasSharers()) {
                pkt->setHasSharers();
            }
            // If this request is a prefetch or clean evict and an upper level
            // signals block present, make sure to propagate the block
            // presence to the requester.
            if (snoopPkt.isBlockCached()) {
                pkt->setBlockCached();
            }
            // If the request was satisfied by snooping the cache
            // above, mark the original packet as satisfied too.
            if (snoopPkt.satisfied()) {
                pkt->setSatisfied();
            }
        } else {
            cpuSidePort.sendAtomicSnoop(pkt);
            if (!alreadyResponded && pkt->cacheResponding()) {
                // cache-to-cache response from some upper cache:
                // forward response to original requester
                assert(pkt->isResponse());
            }
        }
    }

    bool respond = false;
    bool blk_valid = blk && blk->isValid();
    if (pkt->isClean()) {
        if (blk_valid && blk->isDirty()) {
            DPRINTF(CacheVerbose, "%s: packet (snoop) %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
            PacketList writebacks;
            writebacks.push_back(wb_pkt);

            if (is_timing) {
                // anything that is merely forwarded pays for the forward
                // latency and the delay provided by the crossbar
                Tick forward_time = clockEdge(forwardLatency) +
                    pkt->headerDelay;
                doWritebacks(writebacks, forward_time);
            } else {
                doWritebacksAtomic(writebacks);
            }
            pkt->setSatisfied();
        }
    } else if (!blk_valid) {
        DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__,
                pkt->print());
        if (is_deferred) {
            // we no longer have the block, and will not respond, but a
            // packet was allocated in MSHR::handleSnoop and we have
            // to delete it
            assert(pkt->needsResponse());

            // we have passed the block to a cache upstream, that
            // cache should be responding
            assert(pkt->cacheResponding());

            delete pkt;
        }
        return snoop_delay;
    } else {
        DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__,
                pkt->print(), blk->print());

        // We may end up modifying both the block state and the packet (if
        // we respond in atomic mode), so just figure out what to do now
        // and then do it later. We respond to all snoops that need
        // responses provided we have the block in dirty state. The
        // invalidation itself is taken care of below. We don't respond to
        // cache maintenance operations as this is done by the destination
        // xbar.
        respond = blk->isDirty() && pkt->needsResponse();

        chatty_assert(!(isReadOnly && blk->isDirty()), "Should never have "
                      "a dirty block in a read-only cache %s\n", name());
    }

    // Invalidate any prefetches from below that would strip write permissions
    // MemCmd::HardPFReq is only observed by upstream caches.  After missing
    // above and in its own cache, a new MemCmd::ReadReq is created that
    // downstream caches observe.
    if (pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
                "from lower cache\n", pkt->getAddr(), pkt->print());
        pkt->setBlockCached();
        return snoop_delay;
    }

    if (pkt->isRead() && !invalidate) {
        // reading without requiring the line in a writable state
        assert(!needs_writable);
        pkt->setHasSharers();

        // if the requesting packet is uncacheable, retain the line in
        // the current state, otherwise unset the writable flag,
        // which means we go from Modified to Owned (and will respond
        // below), remain in Owned (and will respond below), from
        // Exclusive to Shared, or remain in Shared
        if (!pkt->req->isUncacheable())
            blk->status &= ~BlkWritable;
        DPRINTF(Cache, "new state is %s\n", blk->print());
    }

    if (respond) {
        // prevent anyone else from responding, cache as well as
        // memory, and also prevent any memory from even seeing the
        // request
        pkt->setCacheResponding();
        if (!pkt->isClean() && blk->isWritable()) {
            // inform the cache hierarchy that this cache had the line
            // in the Modified state so that we avoid unnecessary
            // invalidations (see Packet::setResponderHadWritable)
            pkt->setResponderHadWritable();

            // in the case of an uncacheable request there is no point
            // in setting the responderHadWritable flag, but since the
            // recipient does not care there is no harm in doing so
        } else {
            // if the packet has needsWritable set we invalidate our
            // copy below and all other copies will be invalidated
            // through express snoops, and if needsWritable is not set
            // we already called setHasSharers above
        }

        // if we are returning a writable and dirty (Modified) line,
        // we should be invalidating the line
        panic_if(!invalidate && !pkt->hasSharers(),
                 "%s is passing a Modified line through %s, "
                 "but keeping the block", name(), pkt->print());

        if (is_timing) {
            doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
        } else {
            pkt->makeAtomicResponse();
            // packets such as upgrades do not actually have any data
            // payload
            if (pkt->hasData())
                pkt->setDataFromBlock(blk->data, blkSize);
        }
    }

    if (!respond && is_deferred) {
        assert(pkt->needsResponse());

        // if we copied the deferred packet with the intention to
        // respond, but are not responding, then a cache above us must
        // be, and we can use this as the indication of whether this
        // is a packet where we created a copy of the request or not
        if (!pkt->cacheResponding()) {
            delete pkt->req;
        }

        delete pkt;
    }

    // Do this last in case it deallocates block data or something
    // like that
    if (blk_valid && invalidate) {
        invalidateBlock(blk);
        DPRINTF(Cache, "new state is %s\n", blk->print());
    }

    return snoop_delay;
}


void
Cache::recvTimingSnoopReq(PacketPtr pkt)
{
    DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());

    // Snoops shouldn't happen when bypassing caches
    assert(!system->bypassCaches());

    // no need to snoop requests that are not in range
    if (!inRange(pkt->getAddr())) {
        return;
    }

    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);

    Addr blk_addr = pkt->getBlockAddr(blkSize);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    // Update the latency cost of the snoop so that the crossbar can
    // account for it. Do not overwrite what other neighbouring caches
    // have already done, rather take the maximum. The update is
    // tentative, for cases where we return before an upward snoop
    // happens below.
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
                                         lookupLatency * clockPeriod());

    // Inform requests from below (Prefetch, CleanEvict or Writeback) of
    // an MSHR hit by setting BLOCK_CACHED.
    if (mshr && pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Setting block cached for %s from lower cache on "
                "mshr hit\n", pkt->print());
        pkt->setBlockCached();
        return;
    }

    // Bypass any existing cache maintenance requests if the request
    // has been satisfied already (i.e., the dirty block has been
    // found).
    if (mshr && pkt->req->isCacheMaintenance() && pkt->satisfied()) {
        return;
    }

    // Let the MSHR itself track the snoop and decide whether we want
    // to go ahead and do the regular cache snoop
    if (mshr && mshr->handleSnoop(pkt, order++)) {
        DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s). "
                "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
                mshr->print());

        if (mshr->getNumTargets() > numTarget)
            warn("allocating bonus target for snoop"); // handle later
        return;
    }

    // We also need to check the writeback buffers and handle those
    WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure);
    if (wb_entry) {
        DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
                pkt->getAddr(), is_secure ? "s" : "ns");
        // Expect to see only Writebacks and/or CleanEvicts here, both of
        // which should not be generated for uncacheable data.
        assert(!wb_entry->isUncacheable());
        // There should only be a single request responsible for generating
        // Writebacks/CleanEvicts.
        assert(wb_entry->getNumTargets() == 1);
        PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
        assert(wb_pkt->isEviction() || wb_pkt->cmd == MemCmd::WriteClean);

        if (pkt->isEviction()) {
            // if the block is found in the write queue, set the BLOCK_CACHED
            // flag for Writeback/CleanEvict snoop. On return the snoop will
            // propagate the BLOCK_CACHED flag in Writeback packets and prevent
            // any CleanEvicts from travelling down the memory hierarchy.
            pkt->setBlockCached();
            DPRINTF(Cache, "%s: Squashing %s from lower cache on writequeue "
                    "hit\n", __func__, pkt->print());
            return;
        }

        // conceptually writebacks are no different to other blocks in
        // this cache, so the behaviour is modelled after handleSnoop,
        // the difference being that instead of querying the block
        // state to determine if it is dirty and writable, we use the
        // command and fields of the writeback packet
        bool respond = wb_pkt->cmd == MemCmd::WritebackDirty &&
            pkt->needsResponse();
        bool have_writable = !wb_pkt->hasSharers();
        bool invalidate = pkt->isInvalidate();

        if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
            assert(!pkt->needsWritable());
            pkt->setHasSharers();
            wb_pkt->setHasSharers();
        }

        if (respond) {
            pkt->setCacheResponding();

            if (have_writable) {
                pkt->setResponderHadWritable();
            }

            doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
                                   false, false);
        }

        if (invalidate && wb_pkt->cmd != MemCmd::WriteClean) {
            // Invalidation trumps our writeback... discard here
            // Note: markInService will remove entry from writeback buffer.
            markInService(wb_entry);
            delete wb_pkt;
        }
    }

    // If this was a shared writeback, there may still be
    // other shared copies above that require invalidation.
    // We could be more selective and return here if the
    // request is non-exclusive or if the writeback is
    // exclusive.
    uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false);

    // Override what we did when we first saw the snoop, as we now
    // also have the cost of the upwards snoops to account for
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay +
                                         lookupLatency * clockPeriod());
}

Tick
Cache::recvAtomicSnoop(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!system->bypassCaches());

    // no need to snoop requests that are not in range.
    if (!inRange(pkt->getAddr())) {
        return 0;
    }

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
    uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
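    // charge this cache's lookup latency on top of whatever delay the
    // upward snoop accumulated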
    return snoop_delay + lookupLatency * clockPeriod();
}

bool
Cache::isCachedAbove(PacketPtr pkt, bool is_timing)
{
    if (!forwardSnoops)
        return false;
    // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
    // Writeback snoops into upper level caches to check for copies of the
    // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict
    // packet, the cache can inform the crossbar below of presence or absence
    // of the block.
    if (is_timing) {
        Packet snoop_pkt(pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // Assert that packet is either Writeback or CleanEvict and not a
        // prefetch request because prefetch requests need an MSHR and may
        // generate a snoop response.
        assert(pkt->isEviction() || pkt->cmd == MemCmd::WriteClean);
        snoop_pkt.senderState = nullptr;
        cpuSidePort.sendTimingSnoopReq(&snoop_pkt);
        // Writeback/CleanEvict snoops do not generate a snoop response.
        assert(!(snoop_pkt.cacheResponding()));
        return snoop_pkt.isBlockCached();
    } else {
        cpuSidePort.sendAtomicSnoop(pkt);
        return pkt->isBlockCached();
    }
}

bool
Cache::sendMSHRQueuePacket(MSHR* mshr)
{
    assert(mshr);

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;

    if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
        DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());

        // we should never have hardware prefetches to allocated
        // blocks
        assert(!tags->findBlock(mshr->blkAddr, mshr->isSecure));

        // We need to check the caches above us to verify that
        // they don't have a copy of this block in the dirty state
        // at the moment. Without this check we could get a stale
        // copy from memory that might get used in place of the
        // dirty one.
        Packet snoop_pkt(tgt_pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // We are sending this packet upwards, but if it hits we will
        // get a snoop response that we end up treating just like a
        // normal response, hence it needs the MSHR as its sender
        // state
        snoop_pkt.senderState = mshr;
        cpuSidePort.sendTimingSnoopReq(&snoop_pkt);

        // Check to see if the prefetch was squashed by an upper cache
        // (to prevent us from grabbing the line), or if a writeback
        // arrived between the time the prefetch was placed in the
        // MSHRs and when it was selected to be sent.

        // It is important to check cacheResponding before
        // prefetchSquashed. If another cache has committed to
        // responding, it will be sending a dirty response which will
        // arrive at the MSHR allocated for this request. Checking the
        // prefetchSquash first may result in the MSHR being
        // prematurely deallocated.
        if (snoop_pkt.cacheResponding()) {
            auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
            assert(r.second);

            // if we are getting a snoop response with no sharers it
            // will be allocated as Modified
            bool pending_modified_resp = !snoop_pkt.hasSharers();
            markInService(mshr, pending_modified_resp);

            DPRINTF(Cache, "Upward snoop of prefetch for addr"
                    " %#x (%s) hit\n",
                    tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
            return false;
        }

        if (snoop_pkt.isBlockCached()) {
            DPRINTF(Cache, "Block present, prefetch squashed by cache.  "
                    "Deallocating mshr target %#x.\n",
                    mshr->blkAddr);

1415            if (mshrQueue.forceDeallocateTarget(mshr)) {
1416                // Clear block if this deallocation resulted freed an
1417                // mshr when all had previously been utilized
1418                clearBlocked(Blocked_NoMSHRs);
1419            }
1420
1421            // given that no response is expected, delete Request and Packet
1422            delete tgt_pkt->req;
1423            delete tgt_pkt;
1424
1425            return false;
1426        }
1427    }
1428
1429    return BaseCache::sendMSHRQueuePacket(mshr);
1430}
1431
1432Cache*
1433CacheParams::create()
1434{
1435    assert(tags);
1436    assert(replacement_policy);
1437
1438    return new Cache(this);
1439}
1440