cache.cc revision 11375
/*
 * Copyright (c) 2010-2016 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Sandberg
 */

/**
 * @file
 * Cache definitions.
 */

#include "mem/cache/cache.hh"

#include "base/misc.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheTags.hh"
#include "debug/CacheVerbose.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "sim/sim_exit.hh"

Cache::Cache(const CacheParams *p)
    : BaseCache(p, p->system->cacheLineSize()),
      tags(p->tags),
      prefetcher(p->prefetcher),
      doFastWrites(true),
      prefetchOnAccess(p->prefetch_on_access),
      clusivity(p->clusivity),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent(this, false,
                                    EventBase::Delayed_Writeback_Pri)
{
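    // tempBlock is a stand-in block used when a response must be
    // handled without allocating a real block in the tags, e.g. when
    // no replaceable block is available for a fill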
    tempBlock = new CacheBlk();
    tempBlock->data = new uint8_t[blkSize];

    cpuSidePort = new CpuSidePort(p->name + ".cpu_side", this,
                                  "CpuSidePort");
    memSidePort = new MemSidePort(p->name + ".mem_side", this,
                                  "MemSidePort");

    tags->setCache(this);
    if (prefetcher)
        prefetcher->setCache(this);
}

Cache::~Cache()
{
    delete [] tempBlock->data;
    delete tempBlock;

    delete cpuSidePort;
    delete memSidePort;
}

void
Cache::regStats()
{
    BaseCache::regStats();
}

void
Cache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

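    // locate the addressed bytes within the cache block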
    int offset = tags->extractBlkOffset(pkt->getAddr());
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}


void
Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
                             bool deferred_response, bool pending_downgrade)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsWritable() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        cmpAndSwap(blk, pkt);
    } else if (pkt->isWrite()) {
        // we have the block in a writable state and can go ahead,
        // note that the line may also be considered writable in
        // downstream caches along the path to memory, but always
        // Exclusive, and never Modified
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in writable state
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty (and thus transition to the
        // Modified state) even if we are a failed StoreCond so we
        // supply data to any snoops that have appended themselves to
        // this cache before knowing the store will fail.
        blk->status |= BlkDirty;
        DPRINTF(CacheVerbose, "%s for %s addr %#llx size %d (write)\n",
                __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }

        // all read responses have a data payload
        assert(pkt->hasRespData());
        pkt->setDataFromBlock(blk->data, blkSize);

        // determine if this read is from a (coherent) cache, or not
        // by looking at the command type; we could potentially add a
        // packet attribute such as 'FromCache' to make this check a
        // bit cleaner
        if (pkt->cmd == MemCmd::ReadExReq ||
            pkt->cmd == MemCmd::ReadSharedReq ||
            pkt->cmd == MemCmd::ReadCleanReq ||
            pkt->cmd == MemCmd::SCUpgradeFailReq) {
            assert(pkt->getSize() == blkSize);
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsWritable()) {
                // sanity check
                assert(pkt->cmd == MemCmd::ReadExReq ||
                       pkt->cmd == MemCmd::SCUpgradeFailReq);

                // if we have a dirty copy, make sure the recipient
                // keeps it marked dirty (in the modified state)
                if (blk->isDirty()) {
                    pkt->setCacheResponding();
                }
                // on ReadExReq we give up our copy unconditionally,
                // even if this cache is mostly inclusive, we may want
                // to revisit this
                invalidateBlock(blk);
            } else if (blk->isWritable() && !pending_downgrade &&
                       !pkt->hasSharers() &&
                       pkt->cmd != MemCmd::ReadCleanReq) {
                // we can give the requester a writable copy on a read
                // request if:
                // - we have a writable copy at this level (& below)
                // - we don't have a pending snoop from below
                //   signaling another read request
                // - no other cache above has a copy (otherwise it
                //   would have set hasSharers flag when
                //   snooping the packet)
                // - the read has not explicitly asked for a clean
                //   copy of the line
                if (blk->isDirty()) {
                    // special considerations if we're owner:
                    if (!deferred_response) {
                        // respond with the line in Modified state
                        // (cacheResponding set, hasSharers not set)
                        pkt->setCacheResponding();

                        if (clusivity == Enums::mostly_excl) {
                            // if this cache is mostly exclusive with
                            // respect to the cache above, drop the
                            // block, no need to first unset the dirty
                            // bit
                            invalidateBlock(blk);
                        } else {
                            // if this cache is mostly inclusive, we
                            // keep the block in the Exclusive state,
                            // and pass it upwards as Modified
                            // (writable and dirty), hence we have
                            // multiple caches, all on the same path
                            // towards memory, all considering the
                            // same block writable, but only one
                            // considering it Modified

                            // we get away with multiple caches (on
                            // the same path to memory) considering
                            // the block writable as we always enter
                            // the cache hierarchy through a cache,
                            // and first snoop upwards in all other
                            // branches
                            blk->status &= ~BlkDirty;
                        }
                    } else {
                        // if we're responding after our own miss,
                        // there's a window where the recipient didn't
                        // know it was getting ownership and may not
                        // have responded to snoops correctly, so we
                        // have to respond with a shared line
                        pkt->setHasSharers();
                    }
                }
            } else {
                // otherwise only respond with a shared copy
                pkt->setHasSharers();
            }
        }
    } else {
        // Upgrade or Invalidate
        assert(pkt->isUpgrade() || pkt->isInvalidate());

        // for invalidations we could be looking at the temp block
        // (for upgrades we always allocate)
        invalidateBlock(blk);
        DPRINTF(CacheVerbose, "%s for %s addr %#llx size %d (invalidation)\n",
                __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
    }
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

bool
Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
              PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());

    DPRINTF(CacheVerbose, "%s for %s addr %#llx size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    if (pkt->req->isUncacheable()) {
        DPRINTF(Cache, "%s%s addr %#llx uncacheable\n", pkt->cmdString(),
                pkt->req->isInstFetch() ? " (ifetch)" : "",
                pkt->getAddr());

        // flush and invalidate any existing block
        CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
        if (old_blk && old_blk->isValid()) {
            if (old_blk->isDirty() || writebackClean)
                writebacks.push_back(writebackBlk(old_blk));
            else
                writebacks.push_back(cleanEvictBlk(old_blk));
            tags->invalidate(old_blk);
            old_blk->invalidate();
        }

        blk = NULL;
        // lookupLatency is the latency in case the request is uncacheable.
        lat = lookupLatency;
        return false;
    }

    ContextID id = pkt->req->hasContextId() ?
        pkt->req->contextId() : InvalidContextID;
    // lat is passed by reference here; accessBlock() may update its value.
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat, id);

    DPRINTF(Cache, "%s%s addr %#llx size %d (%s) %s\n", pkt->cmdString(),
            pkt->req->isInstFetch() ? " (ifetch)" : "",
            pkt->getAddr(), pkt->getSize(), pkt->isSecure() ? "s" : "ns",
            blk ? "hit " + blk->print() : "miss");


    if (pkt->isEviction()) {
        // We check for presence of block in above caches before issuing
        // Writeback or CleanEvict to write buffer. Therefore the only
        // possible case is a CleanEvict packet coming from above and
        // encountering a Writeback generated in this cache that is
        // waiting in the write buffer. Cases of upper level peer caches
        // generating CleanEvict and Writeback or simply CleanEvict and
        // CleanEvict almost simultaneously will be caught by snoops sent
        // out by the crossbar.
        WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
                                                          pkt->isSecure());
        if (wb_entry) {
            assert(wb_entry->getNumTargets() == 1);
            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
            assert(wbPkt->isWriteback());

            if (pkt->isCleanEviction()) {
                // CleanEvict and WritebackClean packets snoop into other
                // peer caches of the same level while traversing the
                // crossbar. If a copy of the block is found, the
                // packet is deleted in the crossbar. Hence, none of
                // the other upper level caches connected to this
                // cache have the block, so we can clear the
                // BLOCK_CACHED flag in the Writeback if set and
                // discard the CleanEvict by returning true.
                wbPkt->clearBlockCached();
                return true;
            } else {
                assert(pkt->cmd == MemCmd::WritebackDirty);
                // Dirty writeback from above trumps our clean
                // writeback... discard here
                // Note: markInService will remove entry from writeback buffer.
                markInService(wb_entry);
                delete wbPkt;
            }
        }
    }

    // Writeback handling is a special case.  We can write the block into
    // the cache without having a writable copy (or any copy at all).
    if (pkt->isWriteback()) {
        assert(blkSize == pkt->getSize());

        // we could get a clean writeback while we are having
        // outstanding accesses to a block, do the simple thing for
        // now and drop the clean writeback so that we do not upset
        // any ordering/decisions about ownership already taken
        if (pkt->cmd == MemCmd::WritebackClean &&
            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
                    "dropping\n", pkt->getAddr());
            return true;
        }

        if (blk == NULL) {
            // need to do a replacement
            blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks);
            if (blk == NULL) {
                // no replaceable block available: give up, fwd to next level.
                incMissCount(pkt);
                return false;
            }
            tags->insertBlock(pkt, blk);

            blk->status = (BlkValid | BlkReadable);
            if (pkt->isSecure()) {
                blk->status |= BlkSecure;
            }
        }
        // only mark the block dirty if we got a writeback command,
        // and leave it as is for a clean writeback
        if (pkt->cmd == MemCmd::WritebackDirty) {
            blk->status |= BlkDirty;
        }
        // if the packet does not have sharers, it is passing
        // writable, and we got the writeback in Modified or Exclusive
        // state; if not, we are in the Owned or Shared state
        if (!pkt->hasSharers()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
        incHitCount(pkt);
        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        if (blk != NULL) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and delete
            // it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the CleanEvict
        // like a Writeback which could not find a replaceable block so has to
        // go to next level.
        return false;
    } else if ((blk != NULL) &&
               (pkt->needsWritable() ? blk->isWritable() : blk->isReadable())) {
        // OK to satisfy access
        incHitCount(pkt);
        satisfyCpuSideRequest(pkt, blk);
        return true;
    }

    // Can't satisfy access normally... either no block (blk == NULL)
    // or have block but need writable

    incMissCount(pkt);

    if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}

void
Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // We use forwardLatency here because we are copying writebacks to
        // write buffer.  Call isCachedAbove for both Writebacks and
        // CleanEvicts. If isCachedAbove returns true we set BLOCK_CACHED flag
        // in Writebacks and discard CleanEvicts.
        if (isCachedAbove(wbPkt)) {
            if (wbPkt->cmd == MemCmd::CleanEvict) {
                // Delete CleanEvict because cached copies exist above. The
                // packet destructor will delete the request object because
                // this is a non-snoop request packet which does not require a
                // response.
                delete wbPkt;
            } else if (wbPkt->cmd == MemCmd::WritebackClean) {
                // clean writeback, do not send since the block is
                // still cached above
                assert(writebackClean);
                delete wbPkt;
            } else {
                assert(wbPkt->cmd == MemCmd::WritebackDirty);
                // Set BLOCK_CACHED flag in Writeback and send below, so that
                // the Writeback does not reset the bit corresponding to this
                // address in the snoop filter below.
                wbPkt->setBlockCached();
                allocateWriteBuffer(wbPkt, forward_time);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            allocateWriteBuffer(wbPkt, forward_time);
        }
        writebacks.pop_front();
    }
}

void
Cache::doWritebacksAtomic(PacketList& writebacks)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // Call isCachedAbove for both Writebacks and CleanEvicts. If
        // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
        // and discard CleanEvicts.
        if (isCachedAbove(wbPkt, false)) {
            if (wbPkt->cmd == MemCmd::WritebackDirty) {
                // Set BLOCK_CACHED flag in Writeback and send below,
                // so that the Writeback does not reset the bit
                // corresponding to this address in the snoop filter
                // below. We can discard CleanEvicts because cached
                // copies exist above. Atomic mode isCachedAbove
                // modifies packet to set BLOCK_CACHED flag
                memSidePort->sendAtomic(wbPkt);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            memSidePort->sendAtomic(wbPkt);
        }
        writebacks.pop_front();
        // In case of CleanEvicts, the packet destructor will delete the
        // request object because this is a non-snoop request packet which
        // does not require a response.
        delete wbPkt;
    }
}

void
Cache::recvTimingSnoopResp(PacketPtr pkt)
{
    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    assert(pkt->isResponse());
    assert(!system->bypassCaches());

    // determine if the response is from a snoop request we created
    // (in which case it should be in the outstandingSnoop), or if we
    // merely forwarded someone else's snoop request
    const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
        outstandingSnoop.end();

    if (!forwardAsSnoop) {
        // the packet came from this cache, so sink it here and do not
        // forward it
        assert(pkt->cmd == MemCmd::HardPFResp);

        outstandingSnoop.erase(pkt->req);

        DPRINTF(Cache, "Got prefetch response from above for addr "
                "%#llx (%s)\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
        recvTimingResp(pkt);
        return;
    }

    // forwardLatency is set here because there is a response from an
    // upper level cache.
    // To pay the delay that occurs if the packet comes from the bus,
    // we also charge headerDelay.
    Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    memSidePort->schedTimingSnoopResp(pkt, snoop_resp_time);
}

void
Cache::promoteWholeLineWrites(PacketPtr pkt)
{
    // Cache line clearing instructions
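    // (a block-aligned store that covers the whole line does not need
    // the old data, so it can be promoted to a whole-line write)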
    if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) {
        pkt->cmd = MemCmd::WriteLineReq;
        DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
    }
}

bool
Cache::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(CacheTags, "%s tags: %s\n", __func__, tags->print());

    assert(pkt->isRequest());

    // Just forward the packet if caches are disabled.
    if (system->bypassCaches()) {
        // @todo This should really enqueue the packet rather
        bool M5_VAR_USED success = memSidePort->sendTimingReq(pkt);
        assert(success);
        return true;
    }

    promoteWholeLineWrites(pkt);

    if (pkt->cacheResponding()) {
        // a cache above us (but not where the packet came from) is
        // responding to the request, in other words it has the line
        // in Modified or Owned state
        DPRINTF(Cache, "Cache above responding to %#llx (%s): "
                "not responding\n",
                pkt->getAddr(), pkt->isSecure() ? "s" : "ns");

        // if the packet needs the block to be writable, and the cache
        // that has promised to respond (setting the cache responding
        // flag) is not providing writable (it is in Owned rather than
        // the Modified state), we know that there may be other Shared
        // copies in the system; go out and invalidate them all
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        // an upstream cache that had the line in Owned state
        // (dirty, but not writable), is responding and thus
        // transferring the dirty line from one branch of the
        // cache hierarchy to another

        // send out an express snoop and invalidate all other
        // copies (snooping a packet that needs writable is the
        // same as an invalidation), thus turning the Owned line
        // into a Modified line, note that we don't invalidate the
        // block in the current cache or any other cache on the
        // path to memory

        // create a downstream express snoop with cleared packet
        // flags, there is no need to allocate any data as the
        // packet is merely used to co-ordinate state transitions
        Packet *snoop_pkt = new Packet(pkt, true, false);

        // also reset the bus time that the original packet has
        // not yet paid for
        snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;

        // make this an instantaneous express snoop, and let the
        // other caches in the system know that another cache
        // is responding, because we have found the authoritative
        // copy (Modified or Owned) that will supply the right
        // data
        snoop_pkt->setExpressSnoop();
        snoop_pkt->setCacheResponding();

        // this express snoop travels towards the memory, and at
        // every crossbar it is snooped upwards thus reaching
        // every cache in the system
        bool M5_VAR_USED success = memSidePort->sendTimingReq(snoop_pkt);
        // express snoops always succeed
        assert(success);

        // main memory will delete the snoop packet

        // queue for deletion, as opposed to immediate deletion, as
        // the sending cache is still relying on the packet
        pendingDelete.reset(pkt);

        // no need to take any further action in this particular cache
        // as an upstream cache has already committed to responding,
        // and we have already sent out any express snoops in the
        // section above to ensure all other copies in the system are
        // invalidated
        return true;
    }

    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    // We use lookupLatency here because it specifies the latency of
    // the tag lookup that access() performs.
    Cycles lat = lookupLatency;
    CacheBlk *blk = NULL;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() calls accessBlock() which can modify lat value.
        satisfied = access(pkt, blk, lat, writebacks);

        // copy writebacks to write buffer here to ensure they logically
        // precede anything happening below
        doWritebacks(writebacks, forward_time);
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is just lat, i.e. lookupLatency possibly
    // modified by the access() call.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;


    // track time of availability of next prefetch, if any
    Tick next_pf_time = MaxTick;

    bool needsResponse = pkt->needsResponse();

    if (satisfied) {
        // should never be satisfying an uncacheable access as we
        // flush and invalidate any existing block as part of the
        // lookup
        assert(!pkt->req->isUncacheable());

        // hit (for all other request types)

        if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
            if (blk)
                blk->status &= ~BlkHWPrefetched;

            // Don't notify on SWPrefetch
            if (!pkt->cmd.isSWPrefetch())
                next_pf_time = prefetcher->notify(pkt);
        }

        if (needsResponse) {
            pkt->makeTimingResponse();
            // @todo: Make someone pay for this
            pkt->headerDelay = pkt->payloadDelay = 0;

            // In this case we are considering request_time that takes
            // into account the delay of the xbar, if any, and just
            // lat, neglecting responseLatency, modelling hit latency
            // just as lookupLatency or the value of lat overridden
            // by access(), which calls the accessBlock() function.
            cpuSidePort->schedTimingResp(pkt, request_time, true);
        } else {
            DPRINTF(Cache, "%s satisfied %s addr %#llx, no response needed\n",
                    __func__, pkt->cmdString(), pkt->getAddr(),
                    pkt->getSize());

            // queue the packet for deletion, as the sending cache is
            // still relying on it; if the block is found in access(),
            // CleanEvict and Writeback messages will be deleted
            // here as well
            pendingDelete.reset(pkt);
        }
    } else {
        // miss

        Addr blk_addr = blockAlign(pkt->getAddr());

        // ignore any existing MSHR if we are dealing with an
        // uncacheable request
        MSHR *mshr = pkt->req->isUncacheable() ? nullptr :
            mshrQueue.findMatch(blk_addr, pkt->isSecure());

        // Software prefetch handling:
        // To keep the core from waiting on data it won't look at
        // anyway, send back a response with dummy data. Miss handling
        // will continue asynchronously. Unfortunately, the core will
        // insist upon freeing original Packet/Request, so we have to
        // create a new pair with a different lifecycle. Note that this
        // processing happens before any MSHR munging on the behalf of
        // this request because this new Request will be the one stored
        // into the MSHRs, not the original.
        if (pkt->cmd.isSWPrefetch()) {
            assert(needsResponse);
            assert(pkt->req->hasPaddr());
            assert(!pkt->req->isUncacheable());

            // There's no reason to add a prefetch as an additional target
            // to an existing MSHR. If an outstanding request is already
            // in progress, there is nothing for the prefetch to do.
            // If this is the case, we don't even create a request at all.
            PacketPtr pf = nullptr;

            if (!mshr) {
                // copy the request and create a new SoftPFReq packet
                RequestPtr req = new Request(pkt->req->getPaddr(),
                                             pkt->req->getSize(),
                                             pkt->req->getFlags(),
                                             pkt->req->masterId());
                pf = new Packet(req, pkt->cmd);
                pf->allocate();
                assert(pf->getAddr() == pkt->getAddr());
                assert(pf->getSize() == pkt->getSize());
            }

            pkt->makeTimingResponse();

            // request_time is used here, taking into account lat and the delay
            // charged if the packet comes from the xbar.
            cpuSidePort->schedTimingResp(pkt, request_time, true);

            // If an outstanding request is in progress (we found an
            // MSHR) this is set to null
            pkt = pf;
        }

        if (mshr) {
            /// MSHR hit
            /// @note writebacks will be checked in getNextMSHR()
            /// for any conflicting requests to the same block

            //@todo remove hw_pf here

            // Coalesce unless it was a software prefetch (see above).
            if (pkt) {
                assert(!pkt->isWriteback());
                // CleanEvicts corresponding to blocks which have
                // outstanding requests in MSHRs are simply sunk here
                if (pkt->cmd == MemCmd::CleanEvict) {
                    pendingDelete.reset(pkt);
                } else {
                    DPRINTF(Cache, "%s coalescing MSHR for %s addr %#llx size %d\n",
                            __func__, pkt->cmdString(), pkt->getAddr(),
                            pkt->getSize());

                    assert(pkt->req->masterId() < system->maxMasters());
                    mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
                    // We use forward_time here because it is the same
                    // considering new targets. We have multiple
                    // requests for the same address here. It
                    // specifies the latency to allocate an internal
                    // buffer and to schedule an event to the queued
                    // port and also takes into account the additional
                    // delay of the xbar.
                    mshr->allocateTarget(pkt, forward_time, order++,
                                         allocOnFill(pkt->cmd));
                    if (mshr->getNumTargets() == numTarget) {
                        noTargetMSHR = mshr;
                        setBlocked(Blocked_NoTargets);
                        // need to be careful with this... if this mshr isn't
                        // ready yet (i.e. time > curTick()), we don't want to
                        // move it ahead of mshrs that are ready
                        // mshrQueue.moveToFront(mshr);
                    }
                }
                // We should call the prefetcher regardless of whether the
                // request is satisfied or not, and regardless of whether the
                // request is in the MSHR or not. The request could be a
                // ReadReq hit, but still not satisfied (potentially because
                // of a prior write to the same cache line), so even when not
                // satisfied and there is an MSHR already allocated for this,
                // we need to let the prefetcher know about the request
                if (prefetcher) {
                    // Don't notify on SWPrefetch
                    if (!pkt->cmd.isSWPrefetch())
                        next_pf_time = prefetcher->notify(pkt);
                }
            }
        } else {
            // no MSHR
            assert(pkt->req->masterId() < system->maxMasters());
            if (pkt->req->isUncacheable()) {
                mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;
            } else {
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
            }

            if (pkt->isEviction() ||
                (pkt->req->isUncacheable() && pkt->isWrite())) {
                // We use forward_time here because the eviction or
                // uncacheable write is forwarded to the write buffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                if (blk && blk->isValid()) {
                    // should have flushed and have no valid block
                    assert(!pkt->req->isUncacheable());

                    // If we have a write miss to a valid block, we
                    // need to mark the block non-readable.  Otherwise
                    // if we allow reads while there's an outstanding
                    // write miss, the read could return stale data
                    // out of the cache block... a more aggressive
                    // system could detect the overlap (if any) and
                    // forward data out of the MSHRs, but we don't do
                    // that yet.  Note that we do need to leave the
                    // block valid so that it stays in the cache, in
                    // case we get an upgrade response (and hence no
                    // new data) when the write miss completes.
                    // As long as CPUs do proper store/load forwarding
                    // internally, and have a sufficiently weak memory
                    // model, this is probably unnecessary, but at some
                    // point it must have seemed like we needed it...
                    assert(pkt->needsWritable());
                    assert(!blk->isWritable());
                    blk->status &= ~BlkReadable;
                }
                // Here we are using forward_time, modelling the latency of
                // a miss (outbound) just as forwardLatency, neglecting the
                // lookupLatency component.
                allocateMissBuffer(pkt, forward_time);
            }

            if (prefetcher) {
                // Don't notify on SWPrefetch
                if (!pkt->cmd.isSWPrefetch())
                    next_pf_time = prefetcher->notify(pkt);
            }
        }
    }

    if (next_pf_time != MaxTick)
        schedMemSideSendEvent(next_pf_time);

    return true;
}


// See comment in cache.hh.
PacketPtr
Cache::getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                    bool needsWritable) const
{
    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable()) {
        // note that at the point we see the uncacheable request we
        // flush any block, but there could be an outstanding MSHR,
        // and the cache could have filled again before we actually
        // send out the forwarded uncacheable request (blk could thus
        // be non-null)
        return NULL;
    }

    if (!blkValid &&
        (cpu_pkt->isUpgrade() ||
         cpu_pkt->isEviction())) {
        // Writebacks that weren't allocated in access() and upgrades
        // from upper-level caches that missed completely just go
        // through.
        return NULL;
    }

    assert(cpu_pkt->needsResponse());

    MemCmd cmd;
    // @TODO make useUpgrades a parameter.
    // Note that ownership protocols require upgrade, otherwise a
    // write miss on a shared owned block will generate a ReadExcl,
    // which will clobber the owned copy.
    const bool useUpgrades = true;
    if (blkValid && useUpgrades) {
        // only reason to be here is that blk is read only and we need
        // it to be writable
        assert(needsWritable);
        assert(!blk->isWritable());
        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
               cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
        // Even though this SC will fail, we still need to send out the
        // request and get the data to supply it to other snoopers in the case
        // where the determination that the StoreCond fails is delayed due to
        // all caches not being on the same local bus.
        cmd = MemCmd::SCUpgradeFailReq;
    } else if (cpu_pkt->cmd == MemCmd::WriteLineReq ||
               cpu_pkt->cmd == MemCmd::InvalidateReq) {
        // forward as invalidate to all other caches, this gives us
        // the line in Exclusive state, and invalidates all other
        // copies
        cmd = MemCmd::InvalidateReq;
    } else {
        // block is invalid
        cmd = needsWritable ? MemCmd::ReadExReq :
            (isReadOnly ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
    }
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);

    // if there are upstream caches that have already marked the
    // packet as having sharers (not passing writable), pass that info
    // downstream
    if (cpu_pkt->hasSharers()) {
        // note that cpu_pkt may have spent a considerable time in the
        // MSHR queue and that the information could possibly be out
        // of date, however, there is no harm in conservatively
        // assuming the block has sharers
        pkt->setHasSharers();
        DPRINTF(Cache, "%s passing hasSharers from %s to %s addr %#llx "
                "size %d\n",
                __func__, cpu_pkt->cmdString(), pkt->cmdString(),
                pkt->getAddr(), pkt->getSize());
    }

    // the packet should be block aligned
    assert(pkt->getAddr() == blockAlign(pkt->getAddr()));

    pkt->allocate();
    DPRINTF(Cache, "%s created %s from %s for addr %#llx size %d\n",
            __func__, pkt->cmdString(), cpu_pkt->cmdString(), pkt->getAddr(),
            pkt->getSize());
    return pkt;
}


Tick
Cache::recvAtomic(PacketPtr pkt)
{
    // We are in atomic mode so we pay just for lookupLatency here.
    Cycles lat = lookupLatency;

    // Forward the request if the system is in cache bypass mode.
    if (system->bypassCaches())
        return ticksToCycles(memSidePort->sendAtomic(pkt));

    promoteWholeLineWrites(pkt);

    // follow the same flow as in recvTimingReq, and check if a cache
    // above us is responding
    if (pkt->cacheResponding()) {
        DPRINTF(Cache, "Cache above responding to %#llx (%s): "
                "not responding\n",
                pkt->getAddr(), pkt->isSecure() ? "s" : "ns");

        // if a cache is responding, and it had the line in Owned
        // rather than Modified state, we need to invalidate any
        // copies that are not on the same path to memory
        assert(pkt->needsWritable() && !pkt->responderHadWritable());
        lat += ticksToCycles(memSidePort->sendAtomic(pkt));

        return lat * clockPeriod();
    }

    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    CacheBlk *blk = NULL;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);

    if (!satisfied) {
        // MISS

        PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsWritable());

        bool is_forward = (bus_pkt == NULL);

        if (is_forward) {
            // just forwarding the same request to the next level
            // no local cache operation involved
            bus_pkt = pkt;
        }

        DPRINTF(Cache, "Sending an atomic %s for %#llx (%s)\n",
                bus_pkt->cmdString(), bus_pkt->getAddr(),
                bus_pkt->isSecure() ? "s" : "ns");

#if TRACING_ON
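        // record the block's state before the request; used only in
        // the trace output below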
        CacheBlk::State old_state = blk ? blk->status : 0;
#endif

        lat += ticksToCycles(memSidePort->sendAtomic(bus_pkt));

        // We are now dealing with the response handling
        DPRINTF(Cache, "Receive response: %s for addr %#llx (%s) in state %i\n",
                bus_pkt->cmdString(), bus_pkt->getAddr(),
                bus_pkt->isSecure() ? "s" : "ns",
                old_state);

        // If packet was a forward, the response (if any) is already
        // in place in the bus_pkt == pkt structure, so we don't need
        // to do anything.  Otherwise, use the separate bus_pkt to
        // generate response to pkt and then delete it.
        if (!is_forward) {
            if (pkt->needsResponse()) {
                assert(bus_pkt->isResponse());
                if (bus_pkt->isError()) {
                    pkt->makeAtomicResponse();
                    pkt->copyError(bus_pkt);
                } else if (pkt->cmd == MemCmd::InvalidateReq) {
                    if (blk) {
                        // invalidate response to a cache that received
                        // an invalidate request
                        satisfyCpuSideRequest(pkt, blk);
                    }
                } else if (pkt->cmd == MemCmd::WriteLineReq) {
                    // note the use of pkt, not bus_pkt here.

                    // write-line request to the cache that promoted
                    // the write to a whole line
                    blk = handleFill(pkt, blk, writebacks,
                                     allocOnFill(pkt->cmd));
                    satisfyCpuSideRequest(pkt, blk);
                } else if (bus_pkt->isRead() ||
                           bus_pkt->cmd == MemCmd::UpgradeResp) {
                    // we're updating cache state to allow us to
                    // satisfy the upstream request from the cache
                    blk = handleFill(bus_pkt, blk, writebacks,
                                     allocOnFill(pkt->cmd));
                    satisfyCpuSideRequest(pkt, blk);
                } else {
                    // we're satisfying the upstream request without
                    // modifying cache state, e.g., a write-through
                    pkt->makeAtomicResponse();
                }
            }
            delete bus_pkt;
        }
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention.  If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used temp block, check to see if its valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter), first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = (blk->isDirty() || writebackClean) ?
            writebackBlk(blk) : cleanEvictBlk(blk);
        blk->invalidate();
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

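    // convert the accumulated latency from cycles back to ticks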
    return lat * clockPeriod();
}


void
Cache::functionalAccess(PacketPtr pkt, bool fromCpuSide)
{
    if (system->bypassCaches()) {
        // Packets from the memory side are snoop requests and
        // shouldn't happen in bypass mode.
        assert(fromCpuSide);

        // The cache should be flushed if we are in cache bypass mode,
        // so we don't need to check if we need to update anything.
        memSidePort->sendFunctional(pkt);
        return;
    }

    Addr blk_addr = blockAlign(pkt->getAddr());
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found.  As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.

    // see if we have data at all (owned or otherwise)
    bool have_data = blk && blk->isValid()
        && pkt->checkFunctional(&cbpw, blk_addr, is_secure, blkSize,
                                blk->data);

    // data we have is dirty if marked as such or if we have an
    // in-service MSHR that is pending a modified line
    bool have_dirty =
        have_data && (blk->isDirty() ||
                      (mshr && mshr->inService && mshr->isPendingModified()));

    bool done = have_dirty
        || cpuSidePort->checkFunctional(pkt)
        || mshrQueue.checkFunctional(pkt, blk_addr)
        || writeBuffer.checkFunctional(pkt, blk_addr)
        || memSidePort->checkFunctional(pkt);

    DPRINTF(CacheVerbose, "functional %s %#llx (%s) %s%s%s\n",
            pkt->cmdString(), pkt->getAddr(), is_secure ? "s" : "ns",
            (blk && blk->isValid()) ? "valid " : "",
            have_data ? "data " : "", done ? "done " : "");

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (done) {
        pkt->makeResponse();
    } else {
        // if it came as a request from the CPU side then make sure it
        // continues towards the memory side
        if (fromCpuSide) {
            memSidePort->sendFunctional(pkt);
        } else if (forwardSnoops && cpuSidePort->isSnooping()) {
            // if it came from the memory side, it must be a snoop request
            // and we should only forward it if we are forwarding snoops
            cpuSidePort->sendFunctionalSnoop(pkt);
        }
    }
}


/////////////////////////////////////////////////////
//
// Response handling: responses from the memory side
//
/////////////////////////////////////////////////////


void
Cache::handleUncacheableWriteResp(PacketPtr pkt)
{
    WriteQueueEntry *wq_entry =
        dynamic_cast<WriteQueueEntry*>(pkt->senderState);
    assert(wq_entry);

    WriteQueueEntry::Target *target = wq_entry->getTarget();
    Packet *tgt_pkt = target->pkt;

    // we send out invalidation reqs and get invalidation
    // responses for write-line requests
    assert(tgt_pkt->cmd != MemCmd::WriteLineReq);

    int stats_cmd_idx = tgt_pkt->cmdToIndex();
    Tick miss_latency = curTick() - target->recvTime;
    assert(pkt->req->masterId() < system->maxMasters());
    mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
        miss_latency;

    tgt_pkt->makeTimingResponse();
    // if this packet is an error copy that to the new packet
    if (pkt->isError())
        tgt_pkt->copyError(pkt);
    // Reset the bus additional time as it is now accounted for
    tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
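    // charge the response latency plus any delays still carried by
    // the incoming packet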
    Tick completion_time = clockEdge(responseLatency) +
        pkt->headerDelay + pkt->payloadDelay;

    cpuSidePort->schedTimingResp(tgt_pkt, completion_time, true);

    wq_entry->popTarget();
    assert(!wq_entry->hasTargets());

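    // freeing this entry may unblock the cache if the write buffer was full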
    bool wasFull = writeBuffer.isFull();
    writeBuffer.deallocate(wq_entry);

    if (wasFull && !writeBuffer.isFull()) {
        clearBlocked(Blocked_NoWBBuffers);
    }

    delete pkt;
}
1268
1269void
1270Cache::recvTimingResp(PacketPtr pkt)
1271{
1272    assert(pkt->isResponse());
1273
1274    // all header delay should be paid for by the crossbar, unless
1275    // this is a prefetch response from above
1276    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
1277             "%s saw a non-zero packet delay\n", name());
1278
1279    bool is_error = pkt->isError();
1280
1281    if (is_error) {
1282        DPRINTF(Cache, "Cache received packet with error for addr %#llx (%s), "
1283                "cmd: %s\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns",
1284                pkt->cmdString());
1285    }
1286
1287    DPRINTF(Cache, "Handling response %s for addr %#llx size %d (%s)\n",
1288            pkt->cmdString(), pkt->getAddr(), pkt->getSize(),
1289            pkt->isSecure() ? "s" : "ns");
1290
1291    // if this is a write, we should be looking at an uncacheable
1292    // write
1293    if (pkt->isWrite()) {
1294        assert(pkt->req->isUncacheable());
1295        handleUncacheableWriteResp(pkt);
1296        return;
1297    }
1298
1299    // we have dealt with any (uncacheable) writes above, from here on
1300    // we know we are dealing with an MSHR due to a miss or a prefetch
1301    MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
1302    assert(mshr);
1303
1304    if (mshr == noTargetMSHR) {
1305        // we always clear at least one target
1306        clearBlocked(Blocked_NoTargets);
1307        noTargetMSHR = NULL;
1308    }
1309
1310    // Initial target is used just for stats
1311    MSHR::Target *initial_tgt = mshr->getTarget();
1312    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
1313    Tick miss_latency = curTick() - initial_tgt->recvTime;
1314
1315    if (pkt->req->isUncacheable()) {
1316        assert(pkt->req->masterId() < system->maxMasters());
1317        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
1318            miss_latency;
1319    } else {
1320        assert(pkt->req->masterId() < system->maxMasters());
1321        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
1322            miss_latency;
1323    }
1324
1325    bool wasFull = mshrQueue.isFull();
1326
1327    PacketList writebacks;
1328
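    // note: forward_time is only consumed when the writebacks
    // gathered below are copied into the write buffer at the end of
    // this function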
1329    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
1330
1331    // upgrade deferred targets if the response has no sharers, and is
1332    // thus passing writable
1333    if (!pkt->hasSharers()) {
1334        mshr->promoteWritable();
1335    }
1336
1337    bool is_fill = !mshr->isForward &&
1338        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
1339
1340    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
1341
1342    if (is_fill && !is_error) {
1343        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
1344                pkt->getAddr());
1345
1346        blk = handleFill(pkt, blk, writebacks, mshr->allocOnFill);
1347        assert(blk != NULL);
1348    }
1349
1350    // allow invalidation responses originating from write-line
1351    // requests to be discarded
1352    bool is_invalidate = pkt->isInvalidate();
1353
1354    // First offset for critical word first calculations
1355    int initial_offset = initial_tgt->pkt->getOffset(blkSize);
1356
1357    while (mshr->hasTargets()) {
1358        MSHR::Target *target = mshr->getTarget();
1359        Packet *tgt_pkt = target->pkt;
1360
1361        switch (target->source) {
1362          case MSHR::Target::FromCPU:
1363            Tick completion_time;
1364            // Here we charge on completion_time the crossbar delay of
1365            // the incoming packet, carried on headerDelay.
1366            completion_time = pkt->headerDelay;
1367
1368            // Software prefetch handling for cache closest to core
1369            if (tgt_pkt->cmd.isSWPrefetch()) {
1370                // a software prefetch would have already been ack'd immediately
1371                // with dummy data so the core would be able to retire it.
1372                // this request completes right here, so we deallocate it.
1373                delete tgt_pkt->req;
1374                delete tgt_pkt;
1375                break; // skip response
1376            }
1377
1378            // unlike the other packet flows, where data is found in other
1379            // caches or memory and brought back, write-line requests always
1380            // have the data right away, so whether this is a fill
1381            // cannot be determined until examining the stored MSHR
1382            // state. We "catch up" with that logic here, which is duplicated
1383            // from above.
1384            if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
1385                assert(!is_error);
1386                // we got the block in a writable state, so promote
1387                // any deferred targets if possible
1388                mshr->promoteWritable();
1389                // NB: we use the original packet here and not the response!
1390                blk = handleFill(tgt_pkt, blk, writebacks, mshr->allocOnFill);
1391                assert(blk != NULL);
1392
1393                // treat as a fill, and discard the invalidation
1394                // response
1395                is_fill = true;
1396                is_invalidate = false;
1397            }
1398
1399            if (is_fill) {
1400                satisfyCpuSideRequest(tgt_pkt, blk,
1401                                      true, mshr->hasPostDowngrade());
1402
1403                // How many bytes past the first request is this one
1404                int transfer_offset =
1405                    tgt_pkt->getOffset(blkSize) - initial_offset;
1406                if (transfer_offset < 0) {
1407                    transfer_offset += blkSize;
1408                }
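                // e.g., with blkSize 64, an initial (critical) offset
                // of 48 and a target offset of 16: 16 - 48 + 64 = 32,
                // so this target also waits for the payload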
1409
1410                // If this is not the critical word, also charge the
1411                // payloadDelay. responseLatency is the latency of the
1412                // return path from lower level caches/memory to an
1413                // upper level cache or the core.
1414                completion_time += clockEdge(responseLatency) +
1415                    (transfer_offset ? pkt->payloadDelay : 0);
1416
1417                assert(!tgt_pkt->req->isUncacheable());
1418
1419                assert(tgt_pkt->req->masterId() < system->maxMasters());
1420                missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] +=
1421                    completion_time - target->recvTime;
1422            } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
1423                // failed StoreCond upgrade
1424                assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
1425                       tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
1426                       tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
1427                // responseLatency is the latency of the return path
1428                // from lower level caches/memory to an upper level cache or
1429                // the core.
1430                completion_time += clockEdge(responseLatency) +
1431                    pkt->payloadDelay;
1432                tgt_pkt->req->setExtraData(0);
1433            } else {
1434                // not a cache fill, just forwarding response
1435                // responseLatency is the latency of the return path
1436                // from lower level caches/memory to the core.
1437                completion_time += clockEdge(responseLatency) +
1438                    pkt->payloadDelay;
1439                if (pkt->isRead() && !is_error) {
1440                    // sanity check
1441                    assert(pkt->getAddr() == tgt_pkt->getAddr());
1442                    assert(pkt->getSize() >= tgt_pkt->getSize());
1443
1444                    tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
1445                }
1446            }
1447            tgt_pkt->makeTimingResponse();
1448            // if this packet is an error copy that to the new packet
1449            if (is_error)
1450                tgt_pkt->copyError(pkt);
1451            if (tgt_pkt->cmd == MemCmd::ReadResp &&
1452                (is_invalidate || mshr->hasPostInvalidate())) {
1453                // If intermediate cache got ReadRespWithInvalidate,
1454                // propagate that.  Response should not have
1455                // isInvalidate() set otherwise.
1456                tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
1457                DPRINTF(Cache, "%s updated cmd to %s for addr %#llx\n",
1458                        __func__, tgt_pkt->cmdString(), tgt_pkt->getAddr());
1459            }
1460            // Reset the bus additional time as it is now accounted for
1461            tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
1462            cpuSidePort->schedTimingResp(tgt_pkt, completion_time, true);
1463            break;
1464
1465          case MSHR::Target::FromPrefetcher:
1466            assert(tgt_pkt->cmd == MemCmd::HardPFReq);
1467            if (blk)
1468                blk->status |= BlkHWPrefetched;
1469            delete tgt_pkt->req;
1470            delete tgt_pkt;
1471            break;
1472
1473          case MSHR::Target::FromSnoop:
1474            // I don't believe that a snoop can be in an error state
1475            assert(!is_error);
1476            // response to snoop request
1477            DPRINTF(Cache, "processing deferred snoop...\n");
1478            assert(!(is_invalidate && !mshr->hasPostInvalidate()));
1479            handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
1480            break;
1481
1482          default:
1483            panic("Illegal target->source enum %d\n", target->source);
1484        }
1485
1486        mshr->popTarget();
1487    }
1488
1489    if (blk && blk->isValid()) {
1490        // an invalidate response stemming from a write line request
1491        // should not invalidate the block, so check if the
1492        // invalidation should be discarded
1493        if (is_invalidate || mshr->hasPostInvalidate()) {
1494            invalidateBlock(blk);
1495        } else if (mshr->hasPostDowngrade()) {
1496            blk->status &= ~BlkWritable;
1497        }
1498    }
1499
1500    if (mshr->promoteDeferredTargets()) {
1501        // avoid later read getting stale data while write miss is
1502        // outstanding.. see comment in timingAccess()
1503        if (blk) {
1504            blk->status &= ~BlkReadable;
1505        }
1506        mshrQueue.markPending(mshr);
1507        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
1508    } else {
1509        mshrQueue.deallocate(mshr);
1510        if (wasFull && !mshrQueue.isFull()) {
1511            clearBlocked(Blocked_NoMSHRs);
1512        }
1513
1514        // Request the bus for a prefetch if this deallocation freed enough
1515        // MSHRs for a prefetch to take place
1516        if (prefetcher && mshrQueue.canPrefetch()) {
1517            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
1518                                         clockEdge());
1519            if (next_pf_time != MaxTick)
1520                schedMemSideSendEvent(next_pf_time);
1521        }
1522    }
1523    // reset the xbar additional timing as it is now accounted for
1524    pkt->headerDelay = pkt->payloadDelay = 0;
1525
1526    // copy writebacks to write buffer
1527    doWritebacks(writebacks, forward_time);
1528
1529    // if we used temp block, check to see if it's valid and then clear it out
1530    if (blk == tempBlock && tempBlock->isValid()) {
1531        // We use forwardLatency here because we are copying
1532        // Writebacks/CleanEvicts to write buffer. It specifies the latency to
1533        // allocate an internal buffer and to schedule an event to the
1534        // queued port.
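        // the tempBlock is not part of the tag array, so its contents
        // must be evicted here to free it up for the next miss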
1535        if (blk->isDirty() || writebackClean) {
1536            PacketPtr wbPkt = writebackBlk(blk);
1537            allocateWriteBuffer(wbPkt, forward_time);
1538            // Set BLOCK_CACHED flag if cached above.
1539            if (isCachedAbove(wbPkt))
1540                wbPkt->setBlockCached();
1541        } else {
1542            PacketPtr wcPkt = cleanEvictBlk(blk);
1543            // Check to see if block is cached above. If not, allocate
1544            // a write buffer entry
1545            if (isCachedAbove(wcPkt))
1546                delete wcPkt;
1547            else
1548                allocateWriteBuffer(wcPkt, forward_time);
1549        }
1550        blk->invalidate();
1551    }
1552
1553    DPRINTF(CacheVerbose, "Leaving %s with %s for addr %#llx\n", __func__,
1554            pkt->cmdString(), pkt->getAddr());
1555    delete pkt;
1556}
1557
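// Create a writeback packet for the given valid block: WritebackDirty
// for dirty data, WritebackClean when a clean block is evicted by a
// cache with writebackClean set. The local copy is downgraded to
// non-writable and marked clean, as responsibility for the data now
// passes downstream.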
1558PacketPtr
1559Cache::writebackBlk(CacheBlk *blk)
1560{
1561    chatty_assert(!isReadOnly || writebackClean,
1562                  "Writeback from read-only cache");
1563    assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));
1564
1565    writebacks[Request::wbMasterId]++;
1566
1567    Request *req = new Request(tags->regenerateBlkAddr(blk->tag, blk->set),
1568                               blkSize, 0, Request::wbMasterId);
1569    if (blk->isSecure())
1570        req->setFlags(Request::SECURE);
1571
1572    req->taskId(blk->task_id);
1573    blk->task_id = ContextSwitchTaskId::Unknown;
1574    blk->tickInserted = curTick();
1575
1576    PacketPtr pkt =
1577        new Packet(req, blk->isDirty() ?
1578                   MemCmd::WritebackDirty : MemCmd::WritebackClean);
1579
1580    DPRINTF(Cache, "Create Writeback %#llx writable: %d, dirty: %d\n",
1581            pkt->getAddr(), blk->isWritable(), blk->isDirty());
1582
1583    if (blk->isWritable()) {
1584        // not asserting hasSharers means we pass the block in Modified
1585        // state, so mark our own copy non-writable
1586        blk->status &= ~BlkWritable;
1587    } else {
1588        // we are in the Owned state, tell the receiver
1589        pkt->setHasSharers();
1590    }
1591
1592    // make sure the block is not marked dirty
1593    blk->status &= ~BlkDirty;
1594
1595    pkt->allocate();
1596    std::memcpy(pkt->getPtr<uint8_t>(), blk->data, blkSize);
1597
1598    return pkt;
1599}
1600
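// Create a zero-payload CleanEvict packet, informing downstream
// components (e.g. a snoop filter) that this cache is dropping a
// clean copy of the block.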
1601PacketPtr
1602Cache::cleanEvictBlk(CacheBlk *blk)
1603{
1604    assert(!writebackClean);
1605    assert(blk && blk->isValid() && !blk->isDirty());
1606    // Creating a zero sized write, a message to the snoop filter
1607    Request *req =
1608        new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0,
1609                    Request::wbMasterId);
1610    if (blk->isSecure())
1611        req->setFlags(Request::SECURE);
1612
1613    req->taskId(blk->task_id);
1614    blk->task_id = ContextSwitchTaskId::Unknown;
1615    blk->tickInserted = curTick();
1616
1617    PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
1618    pkt->allocate();
1619    DPRINTF(Cache, "%s%s %x Create CleanEvict\n", pkt->cmdString(),
1620            pkt->req->isInstFetch() ? " (ifetch)" : "",
1621            pkt->getAddr());
1622
1623    return pkt;
1624}
1625
1626void
1627Cache::memWriteback()
1628{
1629    CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor);
1630    tags->forEachBlk(visitor);
1631}
1632
1633void
1634Cache::memInvalidate()
1635{
1636    CacheBlkVisitorWrapper visitor(*this, &Cache::invalidateVisitor);
1637    tags->forEachBlk(visitor);
1638}
1639
1640bool
1641Cache::isDirty() const
1642{
1643    CacheBlkIsDirtyVisitor visitor;
1644    tags->forEachBlk(visitor);
1645
1646    return visitor.isDirty();
1647}
1648
1649bool
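// Visitor used by memWriteback(): functionally writes each dirty
// block back to memory and clears its dirty bit; returning true keeps
// the tag walk going over the remaining blocks.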
1650Cache::writebackVisitor(CacheBlk &blk)
1651{
1652    if (blk.isDirty()) {
1653        assert(blk.isValid());
1654
1655        Request request(tags->regenerateBlkAddr(blk.tag, blk.set),
1656                        blkSize, 0, Request::funcMasterId);
1657        request.taskId(blk.task_id);
1658
1659        Packet packet(&request, MemCmd::WriteReq);
1660        packet.dataStatic(blk.data);
1661
1662        memSidePort->sendFunctional(&packet);
1663
1664        blk.status &= ~BlkDirty;
1665    }
1666
1667    return true;
1668}
1669
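// Visitor used by memInvalidate(): invalidates every valid block,
// assuming any dirty data has already been written back.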
1670bool
1671Cache::invalidateVisitor(CacheBlk &blk)
1672{
1673
1674    if (blk.isDirty())
1675        warn_once("Invalidating dirty cache lines. Expect things to break.\n");
1676
1677    if (blk.isValid()) {
1678        assert(!blk.isDirty());
1679        tags->invalidate(&blk);
1680        blk.invalidate();
1681    }
1682
1683    return true;
1684}
1685
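// Pick a victim frame for the given address, queueing a Writeback or
// CleanEvict packet for any valid victim. Returns nullptr if there is
// no victim, or if the candidate victim is pinned by an outstanding
// MSHR (transient state), in which case the fill falls back to the
// tempBlock.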
1686CacheBlk*
1687Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks)
1688{
1689    CacheBlk *blk = tags->findVictim(addr);
1690
1691    // It is valid to return NULL if there is no victim
1692    if (!blk)
1693        return nullptr;
1694
1695    if (blk->isValid()) {
1696        Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
1697        MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
1698        if (repl_mshr) {
1699            // must be an outstanding upgrade request
1700            // on a block we're about to replace...
1701            assert(!blk->isWritable() || blk->isDirty());
1702            assert(repl_mshr->needsWritable());
1703            // too hard to replace block with transient state
1704            // allocation failed, block not inserted
1705            return nullptr;
1706        } else {
1707            DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx (%s): %s\n",
1708                    repl_addr, blk->isSecure() ? "s" : "ns",
1709                    addr, is_secure ? "s" : "ns",
1710                    blk->isDirty() ? "writeback" : "clean");
1711
1712            // Will send up Writeback/CleanEvict snoops via isCachedAbove
1713            // when pushing this writeback list into the write buffer.
1714            if (blk->isDirty() || writebackClean) {
1715                // Save writeback packet for handling by caller
1716                writebacks.push_back(writebackBlk(blk));
1717            } else {
1718                writebacks.push_back(cleanEvictBlk(blk));
1719            }
1720        }
1721    }
1722
1723    return blk;
1724}
1725
1726void
1727Cache::invalidateBlock(CacheBlk *blk)
1728{
1729    if (blk != tempBlock)
1730        tags->invalidate(blk);
1731    blk->invalidate();
1732}
1733
1734// Note that the reason we return a list of writebacks rather than
1735// inserting them directly in the write buffer is that this function
1736// is called by both atomic and timing-mode accesses, and in atomic
1737// mode we don't mess with the write buffer (we just perform the
1738// writebacks atomically once the original request is complete).
1739CacheBlk*
1740Cache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
1741                  bool allocate)
1742{
1743    assert(pkt->isResponse() || pkt->cmd == MemCmd::WriteLineReq);
1744    Addr addr = pkt->getAddr();
1745    bool is_secure = pkt->isSecure();
1746#if TRACING_ON
1747    CacheBlk::State old_state = blk ? blk->status : 0;
1748#endif
1749
1750    // When handling a fill, we should have no writes to this line.
1751    assert(addr == blockAlign(addr));
1752    assert(!writeBuffer.findMatch(addr, is_secure));
1753
1754    if (blk == NULL) {
1755        // better have read new data...
1756        assert(pkt->hasData());
1757
1758        // only read responses and write-line requests have data;
1759        // note that we don't write the data here for write-line - that
1760        // happens in the subsequent satisfyCpuSideRequest.
1761        assert(pkt->isRead() || pkt->cmd == MemCmd::WriteLineReq);
1762
1763        // need to do a replacement if allocating, otherwise we stick
1764        // with the temporary storage
1765        blk = allocate ? allocateBlock(addr, is_secure, writebacks) : NULL;
1766
1767        if (blk == NULL) {
1768            // No replaceable block or a mostly exclusive
1769            // cache... just use temporary storage to complete the
1770            // current request and then get rid of it
1771            assert(!tempBlock->isValid());
1772            blk = tempBlock;
1773            tempBlock->set = tags->extractSet(addr);
1774            tempBlock->tag = tags->extractTag(addr);
1775            // @todo: set security state as well...
1776            DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
1777                    is_secure ? "s" : "ns");
1778        } else {
1779            tags->insertBlock(pkt, blk);
1780        }
1781
1782        // we should never be overwriting a valid block
1783        assert(!blk->isValid());
1784    } else {
1785        // existing block... probably an upgrade
1786        assert(blk->tag == tags->extractTag(addr));
1787        // either we're getting new data or the block should already be valid
1788        assert(pkt->hasData() || blk->isValid());
1789        // don't clear block status... if block is already dirty we
1790        // don't want to lose that
1791    }
1792
1793    if (is_secure)
1794        blk->status |= BlkSecure;
1795    blk->status |= BlkValid | BlkReadable;
1796
1797    // sanity check for whole-line writes, which should always be
1798    // marked as writable as part of the fill, and then later marked
1799    // dirty as part of satisfyCpuSideRequest
1800    if (pkt->cmd == MemCmd::WriteLineReq) {
1801        assert(!pkt->hasSharers());
1802        // at the moment other caches do not respond to the
1803        // invalidation requests corresponding to a whole-line write
1804        assert(!pkt->cacheResponding());
1805    }
1806
1807    // here we deal with setting the appropriate state of the line,
1808    // and we start by looking at the hasSharers flag, and ignore the
1809    // cacheResponding flag (normally signalling dirty data) if the
1810    // packet has sharers, thus the line is never allocated as Owned
1811    // (dirty but not writable), and always ends up being either
1812    // Shared, Exclusive or Modified, see Packet::setCacheResponding
1813    // for more details
1814    if (!pkt->hasSharers()) {
1815        // we could get a writable line from memory (rather than a
1816        // cache) even in a read-only cache, note that we set this bit
1817        // even for a read-only cache, possibly revisit this decision
1818        blk->status |= BlkWritable;
1819
1820        // check if we got this via cache-to-cache transfer (i.e., from a
1821        // cache that had the block in Modified or Owned state)
1822        if (pkt->cacheResponding()) {
1823            // we got the block in Modified state, and invalidated the
1824            // owner's copy
1825            blk->status |= BlkDirty;
1826
1827            chatty_assert(!isReadOnly, "Should never see dirty snoop response "
1828                          "in read-only cache %s\n", name());
1829        }
1830    }
1831
1832    DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
1833            addr, is_secure ? "s" : "ns", old_state, blk->print());
1834
1835    // if we got new data, copy it in (checking for a read response
1836    // and a response that has data is the same in the end)
1837    if (pkt->isRead()) {
1838        // sanity checks
1839        assert(pkt->hasData());
1840        assert(pkt->getSize() == blkSize);
1841
1842        std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
1843    }
1844    // We pay for fillLatency here.
1845    blk->whenReady = clockEdge() + fillLatency * clockPeriod() +
1846        pkt->payloadDelay;
1847
1848    return blk;
1849}
1850
1851
1852/////////////////////////////////////////////////////
1853//
1854// Snoop path: requests coming in from the memory side
1855//
1856/////////////////////////////////////////////////////
1857
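// Build and schedule a timing snoop response supplying the given
// block data; unless the caller already made a copy (as the deferred
// snoop path does), a fresh response packet is created so that the
// original request packet is left untouched.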
1858void
1859Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
1860                              bool already_copied, bool pending_inval)
1861{
1862    // sanity check
1863    assert(req_pkt->isRequest());
1864    assert(req_pkt->needsResponse());
1865
1866    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
1867            req_pkt->cmdString(), req_pkt->getAddr(), req_pkt->getSize());
1868    // timing-mode snoop responses require a new packet, unless we
1869    // already made a copy...
1870    PacketPtr pkt = req_pkt;
1871    if (!already_copied)
1872        // do not clear flags, and allocate space for data if the
1873        // packet needs it (the only packets that carry data are read
1874        // responses)
1875        pkt = new Packet(req_pkt, false, req_pkt->isRead());
1876
1877    assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
1878           pkt->hasSharers());
1879    pkt->makeTimingResponse();
1880    if (pkt->isRead()) {
1881        pkt->setDataFromBlock(blk_data, blkSize);
1882    }
1883    if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
1884        // Assume we defer a response to a read from a far-away cache
1885        // A, then later defer a ReadExcl from a cache B on the same
1886        // bus as us. We'll assert cacheResponding in both cases, but
1887        // in the latter case cacheResponding will keep the
1888        // invalidation from reaching cache A. This special response
1889        // tells cache A that it gets the block to satisfy its read,
1890        // but must immediately invalidate it.
1891        pkt->cmd = MemCmd::ReadRespWithInvalidate;
1892    }
1893    // Here we consider forward_time, paying for just forward latency and
1894    // also charging the delay provided by the xbar.
1895    // forward_time is used as send_time in next allocateWriteBuffer().
1896    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
1897    // Here we reset the timing of the packet.
1898    pkt->headerDelay = pkt->payloadDelay = 0;
1899    DPRINTF(CacheVerbose,
1900            "%s created response: %s addr %#llx size %d tick: %lu\n",
1901            __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize(),
1902            forward_time);
1903    memSidePort->schedTimingSnoopResp(pkt, forward_time, true);
1904}
1905
1906uint32_t
1907Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
1908                   bool is_deferred, bool pending_inval)
1909{
1910    DPRINTF(CacheVerbose, "%s for %s addr %#llx size %d\n", __func__,
1911            pkt->cmdString(), pkt->getAddr(), pkt->getSize());
1912    // deferred snoops can only happen in timing mode
1913    assert(!(is_deferred && !is_timing));
1914    // pending_inval only makes sense on deferred snoops
1915    assert(!(pending_inval && !is_deferred));
1916    assert(pkt->isRequest());
1917
1918    // the packet may get modified if we or a forwarded snooper
1919    // responds in atomic mode, so remember a few things about the
1920    // original packet up front
1921    bool invalidate = pkt->isInvalidate();
1922    bool M5_VAR_USED needs_writable = pkt->needsWritable();
1923
1924    // at the moment we could get an uncacheable write which does not
1925    // have the invalidate flag, and we need a suitable way of dealing
1926    // with this case
1927    panic_if(invalidate && pkt->req->isUncacheable(),
1928             "%s got an invalidating uncacheable snoop request %s to %#llx",
1929             name(), pkt->cmdString(), pkt->getAddr());
1930
1931    uint32_t snoop_delay = 0;
1932
1933    if (forwardSnoops) {
1934        // first propagate snoop upward to see if anyone above us wants to
1935        // handle it.  save & restore packet src since it will get
1936        // rewritten to be relative to cpu-side bus (if any)
1937        bool alreadyResponded = pkt->cacheResponding();
1938        if (is_timing) {
1939            // copy the packet so that we can clear any flags before
1940            // forwarding it upwards, we also allocate data (passing
1941            // the pointer along in case of static data), in case
1942            // there is a snoop hit in upper levels
1943            Packet snoopPkt(pkt, true, true);
1944            snoopPkt.setExpressSnoop();
1945            // the snoop packet does not need to wait any additional
1946            // time
1947            snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
1948            cpuSidePort->sendTimingSnoopReq(&snoopPkt);
1949
1950            // add the header delay (including crossbar and snoop
1951            // delays) of the upward snoop to the snoop delay for this
1952            // cache
1953            snoop_delay += snoopPkt.headerDelay;
1954
1955            if (snoopPkt.cacheResponding()) {
1956                // cache-to-cache response from some upper cache
1957                assert(!alreadyResponded);
1958                pkt->setCacheResponding();
1959            }
1960            // upstream cache has the block, or has an outstanding
1961            // MSHR, pass the flag on
1962            if (snoopPkt.hasSharers()) {
1963                pkt->setHasSharers();
1964            }
1965            // If this request is a prefetch or clean evict and an upper level
1966            // signals block present, make sure to propagate the block
1967            // presence to the requester.
1968            if (snoopPkt.isBlockCached()) {
1969                pkt->setBlockCached();
1970            }
1971        } else {
1972            cpuSidePort->sendAtomicSnoop(pkt);
1973            if (!alreadyResponded && pkt->cacheResponding()) {
1974                // cache-to-cache response from some upper cache:
1975                // forward response to original requester
1976                assert(pkt->isResponse());
1977            }
1978        }
1979    }
1980
1981    if (!blk || !blk->isValid()) {
1982        DPRINTF(CacheVerbose, "%s snoop miss for %s addr %#llx size %d\n",
1983                __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
1984        return snoop_delay;
1985    } else {
1986        DPRINTF(Cache, "%s snoop hit for %s addr %#llx size %d, "
1987                "old state is %s\n", __func__, pkt->cmdString(),
1988                pkt->getAddr(), pkt->getSize(), blk->print());
1989    }
1990
1991    chatty_assert(!(isReadOnly && blk->isDirty()),
1992                  "Should never have a dirty block in a read-only cache %s\n",
1993                  name());
1994
1995    // We may end up modifying both the block state and the packet (if
1996    // we respond in atomic mode), so just figure out what to do now
1997    // and then do it later. If we find dirty data while snooping for
1998    // an invalidate, we don't need to send a response. The
1999    // invalidation itself is taken care of below.
2000    bool respond = blk->isDirty() && pkt->needsResponse() &&
2001        pkt->cmd != MemCmd::InvalidateReq;
2002    bool have_writable = blk->isWritable();
2003
2004    // Invalidate any prefetches from below that would strip write permissions.
2005    // MemCmd::HardPFReq is only observed by upstream caches.  After missing
2006    // above and in its own cache, a new MemCmd::ReadReq is created that
2007    // downstream caches observe.
2008    if (pkt->mustCheckAbove()) {
2009        DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s from"
2010                " lower cache\n", pkt->getAddr(), pkt->cmdString());
2011        pkt->setBlockCached();
2012        return snoop_delay;
2013    }
2014
2015    if (pkt->isRead() && !invalidate) {
2016        // reading without requiring the line in a writable state
2017        assert(!needs_writable);
2018        pkt->setHasSharers();
2019
2020        // if the requesting packet is uncacheable, retain the line in
2021        // the current state, otherwise unset the writable flag,
2022        // which means we go from Modified to Owned (and will respond
2023        // below), remain in Owned (and will respond below), from
2024        // Exclusive to Shared, or remain in Shared
2025        if (!pkt->req->isUncacheable())
2026            blk->status &= ~BlkWritable;
2027    }
2028
2029    if (respond) {
2030        // prevent anyone else from responding, cache as well as
2031        // memory, and also prevent any memory from even seeing the
2032        // request
2033        pkt->setCacheResponding();
2034        if (have_writable) {
2035            // inform the cache hierarchy that this cache had the line
2036            // in the Modified state so that we avoid unnecessary
2037            // invalidations (see Packet::setResponderHadWritable)
2038            pkt->setResponderHadWritable();
2039
2040            // in the case of an uncacheable request there is no point
2041            // in setting the responderHadWritable flag, but since the
2042            // recipient does not care there is no harm in doing so
2043        } else {
2044            // if the packet has needsWritable set we invalidate our
2045            // copy below and all other copies will be invalidated
2046            // through express snoops, and if needsWritable is not set
2047            // we already called setHasSharers above
2048        }
2049
2050        // if we are returning a writable and dirty (Modified) line,
2051        // we should be invalidating the line
2052        panic_if(!invalidate && !pkt->hasSharers(),
2053                 "%s is passing a Modified line through %s to %#llx, "
2054                 "but keeping the block",
2055                 name(), pkt->cmdString(), pkt->getAddr());
2056
2057        if (is_timing) {
2058            doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
2059        } else {
2060            pkt->makeAtomicResponse();
2061            // packets such as upgrades do not actually have any data
2062            // payload
2063            if (pkt->hasData())
2064                pkt->setDataFromBlock(blk->data, blkSize);
2065        }
2066    }
2067
2068    if (!respond && is_timing && is_deferred) {
2069        // if it's a deferred timing snoop to which we are not
2070        // responding, then we've made a copy of both the request and
2071        // the packet, delete them here
2072        assert(pkt->needsResponse());
2073        delete pkt->req;
2074        delete pkt;
2075    }
2076
2077    // Do this last in case it deallocates block data or something
2078    // like that
2079    if (invalidate) {
2080        invalidateBlock(blk);
2081    }
2082
2083    DPRINTF(Cache, "new state is %s\n", blk->print());
2084
2085    return snoop_delay;
2086}
2087
2088
2089void
2090Cache::recvTimingSnoopReq(PacketPtr pkt)
2091{
2092    DPRINTF(CacheVerbose, "%s for %s addr %#llx size %d\n", __func__,
2093            pkt->cmdString(), pkt->getAddr(), pkt->getSize());
2094
2095    // Snoops shouldn't happen when bypassing caches
2096    assert(!system->bypassCaches());
2097
2098    // no need to snoop requests that are not in range
2099    if (!inRange(pkt->getAddr())) {
2100        return;
2101    }
2102
2103    bool is_secure = pkt->isSecure();
2104    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
2105
2106    Addr blk_addr = blockAlign(pkt->getAddr());
2107    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
2108
2109    // Update the latency cost of the snoop so that the crossbar can
2110    // account for it. Do not overwrite what other neighbouring caches
2111    // have already done, rather take the maximum. The update is
2112    // tentative, for cases where we return before an upward snoop
2113    // happens below.
2114    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
2115                                         lookupLatency * clockPeriod());
2116
2117    // Inform requests from below (Prefetch, CleanEvict or Writeback) of
2118    // an MSHR hit by setting BLOCK_CACHED.
2119    if (mshr && pkt->mustCheckAbove()) {
2120        DPRINTF(Cache, "Setting block cached for %s from"
2121                "lower cache on mshr hit %#x\n",
2122                pkt->cmdString(), pkt->getAddr());
2123        pkt->setBlockCached();
2124        return;
2125    }
2126
2127    // Let the MSHR itself track the snoop and decide whether we want
2128    // to go ahead and do the regular cache snoop
2129    if (mshr && mshr->handleSnoop(pkt, order++)) {
2130        DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s)."
2131                "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
2132                mshr->print());
2133
2134        if (mshr->getNumTargets() > numTarget)
2135            warn("allocating bonus target for snoop"); //handle later
2136        return;
2137    }
2138
2139    //We also need to check the writeback buffers and handle those
2140    WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure);
2141    if (wb_entry) {
2142        DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
2143                pkt->getAddr(), is_secure ? "s" : "ns");
2144        // Expect to see only Writebacks and/or CleanEvicts here, both of
2145        // which should not be generated for uncacheable data.
2146        assert(!wb_entry->isUncacheable());
2147        // There should only be a single request responsible for generating
2148        // Writebacks/CleanEvicts.
2149        assert(wb_entry->getNumTargets() == 1);
2150        PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
2151        assert(wb_pkt->isEviction());
2152
2153        if (pkt->isEviction()) {
2154            // if the block is found in the write queue, set the BLOCK_CACHED
2155            // flag for Writeback/CleanEvict snoop. On return the snoop will
2156            // propagate the BLOCK_CACHED flag in Writeback packets and prevent
2157            // any CleanEvicts from travelling down the memory hierarchy.
2158            pkt->setBlockCached();
2159            DPRINTF(Cache, "Squashing %s from lower cache on writequeue hit"
2160                    " %#x\n", pkt->cmdString(), pkt->getAddr());
2161            return;
2162        }
2163
2164        // conceptually writebacks are no different to other blocks in
2165        // this cache, so the behaviour is modelled after handleSnoop,
2166        // the difference being that instead of querying the block
2167        // state to determine if it is dirty and writable, we use the
2168        // command and fields of the writeback packet
2169        bool respond = wb_pkt->cmd == MemCmd::WritebackDirty &&
2170            pkt->needsResponse() && pkt->cmd != MemCmd::InvalidateReq;
2171        bool have_writable = !wb_pkt->hasSharers();
2172        bool invalidate = pkt->isInvalidate();
2173
2174        if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
2175            assert(!pkt->needsWritable());
2176            pkt->setHasSharers();
2177            wb_pkt->setHasSharers();
2178        }
2179
2180        if (respond) {
2181            pkt->setCacheResponding();
2182
2183            if (have_writable) {
2184                pkt->setResponderHadWritable();
2185            }
2186
2187            doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
2188                                   false, false);
2189        }
2190
2191        if (invalidate) {
2192            // Invalidation trumps our writeback... discard here
2193            // Note: markInService will remove entry from writeback buffer.
2194            markInService(wb_entry);
2195            delete wb_pkt;
2196        }
2197    }
2198
2199    // If this was a shared writeback, there may still be
2200    // other shared copies above that require invalidation.
2201    // We could be more selective and return here if the
2202    // request is non-exclusive or if the writeback is
2203    // exclusive.
2204    uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false);
2205
2206    // Override what we did when we first saw the snoop, as we now
2207    // also have the cost of the upwards snoops to account for
2208    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay +
2209                                         lookupLatency * clockPeriod());
2210}
2211
2212bool
2213Cache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
2214{
2215    // Express snoop responses from master to slave, e.g., from L1 to L2
2216    cache->recvTimingSnoopResp(pkt);
2217    return true;
2218}
2219
2220Tick
2221Cache::recvAtomicSnoop(PacketPtr pkt)
2222{
2223    // Snoops shouldn't happen when bypassing caches
2224    assert(!system->bypassCaches());
2225
2226    // no need to snoop requests that are not in range.
2227    if (!inRange(pkt->getAddr())) {
2228        return 0;
2229    }
2230
2231    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
2232    uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
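    // in atomic mode the snoop latency is simply returned to the
    // caller, rather than being annotated on the packet as in the
    // timing case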
2233    return snoop_delay + lookupLatency * clockPeriod();
2234}
2235
2236
2237QueueEntry*
2238Cache::getNextQueueEntry()
2239{
2240    // Check both MSHR queue and write buffer for potential requests,
2241    // note that null does not mean there is no request, it could
2242    // simply be that it is not ready
2243    MSHR *miss_mshr  = mshrQueue.getNext();
2244    WriteQueueEntry *wq_entry = writeBuffer.getNext();
2245
2246    // If we got a write buffer request ready, first priority is a
2247    // full write buffer (but only if we have no uncacheable write
2248    // responses outstanding, possibly revisit this last part),
2249    // otherwise we favour the miss requests
2250    if (wq_entry &&
2251        ((writeBuffer.isFull() && writeBuffer.numInService() == 0) ||
2252         !miss_mshr)) {
2253        // need to search MSHR queue for conflicting earlier miss.
2254        MSHR *conflict_mshr =
2255            mshrQueue.findPending(wq_entry->blkAddr,
2256                                  wq_entry->isSecure);
2257
2258        if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
2259            // Service misses in order until conflict is cleared.
2260            return conflict_mshr;
2261
2262            // @todo Note that we ignore the ready time of the conflict here
2263        }
2264
2265        // No conflicts; issue write
2266        return wq_entry;
2267    } else if (miss_mshr) {
2268        // need to check for conflicting earlier writeback
2269        WriteQueueEntry *conflict_mshr =
2270            writeBuffer.findPending(miss_mshr->blkAddr,
2271                                    miss_mshr->isSecure);
2272        if (conflict_mshr) {
2273            // not sure why we don't check order here... it was in the
2274            // original code but commented out.
2275
2276            // The only way this happens is if we are
2277            // doing a write and we didn't have permissions
2278            // then subsequently saw a writeback (owned got evicted)
2279            // We need to make sure to perform the writeback first
2280            // To preserve the dirty data, then we can issue the write
2281
2282            // should we return wq_entry here instead?  I.e. do we
2283            // have to flush writes in order?  I don't think so... not
2284            // for Alpha anyway.  Maybe for x86?
2285            return conflict_mshr;
2286
2287            // @todo Note that we ignore the ready time of the conflict here
2288        }
2289
2290        // No conflicts; issue read
2291        return miss_mshr;
2292    }
2293
2294    // fall through... no pending requests.  Try a prefetch.
2295    assert(!miss_mshr && !wq_entry);
2296    if (prefetcher && mshrQueue.canPrefetch()) {
2297        // If we have a miss queue slot, we can try a prefetch
2298        PacketPtr pkt = prefetcher->getPacket();
2299        if (pkt) {
2300            Addr pf_addr = blockAlign(pkt->getAddr());
2301            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
2302                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
2303                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
2304                // Update statistic on number of prefetches issued
2305                // (hwpf_mshr_misses)
2306                assert(pkt->req->masterId() < system->maxMasters());
2307                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
2308
2309                // allocate an MSHR and return it, note
2310                // that we send the packet straight away, so do not
2311                // schedule the send
2312                return allocateMissBuffer(pkt, curTick(), false);
2313            } else {
2314                // free the request and packet
2315                delete pkt->req;
2316                delete pkt;
2317            }
2318        }
2319    }
2320
2321    return nullptr;
2322}
2323
2324bool
2325Cache::isCachedAbove(PacketPtr pkt, bool is_timing) const
2326{
2327    if (!forwardSnoops)
2328        return false;
2329    // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
2330    // Writeback snoops into upper level caches to check for copies of the
2331    // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict
2332    // packet, the cache can inform the crossbar below of presence or absence
2333    // of the block.
2334    if (is_timing) {
2335        Packet snoop_pkt(pkt, true, false);
2336        snoop_pkt.setExpressSnoop();
2337        // Assert that packet is either Writeback or CleanEvict and not a
2338        // prefetch request because prefetch requests need an MSHR and may
2339        // generate a snoop response.
2340        assert(pkt->isEviction());
2341        snoop_pkt.senderState = NULL;
2342        cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
2343        // Writeback/CleanEvict snoops do not generate a snoop response.
2344        assert(!(snoop_pkt.cacheResponding()));
2345        return snoop_pkt.isBlockCached();
2346    } else {
2347        cpuSidePort->sendAtomicSnoop(pkt);
2348        return pkt->isBlockCached();
2349    }
2350}
2351
2352Tick
2353Cache::nextQueueReadyTime() const
2354{
2355    Tick nextReady = std::min(mshrQueue.nextReadyTime(),
2356                              writeBuffer.nextReadyTime());
2357
2358    // Don't signal prefetch ready time if no MSHRs available
2359    // Will signal once enough MSHRs are deallocated
2360    if (prefetcher && mshrQueue.canPrefetch()) {
2361        nextReady = std::min(nextReady,
2362                             prefetcher->nextPrefetchReadyTime());
2363    }
2364
2365    return nextReady;
2366}
2367
2368bool
2369Cache::sendMSHRQueuePacket(MSHR* mshr)
2370{
2371    assert(mshr);
2372
2373    // use request from 1st target
2374    PacketPtr tgt_pkt = mshr->getTarget()->pkt;
2375
2376    DPRINTF(Cache, "%s MSHR %s for addr %#llx size %d\n", __func__,
2377            tgt_pkt->cmdString(), tgt_pkt->getAddr(),
2378            tgt_pkt->getSize());
2379
2380    CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
2381
2382    if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
2383        // we should never have hardware prefetches to allocated
2384        // blocks
2385        assert(blk == NULL);
2386
2387        // We need to check the caches above us to verify that
2388        // they don't have a copy of this block in the dirty state
2389        // at the moment. Without this check we could get a stale
2390        // copy from memory that might get used in place of the
2391        // dirty one.
2392        Packet snoop_pkt(tgt_pkt, true, false);
2393        snoop_pkt.setExpressSnoop();
2394        // We are sending this packet upwards, but if it hits we will
2395        // get a snoop response that we end up treating just like a
2396        // normal response, hence it needs the MSHR as its sender
2397        // state
2398        snoop_pkt.senderState = mshr;
2399        cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
2400
2401        // Check to see if the prefetch was squashed by an upper
2402        // cache (to prevent us from grabbing the line), or if a
2403        // writeback arrived between the time the prefetch was
2404        // placed in the MSHRs and when it was selected to be
2405        // sent.
2406
2407        // It is important to check cacheResponding before
2408        // prefetchSquashed. If another cache has committed to
2409        // responding, it will be sending a dirty response which will
2410        // arrive at the MSHR allocated for this request. Checking the
2411        // prefetchSquash first may result in the MSHR being
2412        // prematurely deallocated.
2413        if (snoop_pkt.cacheResponding()) {
2414            auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
2415            assert(r.second);
2416
2417            // if we are getting a snoop response with no sharers it
2418            // will be allocated as Modified
2419            bool pending_modified_resp = !snoop_pkt.hasSharers();
2420            markInService(mshr, pending_modified_resp);
2421
2422            DPRINTF(Cache, "Upward snoop of prefetch for addr"
2423                    " %#x (%s) hit\n",
2424                    tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
2425            return false;
2426        }
2427
2428        if (snoop_pkt.isBlockCached()) {
2429            DPRINTF(Cache, "Block present, prefetch squashed by cache.  "
2430                    "Deallocating mshr target %#x.\n",
2431                    mshr->blkAddr);
2432
2433            // Deallocate the mshr target
2434            if (mshrQueue.forceDeallocateTarget(mshr)) {
2435                // Clear the blocked state if this deallocation freed an
2436                // MSHR when all had previously been utilized
2437                clearBlocked(Blocked_NoMSHRs);
2438            }
2439            return false;
2440        }
2441    }
2442
2443    // either a prefetch that is not present upstream, or a normal
2444    // MSHR request, proceed to get the packet to send downstream
2445    PacketPtr pkt = getBusPacket(tgt_pkt, blk, mshr->needsWritable());
2446
2447    mshr->isForward = (pkt == NULL);
2448
2449    if (mshr->isForward) {
2450        // not a cache block request, but a response is expected
2451        // make copy of current packet to forward, keep current
2452        // copy for response handling
2453        pkt = new Packet(tgt_pkt, false, true);
2454        assert(!pkt->isWrite());
2455    }
2456
2457    // play it safe and append (rather than set) the sender state,
2458    // as forwarded packets may already have existing state
2459    pkt->pushSenderState(mshr);
2460
2461    if (!memSidePort->sendTimingReq(pkt)) {
2462        // we are awaiting a retry, but we
2463        // delete the packet and will be creating a new packet
2464        // when we get the opportunity
2465        delete pkt;
2466
2467        // note that we have now masked any requestBus and
2468        // schedSendEvent (we will wait for a retry before
2469        // doing anything), and this is so even if we do not
2470        // care about this packet and might override it before
2471        // it gets retried
2472        return true;
2473    } else {
2474        // As part of the call to sendTimingReq the packet is
2475        // forwarded to all neighbouring caches (and any caches
2476        // above them) as a snoop. Thus at this point we know if
2477        // any of the neighbouring caches are responding, and if
2478        // so, we know it is dirty, and we can determine if it is
2479        // being passed as Modified, making our MSHR the ordering
2480        // point
2481        bool pending_modified_resp = !pkt->hasSharers() &&
2482            pkt->cacheResponding();
2483        markInService(mshr, pending_modified_resp);
2484        return false;
2485    }
2486}
2487
2488bool
2489Cache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
2490{
2491    assert(wq_entry);
2492
2493    // always a single target for write queue entries
2494    PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;
2495
2496    DPRINTF(Cache, "%s write %s for addr %#llx size %d\n", __func__,
2497            tgt_pkt->cmdString(), tgt_pkt->getAddr(),
2498            tgt_pkt->getSize());
2499
2500    PacketPtr pkt = nullptr;
2501    bool delete_pkt = false;
2502
2503    if (tgt_pkt->isEviction()) {
2504        assert(!wq_entry->isUncacheable());
2505        // no response expected, just forward packet as it is
2506        pkt = tgt_pkt;
2507    } else {
2508        // the only thing we deal with besides eviction commands
2509        // are uncacheable writes
2510        assert(tgt_pkt->req->isUncacheable() && tgt_pkt->isWrite());
2511        // not a cache block request, but a response is expected
2512        // make copy of current packet to forward, keep current
2513        // copy for response handling
2514        pkt = new Packet(tgt_pkt, false, true);
2515        pkt->setData(tgt_pkt->getConstPtr<uint8_t>());
2516        delete_pkt = true;
2517    }
2518
2519    pkt->pushSenderState(wq_entry);
2520
2521    if (!memSidePort->sendTimingReq(pkt)) {
2522        if (delete_pkt) {
2523            // we are awaiting a retry, but we
2524            // delete the packet and will be creating a new packet
2525            // when we get the opportunity
2526            delete pkt;
2527        }
2528        // note that we have now masked any requestBus and
2529        // schedSendEvent (we will wait for a retry before
2530        // doing anything), and this is so even if we do not
2531        // care about this packet and might override it before
2532        // it gets retried
2533        return true;
2534    } else {
2535        markInService(wq_entry);
2536        return false;
2537    }
2538}
2539
2540void
2541Cache::serialize(CheckpointOut &cp) const
2542{
2543    bool dirty(isDirty());
2544
2545    if (dirty) {
2546        warn("*** The cache still contains dirty data. ***\n");
2547        warn("    Make sure to drain the system using the correct flags.\n");
2548        warn("    This checkpoint will not restore correctly and dirty data in "
2549             "the cache will be lost!\n");
2550    }
2551
2552    // Since we don't checkpoint the data in the cache, any dirty data
2553    // will be lost when restoring from a checkpoint of a system that
2554    // wasn't drained properly. Flag the checkpoint as invalid if the
2555    // cache contains dirty data.
2556    bool bad_checkpoint(dirty);
2557    SERIALIZE_SCALAR(bad_checkpoint);
2558}
2559
2560void
2561Cache::unserialize(CheckpointIn &cp)
2562{
2563    bool bad_checkpoint;
2564    UNSERIALIZE_SCALAR(bad_checkpoint);
2565    if (bad_checkpoint) {
2566        fatal("Restoring from checkpoints with dirty caches is not supported "
2567              "in the classic memory system. Please remove any caches or "
2568              " drain them properly before taking checkpoints.\n");
2569    }
2570}
2571
2572///////////////
2573//
2574// CpuSidePort
2575//
2576///////////////
2577
2578AddrRangeList
2579Cache::CpuSidePort::getAddrRanges() const
2580{
2581    return cache->getAddrRanges();
2582}
2583
2584bool
2585Cache::CpuSidePort::recvTimingReq(PacketPtr pkt)
2586{
2587    assert(!cache->system->bypassCaches());
2588
2589    bool success = false;
2590
2591    // always let express snoop packets through, even if blocked
2592    if (pkt->isExpressSnoop()) {
2593        // do not change the current retry state
2594        bool M5_VAR_USED bypass_success = cache->recvTimingReq(pkt);
2595        assert(bypass_success);
2596        return true;
2597    } else if (blocked || mustSendRetry) {
2598        // either already committed to send a retry, or blocked
2599        success = false;
2600    } else {
2601        // pass it on to the cache, and let the cache decide if we
2602        // have to retry or not
2603        success = cache->recvTimingReq(pkt);
2604    }
2605
2606    // remember if we have to retry
2607    mustSendRetry = !success;
2608    return success;
2609}
2610
2611Tick
2612Cache::CpuSidePort::recvAtomic(PacketPtr pkt)
2613{
2614    return cache->recvAtomic(pkt);
2615}
2616
2617void
2618Cache::CpuSidePort::recvFunctional(PacketPtr pkt)
2619{
2620    // functional request
2621    cache->functionalAccess(pkt, true);
2622}
2623
2624Cache::
2625CpuSidePort::CpuSidePort(const std::string &_name, Cache *_cache,
2626                         const std::string &_label)
2627    : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache)
2628{
2629}
2630
2631Cache*
2632CacheParams::create()
2633{
2634    assert(tags);
2635
2636    return new Cache(this);
2637}
2638///////////////
2639//
2640// MemSidePort
2641//
2642///////////////
2643
2644bool
2645Cache::MemSidePort::recvTimingResp(PacketPtr pkt)
2646{
2647    cache->recvTimingResp(pkt);
2648    return true;
2649}
2650
2651// Express snooping requests to memside port
2652void
2653Cache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
2654{
2655    // handle snooping requests
2656    cache->recvTimingSnoopReq(pkt);
2657}
2658
2659Tick
2660Cache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
2661{
2662    return cache->recvAtomicSnoop(pkt);
2663}
2664
2665void
2666Cache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
2667{
2668    // functional snoop (note that in contrast to atomic we don't have
2669    // a specific functionalSnoop method, as they have the same
2670    // behaviour regardless)
2671    cache->functionalAccess(pkt, false);
2672}
2673
2674void
2675Cache::CacheReqPacketQueue::sendDeferredPacket()
2676{
2677    // sanity check
2678    assert(!waitingOnRetry);
2679
2680    // there should never be any deferred request packets in the
2681    // queue, instead we rely on the cache to provide the packets
2682    // from the MSHR queue or write queue
2683    assert(deferredPacketReadyTime() == MaxTick);
2684
2685    // check for request packets (requests & writebacks)
2686    QueueEntry* entry = cache.getNextQueueEntry();
2687
2688    if (!entry) {
2689        // can happen if e.g. we attempt a writeback and fail, but
2690        // before the retry, the writeback is eliminated because
2691        // we snoop another cache's ReadEx.
2692    } else {
2693        // let our snoop responses go first if there are responses to
2694        // the same addresses
2695        if (checkConflictingSnoop(entry->blkAddr)) {
2696            return;
2697        }
2698        waitingOnRetry = entry->sendPacket(cache);
2699    }
2700
2701    // if we succeeded and are not waiting for a retry, schedule the
2702    // next send considering when the next queue is ready, note that
2703    // snoop responses have their own packet queue and thus schedule
2704    // their own events
2705    if (!waitingOnRetry) {
2706        schedSendEvent(cache.nextQueueReadyTime());
2707    }
2708}
2709
2710Cache::
2711MemSidePort::MemSidePort(const std::string &_name, Cache *_cache,
2712                         const std::string &_label)
2713    : BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
2714      _reqQueue(*_cache, *this, _snoopRespQueue, _label),
2715      _snoopRespQueue(*_cache, *this, _label), cache(_cache)
2716{
2717}
2718