cache.cc revision 11277
/*
 * Copyright (c) 2010-2015 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Sandberg
 */

/**
 * @file
 * Cache definitions.
 */

#include "mem/cache/cache.hh"

#include "base/misc.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheTags.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "sim/sim_exit.hh"

Cache::Cache(const CacheParams *p)
    : BaseCache(p, p->system->cacheLineSize()),
      tags(p->tags),
      prefetcher(p->prefetcher),
      doFastWrites(true),
      prefetchOnAccess(p->prefetch_on_access),
      clusivity(p->clusivity),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent(this, false,
                                    EventBase::Delayed_Writeback_Pri)
{
    tempBlock = new CacheBlk();
    tempBlock->data = new uint8_t[blkSize];

    cpuSidePort = new CpuSidePort(p->name + ".cpu_side", this,
                                  "CpuSidePort");
    memSidePort = new MemSidePort(p->name + ".mem_side", this,
                                  "MemSidePort");

    tags->setCache(this);
    if (prefetcher)
        prefetcher->setCache(this);
}

Cache::~Cache()
{
    delete [] tempBlock->data;
    delete tempBlock;

    delete cpuSidePort;
    delete memSidePort;
}

void
Cache::regStats()
{
    BaseCache::regStats();
}

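// Handle an atomic SwapReq: copy the packet's write data into the
// block and return the block's old contents in the packet. For a
// conditional swap (CondSwap), the write is only performed if the
// current block contents match the condition value carried in the
// request's extra data.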
void
Cache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = tags->extractBlkOffset(pkt->getAddr());
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}


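// Satisfy a CPU-side request from a valid block. deferred_response
// indicates that we are responding after our own miss rather than
// immediately, and pending_downgrade indicates a buffered snoop that
// will downgrade our copy; both limit the coherency permissions we
// can pass upstream along with the data.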
void
Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
                             bool deferred_response, bool pending_downgrade)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsExclusive() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        cmpAndSwap(blk, pkt);
    } else if (pkt->isWrite()) {
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in Exclusive
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty even if we are a failed
        // StoreCond so we supply data to any snoops that have
        // appended themselves to this cache before knowing the store
        // will fail.
        blk->status |= BlkDirty;
        DPRINTF(Cache, "%s for %s addr %#llx size %d (write)\n", __func__,
                pkt->cmdString(), pkt->getAddr(), pkt->getSize());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }
        pkt->setDataFromBlock(blk->data, blkSize);
        // determine if this read is from a (coherent) cache, or not
        // by looking at the command type; we could potentially add a
        // packet attribute such as 'FromCache' to make this check a
        // bit cleaner
        if (pkt->cmd == MemCmd::ReadExReq ||
            pkt->cmd == MemCmd::ReadSharedReq ||
            pkt->cmd == MemCmd::ReadCleanReq ||
            pkt->cmd == MemCmd::SCUpgradeFailReq) {
            assert(pkt->getSize() == blkSize);
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsExclusive()) {
                // sanity check
                assert(pkt->cmd == MemCmd::ReadExReq ||
                       pkt->cmd == MemCmd::SCUpgradeFailReq);

                // if we have a dirty copy, make sure the recipient
                // keeps it marked dirty
                if (blk->isDirty()) {
                    pkt->assertMemInhibit();
                }
                // on ReadExReq we give up our copy unconditionally,
                // even if this cache is mostly inclusive, we may want
                // to revisit this
                invalidateBlock(blk);
            } else if (blk->isWritable() && !pending_downgrade &&
                       !pkt->sharedAsserted() &&
                       pkt->cmd != MemCmd::ReadCleanReq) {
                // we can give the requester an exclusive copy (by not
                // asserting shared line) on a read request if:
                // - we have an exclusive copy at this level (& below)
                // - we don't have a pending snoop from below
                //   signaling another read request
                // - no other cache above has a copy (otherwise it
                //   would have asserted the shared line on request)
                // - we are not satisfying an instruction fetch (this
                //   prevents dirty data in the i-cache)

                if (blk->isDirty()) {
                    // special considerations if we're owner:
                    if (!deferred_response) {
                        // if we are responding immediately and can
                        // signal that we're transferring ownership
                        // (inhibit set) along with exclusivity
                        // (shared not set), do so
                        pkt->assertMemInhibit();

                        // if this cache is mostly inclusive, we keep
                        // the block as writable (exclusive), and pass
                        // it upwards as writable and dirty
                        // (modified), hence we have multiple caches
                        // considering the same block writable,
                        // something that we get away with due to the
                        // fact that: 1) this cache has been
                        // considered the ordering point and has
                        // responded to all snoops up till now, and 2)
                        // we always snoop upwards before consulting
                        // the local cache, both on a normal request
                        // (snooping done by the crossbar), and on a
                        // snoop
                        blk->status &= ~BlkDirty;

                        // if this cache is mostly exclusive with
                        // respect to the cache above, drop the block
                        if (clusivity == Enums::mostly_excl) {
                            invalidateBlock(blk);
                        }
                    } else {
                        // if we're responding after our own miss,
                        // there's a window where the recipient didn't
                        // know it was getting ownership and may not
                        // have responded to snoops correctly, so we
                        // can't pass off ownership *or* exclusivity
                        pkt->assertShared();
                    }
                }
            } else {
                // otherwise only respond with a shared copy
                pkt->assertShared();
            }
        }
    } else {
        // Upgrade or Invalidate, since we have it Exclusively (E or
        // M), we ack then invalidate.
        assert(pkt->isUpgrade() || pkt->isInvalidate());

        // for invalidations we could be looking at the temp block
        // (for upgrades we always allocate)
        invalidateBlock(blk);
        DPRINTF(Cache, "%s for %s addr %#llx size %d (invalidation)\n",
                __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
    }
}


/////////////////////////////////////////////////////
//
// MSHR helper functions
//
/////////////////////////////////////////////////////


void
Cache::markInService(MSHR *mshr, bool pending_dirty_resp)
{
    markInServiceInternal(mshr, pending_dirty_resp);
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

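// Perform the cache lookup and handle the access. Returns true if
// the request is satisfied at this level (a hit, an absorbed
// writeback, or a sunk CleanEvict), and false if it has to be
// treated as a miss and forwarded. lat is updated with the lookup
// latency, and any required writebacks are appended to writebacks.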
bool
Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
              PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());

    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    if (pkt->req->isUncacheable()) {
        DPRINTF(Cache, "%s%s addr %#llx uncacheable\n", pkt->cmdString(),
                pkt->req->isInstFetch() ? " (ifetch)" : "",
                pkt->getAddr());

        // flush and invalidate any existing block
        CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
        if (old_blk && old_blk->isValid()) {
            if (old_blk->isDirty() || writebackClean)
                writebacks.push_back(writebackBlk(old_blk));
            else
                writebacks.push_back(cleanEvictBlk(old_blk));
            tags->invalidate(old_blk);
            old_blk->invalidate();
        }

        blk = NULL;
        // lookupLatency is the latency in case the request is uncacheable.
        lat = lookupLatency;
        return false;
    }

    ContextID id = pkt->req->hasContextId() ?
        pkt->req->contextId() : InvalidContextID;
    // Note that lat is passed by reference here: accessBlock() may
    // modify its value.
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat, id);

    DPRINTF(Cache, "%s%s addr %#llx size %d (%s) %s\n", pkt->cmdString(),
            pkt->req->isInstFetch() ? " (ifetch)" : "",
            pkt->getAddr(), pkt->getSize(), pkt->isSecure() ? "s" : "ns",
            blk ? "hit " + blk->print() : "miss");


    if (pkt->isEviction()) {
        // We check for the presence of the block in the caches above
        // before issuing a Writeback or CleanEvict to the write
        // buffer. Therefore the only possible case is a CleanEvict
        // packet coming from above and encountering a Writeback
        // generated by this cache that is waiting in the write
        // buffer. Cases of upper-level peer caches generating
        // CleanEvict and Writeback, or simply CleanEvict and
        // CleanEvict, almost simultaneously are caught by the snoops
        // sent out by the crossbar.
        std::vector<MSHR *> outgoing;
        if (writeBuffer.findMatches(pkt->getAddr(), pkt->isSecure(),
                                   outgoing)) {
            assert(outgoing.size() == 1);
            MSHR *wb_entry = outgoing[0];
            assert(wb_entry->getNumTargets() == 1);
            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
            assert(wbPkt->isWriteback());

            if (pkt->isCleanEviction()) {
                // CleanEvict and WritebackClean packets snoop into
                // other peer caches at the same level while traversing the
                // crossbar. If a copy of the block is found, the
                // packet is deleted in the crossbar. Hence, none of
                // the other upper level caches connected to this
                // cache have the block, so we can clear the
                // BLOCK_CACHED flag in the Writeback if set and
                // discard the CleanEvict by returning true.
                wbPkt->clearBlockCached();
                return true;
            } else {
                assert(pkt->cmd == MemCmd::WritebackDirty);
                // Dirty writeback from above trumps our clean
                // writeback... discard here
                // Note: markInService will remove entry from writeback buffer.
                markInService(wb_entry, false);
                delete wbPkt;
            }
        }
    }

    // Writeback handling is a special case.  We can write the block into
    // the cache without having a writeable copy (or any copy at all).
    if (pkt->isWriteback()) {
        assert(blkSize == pkt->getSize());

        // we could get a clean writeback while we have
        // outstanding accesses to a block, do the simple thing for
        // now and drop the clean writeback so that we do not upset
        // any ordering/decisions about ownership already taken
        if (pkt->cmd == MemCmd::WritebackClean &&
            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
                    "dropping\n", pkt->getAddr());
            return true;
        }

        if (blk == NULL) {
            // need to do a replacement
            blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks);
            if (blk == NULL) {
                // no replaceable block available: give up, fwd to next level.
                incMissCount(pkt);
                return false;
            }
            tags->insertBlock(pkt, blk);

            blk->status = (BlkValid | BlkReadable);
            if (pkt->isSecure()) {
                blk->status |= BlkSecure;
            }
        }
        // only mark the block dirty if we got a writeback command,
        // and leave it as is for a clean writeback
        if (pkt->cmd == MemCmd::WritebackDirty) {
            blk->status |= BlkDirty;
        }
        // if shared is not asserted we got the writeback in modified
        // state, if it is asserted we are in the owned state
        if (!pkt->sharedAsserted()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
        incHitCount(pkt);
        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        if (blk != NULL) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and delete
            // it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the CleanEvict
        // like a Writeback which could not find a replaceable block so has to
        // go to next level.
        return false;
    } else if ((blk != NULL) &&
               (pkt->needsExclusive() ? blk->isWritable()
                                      : blk->isReadable())) {
        // OK to satisfy access
        incHitCount(pkt);
        satisfyCpuSideRequest(pkt, blk);
        return true;
    }

    // Can't satisfy access normally... either no block (blk == NULL)
    // or have block but need exclusive & only have shared.

    incMissCount(pkt);

    if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}

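// Issue the queued writebacks/evictions in timing mode, filtering
// them against the caches above: CleanEvicts and clean writebacks
// for blocks still cached above are dropped, while dirty writebacks
// are sent below with the BLOCK_CACHED flag set.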
void
Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // We use forwardLatency here because we are copying writebacks to
        // write buffer.  Call isCachedAbove for both Writebacks and
        // CleanEvicts. If isCachedAbove returns true we set BLOCK_CACHED flag
        // in Writebacks and discard CleanEvicts.
        if (isCachedAbove(wbPkt)) {
            if (wbPkt->cmd == MemCmd::CleanEvict) {
                // Delete CleanEvict because cached copies exist above. The
                // packet destructor will delete the request object because
                // this is a non-snoop request packet which does not require a
                // response.
                delete wbPkt;
            } else if (wbPkt->cmd == MemCmd::WritebackClean) {
                // clean writeback, do not send since the block is
                // still cached above
                assert(writebackClean);
                delete wbPkt;
            } else {
                assert(wbPkt->cmd == MemCmd::WritebackDirty);
                // Set BLOCK_CACHED flag in Writeback and send below, so that
                // the Writeback does not reset the bit corresponding to this
                // address in the snoop filter below.
                wbPkt->setBlockCached();
                allocateWriteBuffer(wbPkt, forward_time);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            allocateWriteBuffer(wbPkt, forward_time);
        }
        writebacks.pop_front();
    }
}

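// Atomic-mode counterpart of doWritebacks: the same filtering is
// applied, but the packets are sent immediately with sendAtomic and
// deleted here.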
void
Cache::doWritebacksAtomic(PacketList& writebacks)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // Call isCachedAbove for both Writebacks and CleanEvicts. If
        // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
        // and discard CleanEvicts.
        if (isCachedAbove(wbPkt, false)) {
            if (wbPkt->cmd == MemCmd::WritebackDirty) {
                // Set BLOCK_CACHED flag in Writeback and send below,
                // so that the Writeback does not reset the bit
                // corresponding to this address in the snoop filter
                // below. We can discard CleanEvicts because cached
                // copies exist above. Atomic mode isCachedAbove
                // modifies packet to set BLOCK_CACHED flag
                memSidePort->sendAtomic(wbPkt);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            memSidePort->sendAtomic(wbPkt);
        }
        writebacks.pop_front();
        // In case of CleanEvicts, the packet destructor will delete the
        // request object because this is a non-snoop request packet which
        // does not require a response.
        delete wbPkt;
    }
}


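// Handle a snoop response arriving on the CPU-side port. A response
// to a snoop we created ourselves (tracked in outstandingSnoop,
// i.e. a hardware prefetch probing the caches above) is sunk here
// via recvTimingResp; anything else is a forwarded snoop response
// and is passed on towards the memory side.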
void
Cache::recvTimingSnoopResp(PacketPtr pkt)
{
    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    assert(pkt->isResponse());
    assert(!system->bypassCaches());

    // determine if the response is from a snoop request we created
    // (in which case it should be in the outstandingSnoop), or if we
    // merely forwarded someone else's snoop request
    const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
        outstandingSnoop.end();

    if (!forwardAsSnoop) {
        // the packet came from this cache, so sink it here and do not
        // forward it
        assert(pkt->cmd == MemCmd::HardPFResp);

        outstandingSnoop.erase(pkt->req);

        DPRINTF(Cache, "Got prefetch response from above for addr "
                "%#llx (%s)\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
        recvTimingResp(pkt);
        return;
    }

    // forwardLatency is set here because there is a response from an
    // upper level cache.
    // To pay for the delay incurred if the packet came from the bus,
    // we also charge headerDelay.
    Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    memSidePort->schedTimingSnoopResp(pkt, snoop_resp_time);
}

void
Cache::promoteWholeLineWrites(PacketPtr pkt)
{
    // Cache line clearing instructions
    if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) {
        pkt->cmd = MemCmd::WriteLineReq;
        DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
    }
}

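// Handle a timing-mode request from the CPU side: look the block up
// via access(), respond directly on a hit, and otherwise allocate an
// MSHR or write-buffer entry to handle the miss.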
bool
Cache::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(CacheTags, "%s tags: %s\n", __func__, tags->print());

    assert(pkt->isRequest());

    // Just forward the packet if caches are disabled.
    if (system->bypassCaches()) {
        // @todo This should really enqueue the packet rather
        bool M5_VAR_USED success = memSidePort->sendTimingReq(pkt);
        assert(success);
        return true;
    }

    promoteWholeLineWrites(pkt);

    if (pkt->memInhibitAsserted()) {
        // a cache above us (but not where the packet came from) is
        // responding to the request
        DPRINTF(Cache, "mem inhibited on addr %#llx (%s): not responding\n",
                pkt->getAddr(), pkt->isSecure() ? "s" : "ns");

        // if the packet needs exclusive, and the cache that has
        // promised to respond (setting the inhibit flag) is not
        // providing exclusive (it is in O vs M state), we know that
        // there may be other shared copies in the system; go out and
        // invalidate them all
        if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
            // create a downstream express snoop with cleared packet
            // flags, there is no need to allocate any data as the
            // packet is merely used to co-ordinate state transitions
            Packet *snoop_pkt = new Packet(pkt, true, false);

            // also reset the bus time that the original packet has
            // not yet paid for
            snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;

            // make this an instantaneous express snoop, and let the
            // other caches in the system know that the packet is
            // inhibited, because we have found the authoritative copy
            // (O) that will supply the right data
            snoop_pkt->setExpressSnoop();
            snoop_pkt->assertMemInhibit();

            // this express snoop travels towards the memory, and at
            // every crossbar it is snooped upwards thus reaching
            // every cache in the system
            bool M5_VAR_USED success = memSidePort->sendTimingReq(snoop_pkt);
            // express snoops always succeed
            assert(success);

            // main memory will delete the packet
        }

        // queue for deletion, as the sending cache is still relying
        // on the packet
        pendingDelete.reset(pkt);

        // no need to take any action in this particular cache as the
        // caches along the path to memory are allowed to keep lines
        // in a shared state, and a cache above us already committed
        // to responding
        return true;
    }

    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    // We use lookupLatency here because it is used to specify the latency
    // to access.
    Cycles lat = lookupLatency;
    CacheBlk *blk = NULL;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() calls accessBlock() which can modify lat value.
        satisfied = access(pkt, blk, lat, writebacks);

        // copy writebacks to write buffer here to ensure they logically
        // precede anything happening below
        doWritebacks(writebacks, forward_time);
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is simply lat, i.e. lookupLatency possibly
    // modified by the access() function, or just lookupLatency otherwise.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;

    // track time of availability of next prefetch, if any
    Tick next_pf_time = MaxTick;

    bool needsResponse = pkt->needsResponse();

    if (satisfied) {
        // should never be satisfying an uncacheable access as we
        // flush and invalidate any existing block as part of the
        // lookup
        assert(!pkt->req->isUncacheable());

        // hit (for all other request types)

        if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
            if (blk)
                blk->status &= ~BlkHWPrefetched;

            // Don't notify on SWPrefetch
            if (!pkt->cmd.isSWPrefetch())
                next_pf_time = prefetcher->notify(pkt);
        }

        if (needsResponse) {
            pkt->makeTimingResponse();
            // @todo: Make someone pay for this
            pkt->headerDelay = pkt->payloadDelay = 0;

            // In this case we are considering request_time that takes
            // into account the delay of the xbar, if any, and just
            // lat, neglecting responseLatency, modelling hit latency
            // just as lookupLatency or the value of lat overridden
            // by access(), which calls accessBlock().
            cpuSidePort->schedTimingResp(pkt, request_time, true);
        } else {
            DPRINTF(Cache, "%s satisfied %s addr %#llx, no response needed\n",
                    __func__, pkt->cmdString(), pkt->getAddr(),
                    pkt->getSize());

            // queue the packet for deletion, as the sending cache is
            // still relying on it; if the block is found in access(),
            // CleanEvict and Writeback messages will be deleted
            // here as well
            pendingDelete.reset(pkt);
        }
    } else {
        // miss

        Addr blk_addr = blockAlign(pkt->getAddr());

        // ignore any existing MSHR if we are dealing with an
        // uncacheable request
        MSHR *mshr = pkt->req->isUncacheable() ? nullptr :
            mshrQueue.findMatch(blk_addr, pkt->isSecure());

        // Software prefetch handling:
        // To keep the core from waiting on data it won't look at
        // anyway, send back a response with dummy data. Miss handling
        // will continue asynchronously. Unfortunately, the core will
        // insist upon freeing original Packet/Request, so we have to
        // create a new pair with a different lifecycle. Note that this
        // processing happens before any MSHR munging on the behalf of
        // this request because this new Request will be the one stored
        // into the MSHRs, not the original.
        if (pkt->cmd.isSWPrefetch()) {
            assert(needsResponse);
            assert(pkt->req->hasPaddr());
            assert(!pkt->req->isUncacheable());

            // There's no reason to add a prefetch as an additional target
            // to an existing MSHR. If an outstanding request is already
            // in progress, there is nothing for the prefetch to do.
            // If this is the case, we don't even create a request at all.
            PacketPtr pf = nullptr;

            if (!mshr) {
                // copy the request and create a new SoftPFReq packet
                RequestPtr req = new Request(pkt->req->getPaddr(),
                                             pkt->req->getSize(),
                                             pkt->req->getFlags(),
                                             pkt->req->masterId());
                pf = new Packet(req, pkt->cmd);
                pf->allocate();
                assert(pf->getAddr() == pkt->getAddr());
                assert(pf->getSize() == pkt->getSize());
            }

            pkt->makeTimingResponse();
            // for debugging, set all the bits in the response data
            // (also keeps valgrind from complaining when debugging settings
            //  print out instruction results)
            std::memset(pkt->getPtr<uint8_t>(), 0xFF, pkt->getSize());
            // request_time is used here, taking into account lat and the delay
            // charged if the packet comes from the xbar.
            cpuSidePort->schedTimingResp(pkt, request_time, true);

            // If an outstanding request is in progress (we found an
            // MSHR) this is set to null
            pkt = pf;
        }

        if (mshr) {
            /// MSHR hit
            /// @note writebacks will be checked in getNextMSHR()
            /// for any conflicting requests to the same block

            //@todo remove hw_pf here

            // Coalesce unless it was a software prefetch (see above).
            if (pkt) {
                assert(!pkt->isWriteback());
                // CleanEvicts corresponding to blocks which have
                // outstanding requests in MSHRs are simply sunk here
                if (pkt->cmd == MemCmd::CleanEvict) {
                    pendingDelete.reset(pkt);
                } else {
                    DPRINTF(Cache, "%s coalescing MSHR for %s addr %#llx size %d\n",
                            __func__, pkt->cmdString(), pkt->getAddr(),
                            pkt->getSize());

                    assert(pkt->req->masterId() < system->maxMasters());
                    mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
                    if (mshr->threadNum != 0/*pkt->req->threadId()*/) {
                        mshr->threadNum = -1;
                    }
                    // We use forward_time here because it is the same
                    // as for new targets. With multiple requests for
                    // the same address here, it specifies the latency
                    // to allocate an internal buffer and to schedule
                    // an event to the queued port, and also takes
                    // into account the additional delay of the xbar.
                    mshr->allocateTarget(pkt, forward_time, order++,
                                         allocOnFill(pkt->cmd));
                    if (mshr->getNumTargets() == numTarget) {
                        noTargetMSHR = mshr;
                        setBlocked(Blocked_NoTargets);
                        // need to be careful with this... if this mshr isn't
                        // ready yet (i.e. time > curTick()), we don't want to
                        // move it ahead of mshrs that are ready
                        // mshrQueue.moveToFront(mshr);
                    }
                }
                // We should call the prefetcher regardless of whether
                // the request is satisfied or not, and regardless of
                // whether the request is in the MSHR or not. The
                // request could be a ReadReq hit, but still not
                // satisfied (potentially because of a prior write to
                // the same cache line). So, even when not satisfied and
                // there is an MSHR already allocated for this, we need
                // to let the prefetcher know about the request.
                if (prefetcher) {
                    // Don't notify on SWPrefetch
                    if (!pkt->cmd.isSWPrefetch())
                        next_pf_time = prefetcher->notify(pkt);
                }
            }
        } else {
            // no MSHR
            assert(pkt->req->masterId() < system->maxMasters());
            if (pkt->req->isUncacheable()) {
                mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;
            } else {
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
            }

            if (pkt->isEviction() ||
                (pkt->req->isUncacheable() && pkt->isWrite())) {
                // We use forward_time here because this is an eviction or
                // an uncacheable write, forwarded to the write buffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                if (blk && blk->isValid()) {
                    // should have flushed and have no valid block
                    assert(!pkt->req->isUncacheable());

                    // If we have a write miss to a valid block, we
                    // need to mark the block non-readable.  Otherwise
                    // if we allow reads while there's an outstanding
                    // write miss, the read could return stale data
                    // out of the cache block... a more aggressive
                    // system could detect the overlap (if any) and
                    // forward data out of the MSHRs, but we don't do
                    // that yet.  Note that we do need to leave the
                    // block valid so that it stays in the cache, in
                    // case we get an upgrade response (and hence no
                    // new data) when the write miss completes.
                    // As long as CPUs do proper store/load forwarding
                    // internally, and have a sufficiently weak memory
                    // model, this is probably unnecessary, but at some
                    // point it must have seemed like we needed it...
                    assert(pkt->needsExclusive());
                    assert(!blk->isWritable());
                    blk->status &= ~BlkReadable;
                }
                // Here we are using forward_time, modelling the latency of
                // a miss (outbound) just as forwardLatency, neglecting the
                // lookupLatency component.
                allocateMissBuffer(pkt, forward_time);
            }

            if (prefetcher) {
                // Don't notify on SWPrefetch
                if (!pkt->cmd.isSWPrefetch())
                    next_pf_time = prefetcher->notify(pkt);
            }
        }
    }

    if (next_pf_time != MaxTick)
        schedMemSideSendEvent(next_pf_time);

    return true;
}


// See comment in cache.hh.
PacketPtr
Cache::getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                    bool needsExclusive) const
{
    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable()) {
        // note that at the point we see the uncacheable request we
        // flush any block, but there could be an outstanding MSHR,
        // and the cache could have filled again before we actually
        // send out the forwarded uncacheable request (blk could thus
        // be non-null)
        return NULL;
    }

    if (!blkValid &&
        (cpu_pkt->isUpgrade() ||
         cpu_pkt->isEviction())) {
        // Writebacks that weren't allocated in access() and upgrades
        // from upper-level caches that missed completely just go
        // through.
        return NULL;
    }

    assert(cpu_pkt->needsResponse());

    MemCmd cmd;
    // @TODO make useUpgrades a parameter.
    // Note that ownership protocols require upgrade, otherwise a
    // write miss on a shared owned block will generate a ReadExcl,
    // which will clobber the owned copy.
    const bool useUpgrades = true;
    if (blkValid && useUpgrades) {
        // only reason to be here is that blk is shared
        // (read-only) and we need exclusive
        assert(needsExclusive);
        assert(!blk->isWritable());
        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
               cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
        // Even though this SC will fail, we still need to send out the
        // request and get the data to supply it to other snoopers in the case
        // where the determination that the StoreCond fails is delayed due to
        // all caches not being on the same local bus.
        cmd = MemCmd::SCUpgradeFailReq;
    } else if (cpu_pkt->cmd == MemCmd::WriteLineReq) {
        // forward as invalidate to all other caches, this gives us
        // the line in exclusive state, and invalidates all other
        // copies
        cmd = MemCmd::InvalidateReq;
    } else {
        // block is invalid
        cmd = needsExclusive ? MemCmd::ReadExReq :
            (isReadOnly ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
    }
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);

    // if there are sharers in the upper levels, pass that info downstream
    if (cpu_pkt->sharedAsserted()) {
        // note that cpu_pkt may have spent a considerable time in the
        // MSHR queue and that the information could possibly be out
        // of date, however, there is no harm in conservatively
        // assuming the block is shared
        pkt->assertShared();
        DPRINTF(Cache, "%s passing shared from %s to %s addr %#llx size %d\n",
                __func__, cpu_pkt->cmdString(), pkt->cmdString(),
                pkt->getAddr(), pkt->getSize());
    }

    // the packet should be block aligned
    assert(pkt->getAddr() == blockAlign(pkt->getAddr()));

    pkt->allocate();
    DPRINTF(Cache, "%s created %s from %s for addr %#llx size %d\n",
            __func__, pkt->cmdString(), cpu_pkt->cmdString(), pkt->getAddr(),
            pkt->getSize());
    return pkt;
}


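// Handle an atomic-mode request: perform the access inline, fetching
// and filling via sendAtomic on a miss, and return the total access
// latency in ticks.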
Tick
Cache::recvAtomic(PacketPtr pkt)
{
    // We are in atomic mode so we pay just for lookupLatency here.
    Cycles lat = lookupLatency;
    // @TODO: make this a parameter
    bool last_level_cache = false;

    // Forward the request if the system is in cache bypass mode.
    if (system->bypassCaches())
        return ticksToCycles(memSidePort->sendAtomic(pkt));

    promoteWholeLineWrites(pkt);

    if (pkt->memInhibitAsserted()) {
        // have to invalidate ourselves and any lower caches even if
        // upper cache will be responding
        if (pkt->isInvalidate()) {
            CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
            if (blk && blk->isValid()) {
                tags->invalidate(blk);
                blk->invalidate();
                DPRINTF(Cache, "rcvd mem-inhibited %s on %#llx (%s):"
                        " invalidating\n",
                        pkt->cmdString(), pkt->getAddr(),
                        pkt->isSecure() ? "s" : "ns");
            }
            if (!last_level_cache) {
                DPRINTF(Cache, "forwarding mem-inhibited %s on %#llx (%s)\n",
                        pkt->cmdString(), pkt->getAddr(),
                        pkt->isSecure() ? "s" : "ns");
                lat += ticksToCycles(memSidePort->sendAtomic(pkt));
            }
        } else {
            DPRINTF(Cache, "rcvd mem-inhibited %s on %#llx: not responding\n",
                    pkt->cmdString(), pkt->getAddr());
        }

        return lat * clockPeriod();
    }

    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    CacheBlk *blk = NULL;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);

    if (!satisfied) {
        // MISS

        PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());

        bool is_forward = (bus_pkt == NULL);

        if (is_forward) {
            // just forwarding the same request to the next level
            // no local cache operation involved
            bus_pkt = pkt;
        }

        DPRINTF(Cache, "Sending an atomic %s for %#llx (%s)\n",
                bus_pkt->cmdString(), bus_pkt->getAddr(),
                bus_pkt->isSecure() ? "s" : "ns");

#if TRACING_ON
        CacheBlk::State old_state = blk ? blk->status : 0;
#endif

        lat += ticksToCycles(memSidePort->sendAtomic(bus_pkt));

        // We are now dealing with the response handling
        DPRINTF(Cache, "Receive response: %s for addr %#llx (%s) in state %i\n",
                bus_pkt->cmdString(), bus_pkt->getAddr(),
                bus_pkt->isSecure() ? "s" : "ns",
                old_state);

        // If packet was a forward, the response (if any) is already
        // in place in the bus_pkt == pkt structure, so we don't need
        // to do anything.  Otherwise, use the separate bus_pkt to
        // generate response to pkt and then delete it.
        if (!is_forward) {
            if (pkt->needsResponse()) {
                assert(bus_pkt->isResponse());
                if (bus_pkt->isError()) {
                    pkt->makeAtomicResponse();
                    pkt->copyError(bus_pkt);
                } else if (pkt->cmd == MemCmd::InvalidateReq) {
                    if (blk) {
                        // invalidate response to a cache that received
                        // an invalidate request
                        satisfyCpuSideRequest(pkt, blk);
                    }
                } else if (pkt->cmd == MemCmd::WriteLineReq) {
                    // note the use of pkt, not bus_pkt here.

                    // write-line request to the cache that promoted
                    // the write to a whole line
                    blk = handleFill(pkt, blk, writebacks,
                                     allocOnFill(pkt->cmd));
                    satisfyCpuSideRequest(pkt, blk);
                } else if (bus_pkt->isRead() ||
                           bus_pkt->cmd == MemCmd::UpgradeResp) {
                    // we're updating cache state to allow us to
                    // satisfy the upstream request from the cache
                    blk = handleFill(bus_pkt, blk, writebacks,
                                     allocOnFill(pkt->cmd));
                    satisfyCpuSideRequest(pkt, blk);
                } else {
                    // we're satisfying the upstream request without
                    // modifying cache state, e.g., a write-through
                    pkt->makeAtomicResponse();
                }
            }
            delete bus_pkt;
        }
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention.  If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used the temp block, check to see if it's valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter) first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = (blk->isDirty() || writebackClean) ?
            writebackBlk(blk) : cleanEvictBlk(blk);
        blk->invalidate();
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat * clockPeriod();
}


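// Handle a functional (debug) access: update/check the packet
// against our tags, MSHRs and port queues, and keep forwarding it
// until some copy of the data can fully satisfy it.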
void
Cache::functionalAccess(PacketPtr pkt, bool fromCpuSide)
{
    if (system->bypassCaches()) {
        // Packets from the memory side are snoop requests and
        // shouldn't happen in bypass mode.
        assert(fromCpuSide);

        // The cache should be flushed if we are in cache bypass mode,
        // so we don't need to check if we need to update anything.
        memSidePort->sendFunctional(pkt);
        return;
    }

    Addr blk_addr = blockAlign(pkt->getAddr());
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found.  As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.

    // see if we have data at all (owned or otherwise)
    bool have_data = blk && blk->isValid()
        && pkt->checkFunctional(&cbpw, blk_addr, is_secure, blkSize,
                                blk->data);

    // data we have is dirty if marked as such or if valid & ownership
    // pending due to outstanding UpgradeReq
    bool have_dirty =
        have_data && (blk->isDirty() ||
                      (mshr && mshr->inService && mshr->isPendingDirty()));

    bool done = have_dirty
        || cpuSidePort->checkFunctional(pkt)
        || mshrQueue.checkFunctional(pkt, blk_addr)
        || writeBuffer.checkFunctional(pkt, blk_addr)
        || memSidePort->checkFunctional(pkt);

    DPRINTF(Cache, "functional %s %#llx (%s) %s%s%s\n",
            pkt->cmdString(), pkt->getAddr(), is_secure ? "s" : "ns",
            (blk && blk->isValid()) ? "valid " : "",
            have_data ? "data " : "", done ? "done " : "");

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (done) {
        pkt->makeResponse();
    } else {
        // if it came as a request from the CPU side then make sure it
        // continues towards the memory side
        if (fromCpuSide) {
            memSidePort->sendFunctional(pkt);
        } else if (forwardSnoops && cpuSidePort->isSnooping()) {
            // if it came from the memory side, it must be a snoop request
            // and we should only forward it if we are forwarding snoops
            cpuSidePort->sendFunctionalSnoop(pkt);
        }
    }
}


/////////////////////////////////////////////////////
//
// Response handling: responses from the memory side
//
/////////////////////////////////////////////////////


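// Handle a timing-mode response from the memory side: locate the
// owning MSHR, fill the cache if the response carries data, and
// then service each of the MSHR's targets in turn.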
void
Cache::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    // all header delay should be paid for by the crossbar, unless
    // this is a prefetch response from above
    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
             "%s saw a non-zero packet delay\n", name());

    MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
    bool is_error = pkt->isError();

    assert(mshr);

    if (is_error) {
        DPRINTF(Cache, "Cache received packet with error for addr %#llx (%s), "
                "cmd: %s\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns",
                pkt->cmdString());
    }

    DPRINTF(Cache, "Handling response %s for addr %#llx size %d (%s)\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize(),
            pkt->isSecure() ? "s" : "ns");

    MSHRQueue *mq = mshr->queue;
    bool wasFull = mq->isFull();

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = NULL;
    }

    // Initial target is used just for stats
    MSHR::Target *initial_tgt = mshr->getTarget();
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick() - initial_tgt->recvTime;
    PacketList writebacks;
    // We need forward_time here because we have a call to
    // allocateWriteBuffer() that needs this parameter to specify the
    // time to request the bus.  In this case we use forward latency
    // because there is a writeback.  We also pay here for headerDelay,
    // which is charged for bus latencies if the packet comes from the
    // bus.
1275    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
1276
1277    if (pkt->req->isUncacheable()) {
1278        assert(pkt->req->masterId() < system->maxMasters());
1279        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
1280            miss_latency;
1281    } else {
1282        assert(pkt->req->masterId() < system->maxMasters());
1283        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
1284            miss_latency;
1285    }
1286
1287    // upgrade deferred targets if we got exclusive
1288    if (!pkt->sharedAsserted()) {
1289        mshr->promoteExclusive();
1290    }
1291
1292    bool is_fill = !mshr->isForward &&
1293        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
1294
1295    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
1296
1297    if (is_fill && !is_error) {
1298        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
1299                pkt->getAddr());
1300
1301        blk = handleFill(pkt, blk, writebacks, mshr->allocOnFill);
1302        assert(blk != NULL);
1303    }
1304
1305    // allow invalidation responses originating from write-line
1306    // requests to be discarded
1307    bool is_invalidate = pkt->isInvalidate();
1308
1309    // First offset for critical word first calculations
1310    int initial_offset = initial_tgt->pkt->getOffset(blkSize);
1311
1312    while (mshr->hasTargets()) {
1313        MSHR::Target *target = mshr->getTarget();
1314        Packet *tgt_pkt = target->pkt;
1315
1316        switch (target->source) {
1317          case MSHR::Target::FromCPU:
1318            Tick completion_time;
1319            // Here we charge completion_time with the delay of the
1320            // xbar, carried in headerDelay, if the packet came through it.
1321            completion_time = pkt->headerDelay;
1322
1323            // Software prefetch handling for cache closest to core
1324            if (tgt_pkt->cmd.isSWPrefetch()) {
1325                // a software prefetch would have already been ack'd immediately
1326                // with dummy data so the core would be able to retire it.
1327                // This request completes right here, so we deallocate it.
1328                delete tgt_pkt->req;
1329                delete tgt_pkt;
1330                break; // skip response
1331            }
1332
1333            // unlike the other packet flows, where data is found in other
1334            // caches or memory and brought back, write-line requests always
1335            // have the data right away, so the above check for "is fill?"
1336            // cannot be made without examining the stored MSHR
1337            // state. We "catch up" with that logic here, which is duplicated
1338            // from above.
1339            if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
1340                assert(!is_error);
1341                // we got the block in exclusive state, so promote any
1342                // deferred targets if possible
1343                mshr->promoteExclusive();
1344                // NB: we use the original packet here and not the response!
1345                blk = handleFill(tgt_pkt, blk, writebacks, mshr->allocOnFill);
1346                assert(blk != NULL);
1347
1348                // treat as a fill, and discard the invalidation
1349                // response
1350                is_fill = true;
1351                is_invalidate = false;
1352            }
1353
1354            if (is_fill) {
1355                satisfyCpuSideRequest(tgt_pkt, blk,
1356                                      true, mshr->hasPostDowngrade());
1357
1358                // How many bytes past the first request is this one?
1359                int transfer_offset =
1360                    tgt_pkt->getOffset(blkSize) - initial_offset;
1361                if (transfer_offset < 0) {
1362                    transfer_offset += blkSize;
1363                }
1364
1365                // If not the critical word (offset != 0), charge payloadDelay.
1366                // responseLatency is the latency of the return path
1367                // from lower level caches/memory to an upper level cache or
1368                // the core.
1369                completion_time += clockEdge(responseLatency) +
1370                    (transfer_offset ? pkt->payloadDelay : 0);
1371
1372                assert(!tgt_pkt->req->isUncacheable());
1373
1374                assert(tgt_pkt->req->masterId() < system->maxMasters());
1375                missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] +=
1376                    completion_time - target->recvTime;
1377            } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
1378                // failed StoreCond upgrade
1379                assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
1380                       tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
1381                       tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
1382                // responseLatency is the latency of the return path
1383                // from lower level caches/memory to an upper level cache or
1384                // the core.
1385                completion_time += clockEdge(responseLatency) +
1386                    pkt->payloadDelay;
1387                tgt_pkt->req->setExtraData(0);
1388            } else {
1389                // not a cache fill, just forwarding response
1390                // responseLatency is the latency of the return path
1391                // from lower level caches/memory to the core.
1392                completion_time += clockEdge(responseLatency) +
1393                    pkt->payloadDelay;
1394                if (pkt->isRead() && !is_error) {
1395                    // sanity check
1396                    assert(pkt->getAddr() == tgt_pkt->getAddr());
1397                    assert(pkt->getSize() >= tgt_pkt->getSize());
1398
1399                    tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
1400                }
1401            }
1402            tgt_pkt->makeTimingResponse();
1403            // if this packet is an error copy that to the new packet
1404            if (is_error)
1405                tgt_pkt->copyError(pkt);
1406            if (tgt_pkt->cmd == MemCmd::ReadResp &&
1407                (is_invalidate || mshr->hasPostInvalidate())) {
1408                // If intermediate cache got ReadRespWithInvalidate,
1409                // propagate that.  Response should not have
1410                // isInvalidate() set otherwise.
1411                tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
1412                DPRINTF(Cache, "%s updated cmd to %s for addr %#llx\n",
1413                        __func__, tgt_pkt->cmdString(), tgt_pkt->getAddr());
1414            }
1415            // Reset the bus additional time as it is now accounted for
1416            tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
1417            cpuSidePort->schedTimingResp(tgt_pkt, completion_time, true);
1418            break;
1419
1420          case MSHR::Target::FromPrefetcher:
1421            assert(tgt_pkt->cmd == MemCmd::HardPFReq);
1422            if (blk)
1423                blk->status |= BlkHWPrefetched;
1424            delete tgt_pkt->req;
1425            delete tgt_pkt;
1426            break;
1427
1428          case MSHR::Target::FromSnoop:
1429            // I don't believe that a snoop can be in an error state
1430            assert(!is_error);
1431            // response to snoop request
1432            DPRINTF(Cache, "processing deferred snoop...\n");
1433            assert(!(is_invalidate && !mshr->hasPostInvalidate()));
1434            handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
1435            break;
1436
1437          default:
1438            panic("Illegal target->source enum %d\n", target->source);
1439        }
1440
1441        mshr->popTarget();
1442    }
1443
1444    if (blk && blk->isValid()) {
1445        // an invalidate response stemming from a write line request
1446        // should not invalidate the block, so check if the
1447        // invalidation should be discarded
1448        if (is_invalidate || mshr->hasPostInvalidate()) {
1449            invalidateBlock(blk);
1450        } else if (mshr->hasPostDowngrade()) {
1451            blk->status &= ~BlkWritable;
1452        }
1453    }
1454
1455    if (mshr->promoteDeferredTargets()) {
1456        // avoid later read getting stale data while write miss is
1457        // outstanding.. see comment in timingAccess()
1458        if (blk) {
1459            blk->status &= ~BlkReadable;
1460        }
1461        mq = mshr->queue;
1462        mq->markPending(mshr);
1463        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
1464    } else {
1465        mq->deallocate(mshr);
1466        if (wasFull && !mq->isFull()) {
1467            clearBlocked((BlockedCause)mq->index);
1468        }
1469
1470        // Request the bus for a prefetch if this deallocation freed enough
1471        // MSHRs for a prefetch to take place
1472        if (prefetcher && mq == &mshrQueue && mshrQueue.canPrefetch()) {
1473            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
1474                                         clockEdge());
1475            if (next_pf_time != MaxTick)
1476                schedMemSideSendEvent(next_pf_time);
1477        }
1478    }
1479    // reset the xbar additional timing as it is now accounted for
1480    pkt->headerDelay = pkt->payloadDelay = 0;
1481
1482    // copy writebacks to write buffer
1483    doWritebacks(writebacks, forward_time);
1484
1485    // if we used temp block, check to see if its valid and then clear it out
1486    if (blk == tempBlock && tempBlock->isValid()) {
1487        // We use forwardLatency here because we are copying
1488        // Writebacks/CleanEvicts to write buffer. It specifies the latency to
1489        // allocate an internal buffer and to schedule an event to the
1490        // queued port.
1491        if (blk->isDirty() || writebackClean) {
1492            PacketPtr wbPkt = writebackBlk(blk);
1493            allocateWriteBuffer(wbPkt, forward_time);
1494            // Set BLOCK_CACHED flag if cached above.
1495            if (isCachedAbove(wbPkt))
1496                wbPkt->setBlockCached();
1497        } else {
1498            PacketPtr wcPkt = cleanEvictBlk(blk);
1499            // Check to see if block is cached above. If not allocate
1500            // write buffer
1501            if (isCachedAbove(wcPkt))
1502                delete wcPkt;
1503            else
1504                allocateWriteBuffer(wcPkt, forward_time);
1505        }
1506        blk->invalidate();
1507    }
1508
1509    DPRINTF(Cache, "Leaving %s with %s for addr %#llx\n", __func__,
1510            pkt->cmdString(), pkt->getAddr());
1511    delete pkt;
1512}
1513
1514PacketPtr
1515Cache::writebackBlk(CacheBlk *blk)
1516{
1517    chatty_assert(!isReadOnly || writebackClean,
1518                  "Writeback from read-only cache");
1519    assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));
1520
1521    writebacks[Request::wbMasterId]++;
1522
1523    Request *req = new Request(tags->regenerateBlkAddr(blk->tag, blk->set),
1524                               blkSize, 0, Request::wbMasterId);
1525    if (blk->isSecure())
1526        req->setFlags(Request::SECURE);
1527
1528    req->taskId(blk->task_id);
1529    blk->task_id = ContextSwitchTaskId::Unknown;
1530    blk->tickInserted = curTick();
1531
1532    PacketPtr pkt =
1533        new Packet(req, blk->isDirty() ?
1534                   MemCmd::WritebackDirty : MemCmd::WritebackClean);
1535
1536    DPRINTF(Cache, "Create Writeback %#llx writable: %d, dirty: %d\n",
1537            pkt->getAddr(), blk->isWritable(), blk->isDirty());
1538
1539    if (blk->isWritable()) {
1540        // not asserting shared means we pass the block in modified
1541        // state, mark our own block non-writeable
1542        blk->status &= ~BlkWritable;
1543    } else {
1544        // we are in the owned state, tell the receiver
1545        pkt->assertShared();
1546    }
1547
1548    // make sure the block is not marked dirty
1549    blk->status &= ~BlkDirty;
1550
1551    pkt->allocate();
1552    std::memcpy(pkt->getPtr<uint8_t>(), blk->data, blkSize);
1553
1554    return pkt;
1555}
1556
1557PacketPtr
1558Cache::cleanEvictBlk(CacheBlk *blk)
1559{
1560    assert(!writebackClean);
1561    assert(blk && blk->isValid() && !blk->isDirty());
1562    // Creating a zero sized write, a message to the snoop filter
1563    Request *req =
1564        new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0,
1565                    Request::wbMasterId);
1566    if (blk->isSecure())
1567        req->setFlags(Request::SECURE);
1568
1569    req->taskId(blk->task_id);
1570    blk->task_id = ContextSwitchTaskId::Unknown;
1571    blk->tickInserted = curTick();
1572
1573    PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
1574    pkt->allocate();
1575    DPRINTF(Cache, "%s%s %x Create CleanEvict\n", pkt->cmdString(),
1576            pkt->req->isInstFetch() ? " (ifetch)" : "",
1577            pkt->getAddr());
1578
1579    return pkt;
1580}
1581
1582void
1583Cache::memWriteback()
1584{
1585    CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor);
1586    tags->forEachBlk(visitor);
1587}
1588
1589void
1590Cache::memInvalidate()
1591{
1592    CacheBlkVisitorWrapper visitor(*this, &Cache::invalidateVisitor);
1593    tags->forEachBlk(visitor);
1594}
1595
1596bool
1597Cache::isDirty() const
1598{
1599    CacheBlkIsDirtyVisitor visitor;
1600    tags->forEachBlk(visitor);
1601
1602    return visitor.isDirty();
1603}
1604
1605bool
1606Cache::writebackVisitor(CacheBlk &blk)
1607{
1608    if (blk.isDirty()) {
1609        assert(blk.isValid());
1610
1611        Request request(tags->regenerateBlkAddr(blk.tag, blk.set),
1612                        blkSize, 0, Request::funcMasterId);
1613        request.taskId(blk.task_id);
1614
1615        Packet packet(&request, MemCmd::WriteReq);
1616        packet.dataStatic(blk.data);
1617
1618        memSidePort->sendFunctional(&packet);
1619
1620        blk.status &= ~BlkDirty;
1621    }
1622
1623    return true;
1624}
1625
1626bool
1627Cache::invalidateVisitor(CacheBlk &blk)
1628{
1629
1630    if (blk.isDirty())
1631        warn_once("Invalidating dirty cache lines. Expect things to break.\n");
1632
1633    if (blk.isValid()) {
1634        assert(!blk.isDirty());
1635        tags->invalidate(&blk);
1636        blk.invalidate();
1637    }
1638
1639    return true;
1640}
1641
1642CacheBlk*
1643Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks)
1644{
1645    CacheBlk *blk = tags->findVictim(addr);
1646
1647    // It is valid to return NULL if there is no victim
1648    if (!blk)
1649        return nullptr;
1650
1651    if (blk->isValid()) {
1652        Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
1653        MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
1654        if (repl_mshr) {
1655            // must be an outstanding upgrade request
1656            // on a block we're about to replace...
1657            assert(!blk->isWritable() || blk->isDirty());
1658            assert(repl_mshr->needsExclusive());
1659            // too hard to replace block with transient state
1660            // allocation failed, block not inserted
1661            return NULL;
1662        } else {
1663            DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx (%s): %s\n",
1664                    repl_addr, blk->isSecure() ? "s" : "ns",
1665                    addr, is_secure ? "s" : "ns",
1666                    blk->isDirty() ? "writeback" : "clean");
1667
1668            // Will send up Writeback/CleanEvict snoops via isCachedAbove
1669            // when pushing this writeback list into the write buffer.
1670            if (blk->isDirty() || writebackClean) {
1671                // Save writeback packet for handling by caller
1672                writebacks.push_back(writebackBlk(blk));
1673            } else {
1674                writebacks.push_back(cleanEvictBlk(blk));
1675            }
1676        }
1677    }
1678
1679    return blk;
1680}
1681
1682void
1683Cache::invalidateBlock(CacheBlk *blk)
1684{
1685    if (blk != tempBlock)
1686        tags->invalidate(blk);
1687    blk->invalidate();
1688}
1689
1690// Note that the reason we return a list of writebacks rather than
1691// inserting them directly in the write buffer is that this function
1692// is called by both atomic and timing-mode accesses, and in atomic
1693// mode we don't mess with the write buffer (we just perform the
1694// writebacks atomically once the original request is complete).
1695CacheBlk*
1696Cache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
1697                  bool allocate)
1698{
1699    assert(pkt->isResponse() || pkt->cmd == MemCmd::WriteLineReq);
1700    Addr addr = pkt->getAddr();
1701    bool is_secure = pkt->isSecure();
1702#if TRACING_ON
1703    CacheBlk::State old_state = blk ? blk->status : 0;
1704#endif
1705
1706    // When handling a fill, discard any CleanEvicts for the
1707    // same address in write buffer.
1708    Addr M5_VAR_USED blk_addr = blockAlign(pkt->getAddr());
1709    std::vector<MSHR *> M5_VAR_USED wbs;
1710    assert(!writeBuffer.findMatches(blk_addr, is_secure, wbs));
1711
1712    if (blk == NULL) {
1713        // better have read new data...
1714        assert(pkt->hasData());
1715
1716        // only read responses and write-line requests have data;
1717        // note that we don't write the data here for write-line - that
1718        // happens in the subsequent satisfyCpuSideRequest.
1719        assert(pkt->isRead() || pkt->cmd == MemCmd::WriteLineReq);
1720
1721        // need to do a replacement if allocating, otherwise we stick
1722        // with the temporary storage
1723        blk = allocate ? allocateBlock(addr, is_secure, writebacks) : NULL;
1724
1725        if (blk == NULL) {
1726            // No replaceable block or a mostly exclusive
1727            // cache... just use temporary storage to complete the
1728            // current request and then get rid of it
1729            assert(!tempBlock->isValid());
1730            blk = tempBlock;
1731            tempBlock->set = tags->extractSet(addr);
1732            tempBlock->tag = tags->extractTag(addr);
1733            // @todo: set security state as well...
1734            DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
1735                    is_secure ? "s" : "ns");
1736        } else {
1737            tags->insertBlock(pkt, blk);
1738        }
1739
1740        // we should never be overwriting a valid block
1741        assert(!blk->isValid());
1742    } else {
1743        // existing block... probably an upgrade
1744        assert(blk->tag == tags->extractTag(addr));
1745        // either we're getting new data or the block should already be valid
1746        assert(pkt->hasData() || blk->isValid());
1747        // don't clear block status... if block is already dirty we
1748        // don't want to lose that
1749    }
1750
1751    if (is_secure)
1752        blk->status |= BlkSecure;
1753    blk->status |= BlkValid | BlkReadable;
1754
1755    // sanity check for whole-line writes, which should always be
1756    // marked as writable as part of the fill, and then later marked
1757    // dirty as part of satisfyCpuSideRequest
1758    if (pkt->cmd == MemCmd::WriteLineReq) {
1759        assert(!pkt->sharedAsserted());
1760        // at the moment other caches do not respond to the
1761        // invalidation requests corresponding to a whole-line write
1762        assert(!pkt->memInhibitAsserted());
1763    }
1764
1765    if (!pkt->sharedAsserted()) {
1766        // we could get non-shared responses from memory (rather than
1767        // a cache) even in a read-only cache, note that we set this
1768        // bit even for a read-only cache as we use it to represent
1769        // the exclusive state
1770        blk->status |= BlkWritable;
1771
1772        // If we got this via cache-to-cache transfer (i.e., from a
1773        // cache that was an owner) and took away that owner's copy,
1774        // then we need to write it back.  Normally this happens
1775        // anyway as a side effect of getting a copy to write it, but
1776        // there are cases (such as failed store conditionals or
1777        // compare-and-swaps) where we'll demand an exclusive copy but
1778        // end up not writing it.
1779        if (pkt->memInhibitAsserted()) {
1780            blk->status |= BlkDirty;
1781
1782            chatty_assert(!isReadOnly, "Should never see dirty snoop response "
1783                          "in read-only cache %s\n", name());
1784        }
1785    }
1786
1787    DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
1788            addr, is_secure ? "s" : "ns", old_state, blk->print());
1789
1790    // if we got new data, copy it in (checking for a read response
1791    // and a response that has data is the same in the end)
1792    if (pkt->isRead()) {
1793        // sanity checks
1794        assert(pkt->hasData());
1795        assert(pkt->getSize() == blkSize);
1796
1797        std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
1798    }
1799    // We pay for fillLatency here.
1800    blk->whenReady = clockEdge() + fillLatency * clockPeriod() +
1801        pkt->payloadDelay;
1802
1803    return blk;
1804}
1805
1806
1807/////////////////////////////////////////////////////
1808//
1809// Snoop path: requests coming in from the memory side
1810//
1811/////////////////////////////////////////////////////
1812
1813void
1814Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
1815                              bool already_copied, bool pending_inval)
1816{
1817    // sanity check
1818    assert(req_pkt->isRequest());
1819    assert(req_pkt->needsResponse());
1820
1821    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
1822            req_pkt->cmdString(), req_pkt->getAddr(), req_pkt->getSize());
1823    // timing-mode snoop responses require a new packet, unless we
1824    // already made a copy...
1825    PacketPtr pkt = req_pkt;
1826    if (!already_copied)
1827        // do not clear flags, and allocate space for data if the
1828        // packet needs it (the only packets that carry data are read
1829        // responses)
1830        pkt = new Packet(req_pkt, false, req_pkt->isRead());
1831
1832    assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
1833           pkt->sharedAsserted());
1834    pkt->makeTimingResponse();
1835    if (pkt->isRead()) {
1836        pkt->setDataFromBlock(blk_data, blkSize);
1837    }
1838    if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
1839        // Assume we defer a response to a read from a far-away cache
1840        // A, then later defer a ReadExcl from a cache B on the same
1841        // bus as us.  We'll assert MemInhibit in both cases, but in
1842        // the latter case MemInhibit will keep the invalidation from
1843        // reaching cache A.  This special response tells cache A that
1844        // it gets the block to satisfy its read, but must immediately
1845        // invalidate it.
1846        pkt->cmd = MemCmd::ReadRespWithInvalidate;
1847    }
1848    // Here we consider forward_time, paying for just forward latency and
1849    // also charging the delay provided by the xbar.
1850    // forward_time is used as the send time in schedTimingSnoopResp().
1851    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
1852    // Here we reset the timing of the packet.
1853    pkt->headerDelay = pkt->payloadDelay = 0;
1854    DPRINTF(Cache, "%s created response: %s addr %#llx size %d tick: %lu\n",
1855            __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize(),
1856            forward_time);
1857    memSidePort->schedTimingSnoopResp(pkt, forward_time, true);
1858}
1859
1860uint32_t
1861Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
1862                   bool is_deferred, bool pending_inval)
1863{
1864    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
1865            pkt->cmdString(), pkt->getAddr(), pkt->getSize());
1866    // deferred snoops can only happen in timing mode
1867    assert(!(is_deferred && !is_timing));
1868    // pending_inval only makes sense on deferred snoops
1869    assert(!(pending_inval && !is_deferred));
1870    assert(pkt->isRequest());
1871
1872    // the packet may get modified if we or a forwarded snooper
1873    // responds in atomic mode, so remember a few things about the
1874    // original packet up front
1875    bool invalidate = pkt->isInvalidate();
1876    bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
1877
1878    uint32_t snoop_delay = 0;
1879
1880    if (forwardSnoops) {
1881        // first propagate snoop upward to see if anyone above us wants to
1882        // handle it.  save & restore packet src since it will get
1883        // rewritten to be relative to cpu-side bus (if any)
1884        bool alreadyResponded = pkt->memInhibitAsserted();
1885        if (is_timing) {
1886            // copy the packet so that we can clear any flags before
1887            // forwarding it upwards, we also allocate data (passing
1888            // the pointer along in case of static data), in case
1889            // there is a snoop hit in upper levels
1890            Packet snoopPkt(pkt, true, true);
1891            snoopPkt.setExpressSnoop();
1892            // the snoop packet does not need to wait any additional
1893            // time
1894            snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
1895            cpuSidePort->sendTimingSnoopReq(&snoopPkt);
1896
1897            // add the header delay (including crossbar and snoop
1898            // delays) of the upward snoop to the snoop delay for this
1899            // cache
1900            snoop_delay += snoopPkt.headerDelay;
1901
1902            if (snoopPkt.memInhibitAsserted()) {
1903                // cache-to-cache response from some upper cache
1904                assert(!alreadyResponded);
1905                pkt->assertMemInhibit();
1906            }
1907            if (snoopPkt.sharedAsserted()) {
1908                pkt->assertShared();
1909            }
1910            // If this request is a prefetch or clean evict and an upper level
1911            // signals block present, make sure to propagate the block
1912            // presence to the requester.
1913            if (snoopPkt.isBlockCached()) {
1914                pkt->setBlockCached();
1915            }
1916        } else {
1917            cpuSidePort->sendAtomicSnoop(pkt);
1918            if (!alreadyResponded && pkt->memInhibitAsserted()) {
1919                // cache-to-cache response from some upper cache:
1920                // forward response to original requester
1921                assert(pkt->isResponse());
1922            }
1923        }
1924    }
1925
1926    if (!blk || !blk->isValid()) {
1927        DPRINTF(Cache, "%s snoop miss for %s addr %#llx size %d\n",
1928                __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
1929        return snoop_delay;
1930    } else {
1931        DPRINTF(Cache, "%s snoop hit for %s addr %#llx size %d, "
1932                "old state is %s\n", __func__, pkt->cmdString(),
1933                pkt->getAddr(), pkt->getSize(), blk->print());
1934    }
1935
1936    chatty_assert(!(isReadOnly && blk->isDirty()),
1937                  "Should never have a dirty block in a read-only cache %s\n",
1938                  name());
1939
1940    // We may end up modifying both the block state and the packet (if
1941    // we respond in atomic mode), so just figure out what to do now
1942    // and then do it later. If we find dirty data while snooping for
1943    // an invalidate, we don't need to send a response. The
1944    // invalidation itself is taken care of below.
1945    bool respond = blk->isDirty() && pkt->needsResponse() &&
1946        pkt->cmd != MemCmd::InvalidateReq;
1947    bool have_exclusive = blk->isWritable();
1948
1949    // Invalidate any prefetches from below that would strip write
1950    // permissions.  MemCmd::HardPFReq is only observed by upstream
1951    // caches.  After missing above and in its own cache, a new
1952    // MemCmd::ReadReq is created that downstream caches observe.
1953    if (pkt->mustCheckAbove()) {
1954        DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s from"
1955                " lower cache\n", pkt->getAddr(), pkt->cmdString());
1956        pkt->setBlockCached();
1957        return snoop_delay;
1958    }
1959
1960    if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
1961        // reading non-exclusive shared data, note that we retain
1962        // the block in owned state if it is dirty, with the response
1963    // taken care of below, and otherwise simply downgrade to
1964        // shared
1965        assert(!needs_exclusive);
1966        pkt->assertShared();
1967        blk->status &= ~BlkWritable;
1968    }
1969
1970    if (respond) {
1971        // prevent anyone else from responding, cache as well as
1972        // memory, and also prevent any memory from even seeing the
1973        // request (with current inhibited semantics), note that this
1974        // applies both to reads and writes and that for writes it
1975        // works thanks to the fact that we still have dirty data and
1976        // will write it back at a later point
1977        assert(!pkt->memInhibitAsserted());
1978        pkt->assertMemInhibit();
1979        if (have_exclusive) {
1980            // in the case of an uncacheable request there is no point
1981            // in setting the exclusive flag, but since the recipient
1982            // does not care there is no harm in doing so, in any case
1983            // it is just a hint
1984            pkt->setSupplyExclusive();
1985        }
1986        if (is_timing) {
1987            doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
1988        } else {
1989            pkt->makeAtomicResponse();
1990            pkt->setDataFromBlock(blk->data, blkSize);
1991        }
1992    }
1993
1994    if (!respond && is_timing && is_deferred) {
1995        // if it's a deferred timing snoop to which we are not
1996        // responding, then we've made a copy of both the request and
1997        // the packet, delete them here
1998        assert(pkt->needsResponse());
1999        delete pkt->req;
2000        delete pkt;
2001    }
2002
2003    // Do this last in case it deallocates block data or something
2004    // like that
2005    if (invalidate) {
2006        invalidateBlock(blk);
2007    }
2008
2009    DPRINTF(Cache, "new state is %s\n", blk->print());
2010
2011    return snoop_delay;
2012}
2013
2014
2015void
2016Cache::recvTimingSnoopReq(PacketPtr pkt)
2017{
2018    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
2019            pkt->cmdString(), pkt->getAddr(), pkt->getSize());
2020
2021    // Snoops shouldn't happen when bypassing caches
2022    assert(!system->bypassCaches());
2023
2024    // no need to snoop requests that are not in range
2025    if (!inRange(pkt->getAddr())) {
2026        return;
2027    }
2028
2029    bool is_secure = pkt->isSecure();
2030    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
2031
2032    Addr blk_addr = blockAlign(pkt->getAddr());
2033    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
2034
2035    // Update the latency cost of the snoop so that the crossbar can
2036    // account for it. Do not overwrite what other neighbouring caches
2037    // have already done, rather take the maximum. The update is
2038    // tentative, for cases where we return before an upward snoop
2039    // happens below.
2040    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
2041                                         lookupLatency * clockPeriod());
2042
2043    // Inform requests from below (Prefetch, CleanEvict or Writeback)
2044    // of an MSHR hit by setting BLOCK_CACHED.
2045    if (mshr && pkt->mustCheckAbove()) {
2046        DPRINTF(Cache, "Setting block cached for %s from"
2047                "lower cache on mshr hit %#x\n",
2048                pkt->cmdString(), pkt->getAddr());
2049        pkt->setBlockCached();
2050        return;
2051    }
2052
2053    // Let the MSHR itself track the snoop and decide whether we want
2054    // to go ahead and do the regular cache snoop
2055    if (mshr && mshr->handleSnoop(pkt, order++)) {
2056        DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s)."
2057                "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
2058                mshr->print());
2059
2060        if (mshr->getNumTargets() > numTarget)
2061            warn("allocating bonus target for snoop"); //handle later
2062        return;
2063    }
2064
2065    //We also need to check the writeback buffers and handle those
2066    std::vector<MSHR *> writebacks;
2067    if (writeBuffer.findMatches(blk_addr, is_secure, writebacks)) {
2068        DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
2069                pkt->getAddr(), is_secure ? "s" : "ns");
2070
2071        // Look through writebacks for any cacheable writes.
2072        // We should only ever find a single match
2073        assert(writebacks.size() == 1);
2074        MSHR *wb_entry = writebacks[0];
2075        // Expect to see only Writebacks and/or CleanEvicts here, both of
2076        // which should not be generated for uncacheable data.
2077        assert(!wb_entry->isUncacheable());
2078        // There should only be a single request responsible for generating
2079        // Writebacks/CleanEvicts.
2080        assert(wb_entry->getNumTargets() == 1);
2081        PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
2082        assert(wb_pkt->isEviction());
2083
2084        if (pkt->isEviction()) {
2085            // if the block is found in the write queue, set the BLOCK_CACHED
2086            // flag for Writeback/CleanEvict snoop. On return the snoop will
2087            // propagate the BLOCK_CACHED flag in Writeback packets and prevent
2088            // any CleanEvicts from travelling down the memory hierarchy.
2089            pkt->setBlockCached();
2090            DPRINTF(Cache, "Squashing %s from lower cache on writequeue hit"
2091                    " %#x\n", pkt->cmdString(), pkt->getAddr());
2092            return;
2093        }
2094
2095        if (wb_pkt->cmd == MemCmd::WritebackDirty) {
2096            assert(!pkt->memInhibitAsserted());
2097            pkt->assertMemInhibit();
2098            if (!pkt->needsExclusive()) {
2099                pkt->assertShared();
2100                // the writeback is no longer passing exclusivity (the
2101                // receiving cache should consider the block owned
2102                // rather than modified)
2103                wb_pkt->assertShared();
2104            } else {
2105                // if we're not asserting the shared line, we need to
2106                // invalidate our copy.  we'll do that below as long as
2107                // the packet's invalidate flag is set...
2108                assert(pkt->isInvalidate());
2109            }
2110            doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
2111                                   false, false);
2112        } else {
2113            // on hitting a clean writeback we play it safe and do not
2114            // provide a response, the block may be dirty somewhere
2115            // else
2116            assert(wb_pkt->isCleanEviction());
2117            // The cache technically holds the block until the
2118            // corresponding message reaches the crossbar
2119            // below. Therefore when a snoop encounters a CleanEvict
2120            // or WritebackClean message we must set assertShared
2121            // (just like when it encounters a Writeback) to avoid the
2122            // snoop filter prematurely clearing the holder bit in the
2123            // crossbar below
2124            if (!pkt->needsExclusive()) {
2125                pkt->assertShared();
2126                // the writeback is no longer passing exclusivity (the
2127                // receiving cache should consider the block owned
2128                // rather than modified)
2129                wb_pkt->assertShared();
2130            } else {
2131                assert(pkt->isInvalidate());
2132            }
2133        }
2134
2135        if (pkt->isInvalidate()) {
2136            // Invalidation trumps our writeback... discard here
2137            // Note: markInService will remove entry from writeback buffer.
2138            markInService(wb_entry, false);
2139            delete wb_pkt;
2140        }
2141    }
2142
2143    // If this was a shared writeback, there may still be
2144    // other shared copies above that require invalidation.
2145    // We could be more selective and return here if the
2146    // request is non-exclusive or if the writeback is
2147    // exclusive.
2148    uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false);
2149
2150    // Override what we did when we first saw the snoop, as we now
2151    // also have the cost of the upwards snoops to account for
2152    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay +
2153                                         lookupLatency * clockPeriod());
2154}
2155
2156bool
2157Cache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
2158{
2159    // Express snoop responses from master to slave, e.g., from L1 to L2
2160    cache->recvTimingSnoopResp(pkt);
2161    return true;
2162}
2163
2164Tick
2165Cache::recvAtomicSnoop(PacketPtr pkt)
2166{
2167    // Snoops shouldn't happen when bypassing caches
2168    assert(!system->bypassCaches());
2169
2170    // no need to snoop requests that are not in range.
2171    if (!inRange(pkt->getAddr())) {
2172        return 0;
2173    }
2174
2175    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
2176    uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
2177    return snoop_delay + lookupLatency * clockPeriod();
2178}
2179
2180
2181MSHR *
2182Cache::getNextMSHR()
2183{
2184    // Check both MSHR queue and write buffer for potential requests,
2185    // note that null does not mean there is no request, it could
2186    // simply be that it is not ready
2187    MSHR *miss_mshr  = mshrQueue.getNextMSHR();
2188    MSHR *write_mshr = writeBuffer.getNextMSHR();
2189
2190    // If we got a write buffer request ready, first priority is a
2191    // full write buffer, otherwise we favour the miss requests
2192    if (write_mshr &&
2193        ((writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) ||
2194         !miss_mshr)) {
2195        // need to search MSHR queue for conflicting earlier miss.
2196        MSHR *conflict_mshr =
2197            mshrQueue.findPending(write_mshr->blkAddr,
2198                                  write_mshr->isSecure);
2199
2200        if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
2201            // Service misses in order until conflict is cleared.
2202            return conflict_mshr;
2203
2204            // @todo Note that we ignore the ready time of the conflict here
2205        }
2206
2207        // No conflicts; issue write
2208        return write_mshr;
2209    } else if (miss_mshr) {
2210        // need to check for conflicting earlier writeback
2211        MSHR *conflict_mshr =
2212            writeBuffer.findPending(miss_mshr->blkAddr,
2213                                    miss_mshr->isSecure);
2214        if (conflict_mshr) {
2215            // not sure why we don't check order here... it was in the
2216            // original code but commented out.
2217
2218            // The only way this happens is if we are
2219            // doing a write and we didn't have permissions
2220            // then subsequently saw a writeback (owned got evicted)
2221            // We need to make sure to perform the writeback first
2222            // To preserve the dirty data, then we can issue the write
2223
2224            // should we return write_mshr here instead?  I.e. do we
2225            // have to flush writes in order?  I don't think so... not
2226            // for Alpha anyway.  Maybe for x86?
2227            return conflict_mshr;
2228
2229            // @todo Note that we ignore the ready time of the conflict here
2230        }
2231
2232        // No conflicts; issue read
2233        return miss_mshr;
2234    }
2235
2236    // fall through... no pending requests.  Try a prefetch.
2237    assert(!miss_mshr && !write_mshr);
2238    if (prefetcher && mshrQueue.canPrefetch()) {
2239        // If we have a miss queue slot, we can try a prefetch
2240        PacketPtr pkt = prefetcher->getPacket();
2241        if (pkt) {
2242            Addr pf_addr = blockAlign(pkt->getAddr());
2243            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
2244                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
2245                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
2246                // Update statistic on number of prefetches issued
2247                // (hwpf_mshr_misses)
2248                assert(pkt->req->masterId() < system->maxMasters());
2249                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
2250
2251                // allocate an MSHR and return it, note
2252                // that we send the packet straight away, so do not
2253                // schedule the send
2254                return allocateMissBuffer(pkt, curTick(), false);
2255            } else {
2256                // free the request and packet
2257                delete pkt->req;
2258                delete pkt;
2259            }
2260        }
2261    }
2262
2263    return NULL;
2264}
2265
2266bool
2267Cache::isCachedAbove(PacketPtr pkt, bool is_timing) const
2268{
2269    if (!forwardSnoops)
2270        return false;
2271    // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
2272    // Writeback snoops into upper level caches to check for copies of the
2273    // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict
2274    // packet, the cache can inform the crossbar below of presence or absence
2275    // of the block.
2276    if (is_timing) {
2277        Packet snoop_pkt(pkt, true, false);
2278        snoop_pkt.setExpressSnoop();
2279        // Assert that packet is either Writeback or CleanEvict and not a
2280        // prefetch request because prefetch requests need an MSHR and may
2281        // generate a snoop response.
2282        assert(pkt->isEviction());
2283        snoop_pkt.senderState = NULL;
2284        cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
2285        // Writeback/CleanEvict snoops do not generate a snoop response.
2286        assert(!(snoop_pkt.memInhibitAsserted()));
2287        return snoop_pkt.isBlockCached();
2288    } else {
2289        cpuSidePort->sendAtomicSnoop(pkt);
2290        return pkt->isBlockCached();
2291    }
2292}
2293
2294PacketPtr
2295Cache::getTimingPacket()
2296{
2297    MSHR *mshr = getNextMSHR();
2298
2299    if (mshr == NULL) {
2300        return NULL;
2301    }
2302
2303    // use request from 1st target
2304    PacketPtr tgt_pkt = mshr->getTarget()->pkt;
2305    PacketPtr pkt = NULL;
2306
2307    DPRINTF(CachePort, "%s %s for addr %#llx size %d\n", __func__,
2308            tgt_pkt->cmdString(), tgt_pkt->getAddr(), tgt_pkt->getSize());
2309
2310    CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
2311
2312    if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
2313        // We need to check the caches above us to verify that
2314        // they don't have a copy of this block in the dirty state
2315        // at the moment. Without this check we could get a stale
2316        // copy from memory that might get used in place of the
2317        // dirty one.
2318        Packet snoop_pkt(tgt_pkt, true, false);
2319        snoop_pkt.setExpressSnoop();
2320        // We are sending this packet upwards, but if it hits we will
2321        // get a snoop response that we end up treating just like a
2322        // normal response, hence it needs the MSHR as its sender
2323        // state
2324        snoop_pkt.senderState = mshr;
2325        cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
2326
2327        // Check to see if the prefetch was squashed by an upper cache
2328        // (to prevent us from grabbing the line), or if a writeback
2329        // arrived between the time the prefetch was placed in the
2330        // MSHRs and when it was selected to be sent; in either case
2331        // the prefetch is dropped below.
2332
2333        // It is important to check memInhibitAsserted before
2334        // prefetchSquashed. If another cache has asserted MEM_INHIBIT, it
2335        // will be sending a response which will arrive at the MSHR
2336        // allocated for this request. Checking the prefetch squash first
2337        // may result in the MSHR being prematurely deallocated.
2338
2339        if (snoop_pkt.memInhibitAsserted()) {
2340            auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
2341            assert(r.second);
2342            // If we are getting a non-shared response it is dirty
2343            bool pending_dirty_resp = !snoop_pkt.sharedAsserted();
2344            markInService(mshr, pending_dirty_resp);
2345            DPRINTF(Cache, "Upward snoop of prefetch for addr"
2346                    " %#x (%s) hit\n",
2347                    tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
2348            return NULL;
2349        }
2350
2351        if (snoop_pkt.isBlockCached() || blk != NULL) {
2352            DPRINTF(Cache, "Block present, prefetch squashed by cache.  "
2353                    "Deallocating mshr target %#x.\n",
2354                    mshr->blkAddr);
2355            // Deallocate the mshr target
2356            if (mshr->queue->forceDeallocateTarget(mshr)) {
2357                // Clear the blocked flag if this deallocation freed an
2358                // MSHR when all had previously been in use
2359                clearBlocked((BlockedCause)(mshr->queue->index));
2360            }
2361            return NULL;
2362        }
2363    }
2364
2365    if (mshr->isForwardNoResponse()) {
2366        // no response expected, just forward packet as it is
2367        assert(tags->findBlock(mshr->blkAddr, mshr->isSecure) == NULL);
2368        pkt = tgt_pkt;
2369    } else {
2370        pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
2371
2372        mshr->isForward = (pkt == NULL);
2373
2374        if (mshr->isForward) {
2375            // not a cache block request, but a response is expected
2376            // make copy of current packet to forward, keep current
2377            // copy for response handling
2378            pkt = new Packet(tgt_pkt, false, true);
2379            if (pkt->isWrite()) {
2380                pkt->setData(tgt_pkt->getConstPtr<uint8_t>());
2381            }
2382        }
2383    }
2384
2385    assert(pkt != NULL);
2386    // play it safe and append (rather than set) the sender state, as
2387    // forwarded packets may already have existing state
2388    pkt->pushSenderState(mshr);
2389    return pkt;
2390}
2391
2392
2393Tick
2394Cache::nextMSHRReadyTime() const
2395{
2396    Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
2397                              writeBuffer.nextMSHRReadyTime());
2398
2399    // Don't signal prefetch ready time if no MSHRs available
2400    // Will signal once enough MSHRs are deallocated
2401    if (prefetcher && mshrQueue.canPrefetch()) {
2402        nextReady = std::min(nextReady,
2403                             prefetcher->nextPrefetchReadyTime());
2404    }
2405
2406    return nextReady;
2407}
2408
2409void
2410Cache::serialize(CheckpointOut &cp) const
2411{
2412    bool dirty(isDirty());
2413
2414    if (dirty) {
2415        warn("*** The cache still contains dirty data. ***\n");
2416        warn("    Make sure to drain the system using the correct flags.\n");
2417        warn("    This checkpoint will not restore correctly and dirty data in "
2418             "the cache will be lost!\n");
2419    }
2420
2421    // Since we don't checkpoint the data in the cache, any dirty data
2422    // will be lost when restoring from a checkpoint of a system that
2423    // wasn't drained properly. Flag the checkpoint as invalid if the
2424    // cache contains dirty data.
2425    bool bad_checkpoint(dirty);
2426    SERIALIZE_SCALAR(bad_checkpoint);
2427}
2428
2429void
2430Cache::unserialize(CheckpointIn &cp)
2431{
2432    bool bad_checkpoint;
2433    UNSERIALIZE_SCALAR(bad_checkpoint);
2434    if (bad_checkpoint) {
2435        fatal("Restoring from checkpoints with dirty caches is not supported "
2436              "in the classic memory system. Please remove any caches or "
2437              " drain them properly before taking checkpoints.\n");
2438    }
2439}
2440
2441///////////////
2442//
2443// CpuSidePort
2444//
2445///////////////
2446
2447AddrRangeList
2448Cache::CpuSidePort::getAddrRanges() const
2449{
2450    return cache->getAddrRanges();
2451}
2452
2453bool
2454Cache::CpuSidePort::recvTimingReq(PacketPtr pkt)
2455{
2456    assert(!cache->system->bypassCaches());
2457
2458    bool success = false;
2459
2460    // always let inhibited requests through, even if blocked,
2461    // ultimately we should check if this is an express snoop, but at
2462    // the moment that flag is only set in the cache itself
2463    if (pkt->memInhibitAsserted()) {
2464        // do not change the current retry state
2465        bool M5_VAR_USED bypass_success = cache->recvTimingReq(pkt);
2466        assert(bypass_success);
2467        return true;
2468    } else if (blocked || mustSendRetry) {
2469        // either already committed to send a retry, or blocked
2470        success = false;
2471    } else {
2472        // pass it on to the cache, and let the cache decide if we
2473        // have to retry or not
2474        success = cache->recvTimingReq(pkt);
2475    }
2476
2477    // remember if we have to retry
2478    mustSendRetry = !success;
2479    return success;
2480}
2481
2482Tick
2483Cache::CpuSidePort::recvAtomic(PacketPtr pkt)
2484{
2485    return cache->recvAtomic(pkt);
2486}
2487
2488void
2489Cache::CpuSidePort::recvFunctional(PacketPtr pkt)
2490{
2491    // functional request
2492    cache->functionalAccess(pkt, true);
2493}
2494
2495Cache::
2496CpuSidePort::CpuSidePort(const std::string &_name, Cache *_cache,
2497                         const std::string &_label)
2498    : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache)
2499{
2500}
2501
2502Cache*
2503CacheParams::create()
2504{
2505    assert(tags);
2506
2507    return new Cache(this);
2508}
2509///////////////
2510//
2511// MemSidePort
2512//
2513///////////////
2514
2515bool
2516Cache::MemSidePort::recvTimingResp(PacketPtr pkt)
2517{
2518    cache->recvTimingResp(pkt);
2519    return true;
2520}
2521
2522// Express snooping requests to memside port
2523void
2524Cache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
2525{
2526    // handle snooping requests
2527    cache->recvTimingSnoopReq(pkt);
2528}
2529
2530Tick
2531Cache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
2532{
2533    return cache->recvAtomicSnoop(pkt);
2534}
2535
2536void
2537Cache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
2538{
2539    // functional snoop (note that in contrast to atomic we don't have
2540    // a specific functionalSnoop method, as they have the same
2541    // behaviour regardless)
2542    cache->functionalAccess(pkt, false);
2543}
2544
2545void
2546Cache::CacheReqPacketQueue::sendDeferredPacket()
2547{
2548    // sanity check
2549    assert(!waitingOnRetry);
2550
2551    // there should never be any deferred request packets in the
2552    // queue, instead we rely on the cache to provide the packets
2553    // from the MSHR queue or write queue
2554    assert(deferredPacketReadyTime() == MaxTick);
2555
2556    // check for request packets (requests & writebacks)
2557    PacketPtr pkt = cache.getTimingPacket();
2558    if (pkt == NULL) {
2559        // can happen if e.g. we attempt a writeback and fail, but
2560        // before the retry, the writeback is eliminated because
2561        // we snoop another cache's ReadEx.
2562    } else {
2563        MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
2564        // in most cases getTimingPacket allocates a new packet, and
2565        // we must delete it unless it is successfully sent
2566        bool delete_pkt = !mshr->isForwardNoResponse();
2567
2568        // let our snoop responses go first if there are responses to
2569        // the same addresses we are about to writeback, note that
2570        // this creates a dependency between requests and snoop
2571        // responses, but that should not be a problem since there is
2572        // a chain already and the key is that the snoop responses can
2573        // sink unconditionally
2574        if (snoopRespQueue.hasAddr(pkt->getAddr())) {
2575            DPRINTF(CachePort, "Waiting for snoop response to be sent\n");
2576            Tick when = snoopRespQueue.deferredPacketReadyTime();
2577            schedSendEvent(when);
2578
2579            if (delete_pkt)
2580                delete pkt;
2581
2582            return;
2583        }
2584
2585
2586        waitingOnRetry = !masterPort.sendTimingReq(pkt);
2587
2588        if (waitingOnRetry) {
2589            DPRINTF(CachePort, "now waiting on a retry\n");
2590            if (delete_pkt) {
2591                // we are awaiting a retry, but we
2592                // delete the packet and will be creating a new packet
2593                // when we get the opportunity
2594                delete pkt;
2595            }
2596            // note that we have now masked any requestBus and
2597            // schedSendEvent (we will wait for a retry before
2598            // doing anything), and this is so even if we do not
2599            // care about this packet and might override it before
2600            // it gets retried
2601        } else {
2602            // As part of the call to sendTimingReq the packet is
2603            // forwarded to all neighbouring caches (and any
2604            // caches above them) as a snoop. The packet is also
2605            // sent to any potential cache below as the
2606            // interconnect is not allowed to buffer the
2607            // packet. Thus at this point we know if any of the
2608            // neighbouring, or the downstream cache is
2609            // responding, and if so, if it is with a dirty line
2610            // or not.
2611            bool pending_dirty_resp = !pkt->sharedAsserted() &&
2612                pkt->memInhibitAsserted();
2613
2614            cache.markInService(mshr, pending_dirty_resp);
2615        }
2616    }
2617
2618    // if we succeeded and are not waiting for a retry, schedule the
2619    // next send considering when the next MSHR is ready, note that
2620    // snoop responses have their own packet queue and thus schedule
2621    // their own events
2622    if (!waitingOnRetry) {
2623        schedSendEvent(cache.nextMSHRReadyTime());
2624    }
2625}
2626
2627Cache::
2628MemSidePort::MemSidePort(const std::string &_name, Cache *_cache,
2629                         const std::string &_label)
2630    : BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
2631      _reqQueue(*_cache, *this, _snoopRespQueue, _label),
2632      _snoopRespQueue(*_cache, *this, _label), cache(_cache)
2633{
2634}
2635