cache.cc revision 11275:fc2b0e6550ad
/*
 * Copyright (c) 2010-2015 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Sandberg
 */

/**
 * @file
 * Cache definitions.
 */

#include "mem/cache/cache.hh"

#include "base/misc.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheTags.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "sim/sim_exit.hh"

Cache::Cache(const CacheParams *p)
    : BaseCache(p, p->system->cacheLineSize()),
      tags(p->tags),
      prefetcher(p->prefetcher),
      doFastWrites(true),
      prefetchOnAccess(p->prefetch_on_access),
      clusivity(p->clusivity),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent(this, false,
                                    EventBase::Delayed_Writeback_Pri)
{
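    // tempBlock is a stand-alone block, outside the tag array, used
    // to hold a line temporarily when a fill cannot allocate a
    // regular (replaceable) block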
    tempBlock = new CacheBlk();
    tempBlock->data = new uint8_t[blkSize];

    cpuSidePort = new CpuSidePort(p->name + ".cpu_side", this,
                                  "CpuSidePort");
    memSidePort = new MemSidePort(p->name + ".mem_side", this,
                                  "MemSidePort");

    tags->setCache(this);
    if (prefetcher)
        prefetcher->setCache(this);
}

Cache::~Cache()
{
    delete [] tempBlock->data;
    delete tempBlock;

    delete cpuSidePort;
    delete memSidePort;
}

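// Register statistics; Cache adds nothing here beyond the counters
// already defined in BaseCache.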
void
Cache::regStats()
{
    BaseCache::regStats();
}

void
Cache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = tags->extractBlkOffset(pkt->getAddr());
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}


void
Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
                             bool deferred_response, bool pending_downgrade)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsExclusive() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        cmpAndSwap(blk, pkt);
    } else if (pkt->isWrite()) {
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in Exclusive
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty even if we are a failed
        // StoreCond so we supply data to any snoops that have
        // appended themselves to this cache before knowing the store
        // will fail.
        blk->status |= BlkDirty;
        DPRINTF(Cache, "%s for %s addr %#llx size %d (write)\n", __func__,
                pkt->cmdString(), pkt->getAddr(), pkt->getSize());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }
        pkt->setDataFromBlock(blk->data, blkSize);
        // determine if this read is from a (coherent) cache, or not
        // by looking at the command type; we could potentially add a
        // packet attribute such as 'FromCache' to make this check a
        // bit cleaner
        if (pkt->cmd == MemCmd::ReadExReq ||
            pkt->cmd == MemCmd::ReadSharedReq ||
            pkt->cmd == MemCmd::ReadCleanReq ||
            pkt->cmd == MemCmd::SCUpgradeFailReq) {
            assert(pkt->getSize() == blkSize);
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsExclusive()) {
                // sanity check
                assert(pkt->cmd == MemCmd::ReadExReq ||
                       pkt->cmd == MemCmd::SCUpgradeFailReq);

                // if we have a dirty copy, make sure the recipient
                // keeps it marked dirty
                if (blk->isDirty()) {
                    pkt->assertMemInhibit();
                }
                // on ReadExReq we give up our copy unconditionally,
                // even if this cache is mostly inclusive, we may want
                // to revisit this
                invalidateBlock(blk);
            } else if (blk->isWritable() && !pending_downgrade &&
                       !pkt->sharedAsserted() &&
                       pkt->cmd != MemCmd::ReadCleanReq) {
                // we can give the requester an exclusive copy (by not
                // asserting shared line) on a read request if:
                // - we have an exclusive copy at this level (& below)
                // - we don't have a pending snoop from below
                //   signaling another read request
                // - no other cache above has a copy (otherwise it
                //   would have asserted the shared line on request)
                // - we are not satisfying an instruction fetch (this
                //   prevents dirty data in the i-cache)

                if (blk->isDirty()) {
                    // special considerations if we're owner:
                    if (!deferred_response) {
                        // if we are responding immediately and can
                        // signal that we're transferring ownership
                        // (inhibit set) along with exclusivity
                        // (shared not set), do so
                        pkt->assertMemInhibit();

                        // if this cache is mostly inclusive, we keep
                        // the block as writable (exclusive), and pass
                        // it upwards as writable and dirty
                        // (modified), hence we have multiple caches
                        // considering the same block writable,
                        // something that we get away with due to the
                        // fact that: 1) this cache has been
                        // considered the ordering point and
                        // responded to all snoops up till now, and 2)
                        // we always snoop upwards before consulting
                        // the local cache, both on a normal request
                        // (snooping done by the crossbar), and on a
                        // snoop
                        blk->status &= ~BlkDirty;

                        // if this cache is mostly exclusive with
                        // respect to the cache above, drop the block
                        if (clusivity == Enums::mostly_excl) {
                            invalidateBlock(blk);
                        }
                    } else {
                        // if we're responding after our own miss,
                        // there's a window where the recipient didn't
                        // know it was getting ownership and may not
                        // have responded to snoops correctly, so we
                        // can't pass off ownership *or* exclusivity
                        pkt->assertShared();
                    }
                }
            } else {
                // otherwise only respond with a shared copy
                pkt->assertShared();
            }
        }
    } else {
        // Upgrade or Invalidate, since we have it Exclusively (E or
        // M), we ack then invalidate.
        assert(pkt->isUpgrade() || pkt->isInvalidate());

        // for invalidations we could be looking at the temp block
        // (for upgrades we always allocate)
        invalidateBlock(blk);
        DPRINTF(Cache, "%s for %s addr %#llx size %d (invalidation)\n",
                __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
    }
}


/////////////////////////////////////////////////////
//
// MSHR helper functions
//
/////////////////////////////////////////////////////


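// Flag an MSHR as being in service; the bookkeeping itself is shared
// with the base class and done by markInServiceInternal().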
void
Cache::markInService(MSHR *mshr, bool pending_dirty_resp)
{
    markInServiceInternal(mshr, pending_dirty_resp);
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

bool
Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
              PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());

    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    if (pkt->req->isUncacheable()) {
        DPRINTF(Cache, "%s%s addr %#llx uncacheable\n", pkt->cmdString(),
                pkt->req->isInstFetch() ? " (ifetch)" : "",
                pkt->getAddr());

        // flush and invalidate any existing block
        CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
        if (old_blk && old_blk->isValid()) {
            if (old_blk->isDirty() || writebackClean)
                writebacks.push_back(writebackBlk(old_blk));
            else
                writebacks.push_back(cleanEvictBlk(old_blk));
            tags->invalidate(old_blk);
            old_blk->invalidate();
        }

        blk = NULL;
        // lookupLatency is the latency in case the request is uncacheable.
        lat = lookupLatency;
        return false;
    }

    ContextID id = pkt->req->hasContextId() ?
        pkt->req->contextId() : InvalidContextID;
    // Note that lat is passed by reference to accessBlock(), which
    // may modify its value.
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat, id);

    DPRINTF(Cache, "%s%s addr %#llx size %d (%s) %s\n", pkt->cmdString(),
            pkt->req->isInstFetch() ? " (ifetch)" : "",
            pkt->getAddr(), pkt->getSize(), pkt->isSecure() ? "s" : "ns",
            blk ? "hit " + blk->print() : "miss");


    if (pkt->isEviction()) {
        // We check for presence of block in above caches before issuing
        // Writeback or CleanEvict to write buffer. Therefore the only
        // possible case is a CleanEvict packet coming from above and
        // encountering a Writeback generated in this cache, waiting in
        // the write buffer. Cases of upper level peer caches generating
        // CleanEvict and Writeback or simply CleanEvict and CleanEvict
        // almost simultaneously will be caught by snoops sent out by
        // crossbar.
        std::vector<MSHR *> outgoing;
        if (writeBuffer.findMatches(pkt->getAddr(), pkt->isSecure(),
                                   outgoing)) {
            assert(outgoing.size() == 1);
            MSHR *wb_entry = outgoing[0];
            assert(wb_entry->getNumTargets() == 1);
            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
            assert(wbPkt->isWriteback());

            if (pkt->isCleanEviction()) {
                // The CleanEvict and WritebackClean snoops into other
                // peer caches of the same level while traversing the
                // crossbar. If a copy of the block is found, the
                // packet is deleted in the crossbar. Hence, none of
                // the other upper level caches connected to this
                // cache have the block, so we can clear the
                // BLOCK_CACHED flag in the Writeback if set and
                // discard the CleanEvict by returning true.
                wbPkt->clearBlockCached();
                return true;
            } else {
                assert(pkt->cmd == MemCmd::WritebackDirty);
                // Dirty writeback from above trumps our clean
                // writeback... discard here
                // Note: markInService will remove entry from writeback buffer.
                markInService(wb_entry, false);
                delete wbPkt;
            }
        }
    }

    // Writeback handling is a special case.  We can write the block into
    // the cache without having a writeable copy (or any copy at all).
    if (pkt->isWriteback()) {
        assert(blkSize == pkt->getSize());

        // we could get a clean writeback while we are having
        // outstanding accesses to a block, do the simple thing for
        // now and drop the clean writeback so that we do not upset
        // any ordering/decisions about ownership already taken
        if (pkt->cmd == MemCmd::WritebackClean &&
            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
                    "dropping\n", pkt->getAddr());
            return true;
        }

        if (blk == NULL) {
            // need to do a replacement
            blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks);
            if (blk == NULL) {
                // no replaceable block available: give up, fwd to next level.
                incMissCount(pkt);
                return false;
            }
            tags->insertBlock(pkt, blk);

            blk->status = (BlkValid | BlkReadable);
            if (pkt->isSecure()) {
                blk->status |= BlkSecure;
            }
        }
        // only mark the block dirty if we got a writeback command,
        // and leave it as is for a clean writeback
        if (pkt->cmd == MemCmd::WritebackDirty) {
            blk->status |= BlkDirty;
        }
        // if shared is not asserted we got the writeback in modified
        // state, if it is asserted we are in the owned state
        if (!pkt->sharedAsserted()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
        incHitCount(pkt);
        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        if (blk != NULL) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and delete
            // it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the CleanEvict
        // like a Writeback which could not find a replaceable block so has to
        // go to next level.
        return false;
    } else if ((blk != NULL) &&
               (pkt->needsExclusive() ? blk->isWritable()
                                      : blk->isReadable())) {
        // OK to satisfy access
        incHitCount(pkt);
        satisfyCpuSideRequest(pkt, blk);
        return true;
    }

    // Can't satisfy access normally... either no block (blk == NULL)
    // or have block but need exclusive & only have shared.

    incMissCount(pkt);

    if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}


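// Marker sender state attached to snoop packets forwarded towards the
// CPU side, so that the returning snoop responses can be told apart
// from prefetch responses in recvTimingSnoopResp() below.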
class ForwardResponseRecord : public Packet::SenderState
{
  public:

    ForwardResponseRecord() {}
};

void
Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // We use forwardLatency here because we are copying writebacks to
        // write buffer.  Call isCachedAbove for both Writebacks and
        // CleanEvicts. If isCachedAbove returns true we set BLOCK_CACHED flag
        // in Writebacks and discard CleanEvicts.
        if (isCachedAbove(wbPkt)) {
            if (wbPkt->cmd == MemCmd::CleanEvict) {
                // Delete CleanEvict because cached copies exist above. The
                // packet destructor will delete the request object because
                // this is a non-snoop request packet which does not require a
                // response.
                delete wbPkt;
            } else if (wbPkt->cmd == MemCmd::WritebackClean) {
                // clean writeback, do not send since the block is
                // still cached above
                assert(writebackClean);
                delete wbPkt;
            } else {
                assert(wbPkt->cmd == MemCmd::WritebackDirty);
                // Set BLOCK_CACHED flag in Writeback and send below, so that
                // the Writeback does not reset the bit corresponding to this
                // address in the snoop filter below.
                wbPkt->setBlockCached();
                allocateWriteBuffer(wbPkt, forward_time);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            allocateWriteBuffer(wbPkt, forward_time);
        }
        writebacks.pop_front();
    }
}

void
Cache::doWritebacksAtomic(PacketList& writebacks)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // Call isCachedAbove for both Writebacks and CleanEvicts. If
        // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
        // and discard CleanEvicts.
        if (isCachedAbove(wbPkt, false)) {
            if (wbPkt->cmd == MemCmd::WritebackDirty) {
                // Set BLOCK_CACHED flag in Writeback and send below,
                // so that the Writeback does not reset the bit
                // corresponding to this address in the snoop filter
                // below. We can discard CleanEvicts because cached
                // copies exist above. Atomic mode isCachedAbove
                // modifies packet to set BLOCK_CACHED flag
                memSidePort->sendAtomic(wbPkt);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            memSidePort->sendAtomic(wbPkt);
        }
        writebacks.pop_front();
        // In case of CleanEvicts, the packet destructor will delete the
        // request object because this is a non-snoop request packet which
        // does not require a response.
        delete wbPkt;
    }
}


void
Cache::recvTimingSnoopResp(PacketPtr pkt)
{
    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    assert(pkt->isResponse());

    // must be cache-to-cache response from upper to lower level
    ForwardResponseRecord *rec =
        dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
    assert(!system->bypassCaches());

    if (rec == NULL) {
        // @todo What guarantee do we have that this HardPFResp is
        // actually for this cache, and not a cache closer to the
        // memory?
        assert(pkt->cmd == MemCmd::HardPFResp);
        // Check if it's a prefetch response and handle it. We shouldn't
        // get any other kinds of responses without FRRs.
        DPRINTF(Cache, "Got prefetch response from above for addr %#llx (%s)\n",
                pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
        recvTimingResp(pkt);
        return;
    }

    pkt->popSenderState();
    delete rec;
    // forwardLatency is used here because this is a response from an
    // upper level cache.
    // To pay for the delay that occurs if the packet comes from the
    // bus, we also charge headerDelay.
    Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    memSidePort->schedTimingSnoopResp(pkt, snoop_resp_time);
}

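// Promote a full-block WriteReq to a WriteLineReq, so the line can be
// written without first fetching its previous contents.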
void
Cache::promoteWholeLineWrites(PacketPtr pkt)
{
    // Cache line clearing instructions
    if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) {
        pkt->cmd = MemCmd::WriteLineReq;
        DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
    }
}

bool
Cache::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(CacheTags, "%s tags: %s\n", __func__, tags->print());

    assert(pkt->isRequest());

    // Just forward the packet if caches are disabled.
    if (system->bypassCaches()) {
        // @todo This should really enqueue the packet rather
        bool M5_VAR_USED success = memSidePort->sendTimingReq(pkt);
        assert(success);
        return true;
    }

    promoteWholeLineWrites(pkt);

    if (pkt->memInhibitAsserted()) {
        // a cache above us (but not where the packet came from) is
        // responding to the request
        DPRINTF(Cache, "mem inhibited on addr %#llx (%s): not responding\n",
                pkt->getAddr(), pkt->isSecure() ? "s" : "ns");

        // if the packet needs exclusive, and the cache that has
        // promised to respond (setting the inhibit flag) is not
        // providing exclusive (it is in O vs M state), we know that
        // there may be other shared copies in the system; go out and
        // invalidate them all
        if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
            // create a downstream express snoop with cleared packet
            // flags, there is no need to allocate any data as the
            // packet is merely used to co-ordinate state transitions
            Packet *snoop_pkt = new Packet(pkt, true, false);

            // also reset the bus time that the original packet has
            // not yet paid for
            snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;

            // make this an instantaneous express snoop, and let the
            // other caches in the system know that the packet is
            // inhibited, because we have found the authoritative copy
            // (O) that will supply the right data
            snoop_pkt->setExpressSnoop();
            snoop_pkt->assertMemInhibit();

            // this express snoop travels towards the memory, and at
            // every crossbar it is snooped upwards thus reaching
            // every cache in the system
            bool M5_VAR_USED success = memSidePort->sendTimingReq(snoop_pkt);
            // express snoops always succeed
            assert(success);

            // main memory will delete the packet
        }

        // queue for deletion, as the sending cache is still relying
        // on the packet
        pendingDelete.reset(pkt);

        // no need to take any action in this particular cache as the
        // caches along the path to memory are allowed to keep lines
        // in a shared state, and a cache above us already committed
        // to responding
        return true;
    }

    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    // We use lookupLatency here because it is used to specify the latency
    // to access.
    Cycles lat = lookupLatency;
    CacheBlk *blk = NULL;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() calls accessBlock() which can modify lat value.
        satisfied = access(pkt, blk, lat, writebacks);

        // copy writebacks to write buffer here to ensure they logically
        // precede anything happening below
        doWritebacks(writebacks, forward_time);
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is just lat, i.e. lookupLatency possibly
    // modified by the access() call above.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;

    // track time of availability of next prefetch, if any
    Tick next_pf_time = MaxTick;

    bool needsResponse = pkt->needsResponse();

    if (satisfied) {
        // should never be satisfying an uncacheable access as we
        // flush and invalidate any existing block as part of the
        // lookup
        assert(!pkt->req->isUncacheable());

        // hit (for all other request types)

        if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
            if (blk)
                blk->status &= ~BlkHWPrefetched;

            // Don't notify on SWPrefetch
            if (!pkt->cmd.isSWPrefetch())
                next_pf_time = prefetcher->notify(pkt);
        }

        if (needsResponse) {
            pkt->makeTimingResponse();
            // @todo: Make someone pay for this
            pkt->headerDelay = pkt->payloadDelay = 0;

            // In this case we are considering request_time that takes
            // into account the delay of the xbar, if any, and just
            // lat, neglecting responseLatency, modelling hit latency
            // just as lookupLatency or the value of lat overridden
            // by access(), which in turn calls accessBlock().
            cpuSidePort->schedTimingResp(pkt, request_time, true);
        } else {
            DPRINTF(Cache, "%s satisfied %s addr %#llx, no response needed\n",
                    __func__, pkt->cmdString(), pkt->getAddr(),
                    pkt->getSize());

            // queue the packet for deletion, as the sending cache is
            // still relying on it; if the block is found in access(),
            // CleanEvict and Writeback messages will be deleted
            // here as well
            pendingDelete.reset(pkt);
        }
    } else {
        // miss

        Addr blk_addr = blockAlign(pkt->getAddr());

        // ignore any existing MSHR if we are dealing with an
        // uncacheable request
        MSHR *mshr = pkt->req->isUncacheable() ? nullptr :
            mshrQueue.findMatch(blk_addr, pkt->isSecure());

        // Software prefetch handling:
        // To keep the core from waiting on data it won't look at
        // anyway, send back a response with dummy data. Miss handling
        // will continue asynchronously. Unfortunately, the core will
        // insist upon freeing original Packet/Request, so we have to
        // create a new pair with a different lifecycle. Note that this
        // processing happens before any MSHR munging on the behalf of
        // this request because this new Request will be the one stored
        // into the MSHRs, not the original.
        if (pkt->cmd.isSWPrefetch()) {
            assert(needsResponse);
            assert(pkt->req->hasPaddr());
            assert(!pkt->req->isUncacheable());

            // There's no reason to add a prefetch as an additional target
            // to an existing MSHR. If an outstanding request is already
            // in progress, there is nothing for the prefetch to do.
            // If this is the case, we don't even create a request at all.
            PacketPtr pf = nullptr;

            if (!mshr) {
                // copy the request and create a new SoftPFReq packet
                RequestPtr req = new Request(pkt->req->getPaddr(),
                                             pkt->req->getSize(),
                                             pkt->req->getFlags(),
                                             pkt->req->masterId());
                pf = new Packet(req, pkt->cmd);
                pf->allocate();
                assert(pf->getAddr() == pkt->getAddr());
                assert(pf->getSize() == pkt->getSize());
            }

            pkt->makeTimingResponse();
            // for debugging, set all the bits in the response data
            // (also keeps valgrind from complaining when debugging settings
            //  print out instruction results)
            std::memset(pkt->getPtr<uint8_t>(), 0xFF, pkt->getSize());
            // request_time is used here, taking into account lat and the delay
            // charged if the packet comes from the xbar.
            cpuSidePort->schedTimingResp(pkt, request_time, true);

            // If an outstanding request is in progress (we found an
            // MSHR) this is set to null
            pkt = pf;
        }

        if (mshr) {
            /// MSHR hit
            /// @note writebacks will be checked in getNextMSHR()
            /// for any conflicting requests to the same block

            //@todo remove hw_pf here

            // Coalesce unless it was a software prefetch (see above).
            if (pkt) {
                assert(!pkt->isWriteback());
                // CleanEvicts corresponding to blocks which have
                // outstanding requests in MSHRs are simply sunk here
                if (pkt->cmd == MemCmd::CleanEvict) {
                    pendingDelete.reset(pkt);
                } else {
                    DPRINTF(Cache, "%s coalescing MSHR for %s addr %#llx size %d\n",
                            __func__, pkt->cmdString(), pkt->getAddr(),
                            pkt->getSize());

                    assert(pkt->req->masterId() < system->maxMasters());
                    mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
                    if (mshr->threadNum != 0/*pkt->req->threadId()*/) {
                        mshr->threadNum = -1;
                    }
                    // We use forward_time here because it is the same
                    // considering new targets. We have multiple
                    // requests for the same address here. It
                    // specifies the latency to allocate an internal
                    // buffer and to schedule an event to the queued
                    // port and also takes into account the additional
                    // delay of the xbar.
                    mshr->allocateTarget(pkt, forward_time, order++,
                                         allocOnFill(pkt->cmd));
                    if (mshr->getNumTargets() == numTarget) {
                        noTargetMSHR = mshr;
                        setBlocked(Blocked_NoTargets);
                        // need to be careful with this... if this mshr isn't
                        // ready yet (i.e. time > curTick()), we don't want to
                        // move it ahead of mshrs that are ready
                        // mshrQueue.moveToFront(mshr);
                    }
                }
                // We should call the prefetcher regardless of whether
                // the request is satisfied or not, and regardless of
                // whether the request is in the MSHR or not. The
                // request could be a ReadReq hit, but still not
                // satisfied (potentially because of a prior write to
                // the same cache line). So, even when not satisfied
                // and an MSHR is already allocated for this block, we
                // need to let the prefetcher know about the request.
                if (prefetcher) {
                    // Don't notify on SWPrefetch
                    if (!pkt->cmd.isSWPrefetch())
                        next_pf_time = prefetcher->notify(pkt);
                }
            }
        } else {
            // no MSHR
            assert(pkt->req->masterId() < system->maxMasters());
            if (pkt->req->isUncacheable()) {
                mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;
            } else {
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
            }

            if (pkt->isEviction() ||
                (pkt->req->isUncacheable() && pkt->isWrite())) {
                // We use forward_time here because there is an
                // uncached memory write, forwarded to WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                if (blk && blk->isValid()) {
                    // should have flushed and have no valid block
                    assert(!pkt->req->isUncacheable());

                    // If we have a write miss to a valid block, we
                    // need to mark the block non-readable.  Otherwise
                    // if we allow reads while there's an outstanding
                    // write miss, the read could return stale data
                    // out of the cache block... a more aggressive
                    // system could detect the overlap (if any) and
                    // forward data out of the MSHRs, but we don't do
                    // that yet.  Note that we do need to leave the
                    // block valid so that it stays in the cache, in
                    // case we get an upgrade response (and hence no
                    // new data) when the write miss completes.
                    // As long as CPUs do proper store/load forwarding
                    // internally, and have a sufficiently weak memory
                    // model, this is probably unnecessary, but at some
                    // point it must have seemed like we needed it...
                    assert(pkt->needsExclusive());
                    assert(!blk->isWritable());
                    blk->status &= ~BlkReadable;
                }
                // Here we are using forward_time, modelling the latency of
                // a miss (outbound) just as forwardLatency, neglecting the
                // lookupLatency component.
                allocateMissBuffer(pkt, forward_time);
            }

            if (prefetcher) {
                // Don't notify on SWPrefetch
                if (!pkt->cmd.isSWPrefetch())
                    next_pf_time = prefetcher->notify(pkt);
            }
        }
    }

    if (next_pf_time != MaxTick)
        schedMemSideSendEvent(next_pf_time);

    return true;
}


// See comment in cache.hh.
PacketPtr
Cache::getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                    bool needsExclusive) const
{
    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable()) {
        // note that at the point we see the uncacheable request we
        // flush any block, but there could be an outstanding MSHR,
        // and the cache could have filled again before we actually
        // send out the forwarded uncacheable request (blk could thus
        // be non-null)
        return NULL;
    }

    if (!blkValid &&
        (cpu_pkt->isUpgrade() ||
         cpu_pkt->isEviction())) {
        // Writebacks that weren't allocated in access() and upgrades
        // from upper-level caches that missed completely just go
        // through.
        return NULL;
    }

    assert(cpu_pkt->needsResponse());

    MemCmd cmd;
    // @TODO make useUpgrades a parameter.
    // Note that ownership protocols require upgrade, otherwise a
    // write miss on a shared owned block will generate a ReadExcl,
    // which will clobber the owned copy.
    const bool useUpgrades = true;
    if (blkValid && useUpgrades) {
        // only reason to be here is that blk is shared
        // (read-only) and we need exclusive
        assert(needsExclusive);
        assert(!blk->isWritable());
        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
               cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
        // Even though this SC will fail, we still need to send out the
        // request and get the data to supply it to other snoopers in the case
        // where the determination that the StoreCond fails is delayed due to
        // all caches not being on the same local bus.
        cmd = MemCmd::SCUpgradeFailReq;
    } else if (cpu_pkt->cmd == MemCmd::WriteLineReq) {
        // forward as invalidate to all other caches, this gives us
        // the line in exclusive state, and invalidates all other
        // copies
        cmd = MemCmd::InvalidateReq;
    } else {
        // block is invalid
        cmd = needsExclusive ? MemCmd::ReadExReq :
            (isReadOnly ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
    }
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);

    // if there are sharers in the upper levels, pass that info downstream
    if (cpu_pkt->sharedAsserted()) {
        // note that cpu_pkt may have spent a considerable time in the
        // MSHR queue and that the information could possibly be out
        // of date, however, there is no harm in conservatively
        // assuming the block is shared
        pkt->assertShared();
        DPRINTF(Cache, "%s passing shared from %s to %s addr %#llx size %d\n",
                __func__, cpu_pkt->cmdString(), pkt->cmdString(),
                pkt->getAddr(), pkt->getSize());
    }

    // the packet should be block aligned
    assert(pkt->getAddr() == blockAlign(pkt->getAddr()));

    pkt->allocate();
    DPRINTF(Cache, "%s created %s from %s for addr %#llx size %d\n",
            __func__, pkt->cmdString(), cpu_pkt->cmdString(), pkt->getAddr(),
            pkt->getSize());
    return pkt;
}


Tick
Cache::recvAtomic(PacketPtr pkt)
{
    // We are in atomic mode so we pay just for lookupLatency here.
    Cycles lat = lookupLatency;
    // @TODO: make this a parameter
    bool last_level_cache = false;

    // Forward the request if the system is in cache bypass mode.
    if (system->bypassCaches())
        return ticksToCycles(memSidePort->sendAtomic(pkt));

    promoteWholeLineWrites(pkt);

    if (pkt->memInhibitAsserted()) {
        // have to invalidate ourselves and any lower caches even if
        // upper cache will be responding
        if (pkt->isInvalidate()) {
            CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
            if (blk && blk->isValid()) {
                tags->invalidate(blk);
                blk->invalidate();
                DPRINTF(Cache, "rcvd mem-inhibited %s on %#llx (%s):"
                        " invalidating\n",
                        pkt->cmdString(), pkt->getAddr(),
                        pkt->isSecure() ? "s" : "ns");
            }
            if (!last_level_cache) {
                DPRINTF(Cache, "forwarding mem-inhibited %s on %#llx (%s)\n",
                        pkt->cmdString(), pkt->getAddr(),
                        pkt->isSecure() ? "s" : "ns");
                lat += ticksToCycles(memSidePort->sendAtomic(pkt));
            }
        } else {
            DPRINTF(Cache, "rcvd mem-inhibited %s on %#llx: not responding\n",
                    pkt->cmdString(), pkt->getAddr());
        }

        return lat * clockPeriod();
    }

    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    CacheBlk *blk = NULL;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);

    if (!satisfied) {
        // MISS

        PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());

        bool is_forward = (bus_pkt == NULL);

        if (is_forward) {
            // just forwarding the same request to the next level
            // no local cache operation involved
            bus_pkt = pkt;
        }

        DPRINTF(Cache, "Sending an atomic %s for %#llx (%s)\n",
                bus_pkt->cmdString(), bus_pkt->getAddr(),
                bus_pkt->isSecure() ? "s" : "ns");

#if TRACING_ON
        CacheBlk::State old_state = blk ? blk->status : 0;
#endif

        lat += ticksToCycles(memSidePort->sendAtomic(bus_pkt));

        // We are now dealing with the response handling
        DPRINTF(Cache, "Receive response: %s for addr %#llx (%s) in state %i\n",
                bus_pkt->cmdString(), bus_pkt->getAddr(),
                bus_pkt->isSecure() ? "s" : "ns",
                old_state);

        // If packet was a forward, the response (if any) is already
        // in place in the bus_pkt == pkt structure, so we don't need
        // to do anything.  Otherwise, use the separate bus_pkt to
        // generate response to pkt and then delete it.
        if (!is_forward) {
            if (pkt->needsResponse()) {
                assert(bus_pkt->isResponse());
                if (bus_pkt->isError()) {
                    pkt->makeAtomicResponse();
                    pkt->copyError(bus_pkt);
                } else if (pkt->cmd == MemCmd::InvalidateReq) {
                    if (blk) {
                        // invalidate response to a cache that received
                        // an invalidate request
                        satisfyCpuSideRequest(pkt, blk);
                    }
                } else if (pkt->cmd == MemCmd::WriteLineReq) {
                    // note the use of pkt, not bus_pkt here.

                    // write-line request to the cache that promoted
                    // the write to a whole line
                    blk = handleFill(pkt, blk, writebacks,
                                     allocOnFill(pkt->cmd));
                    satisfyCpuSideRequest(pkt, blk);
                } else if (bus_pkt->isRead() ||
                           bus_pkt->cmd == MemCmd::UpgradeResp) {
                    // we're updating cache state to allow us to
                    // satisfy the upstream request from the cache
                    blk = handleFill(bus_pkt, blk, writebacks,
                                     allocOnFill(pkt->cmd));
                    satisfyCpuSideRequest(pkt, blk);
                } else {
                    // we're satisfying the upstream request without
                    // modifying cache state, e.g., a write-through
                    pkt->makeAtomicResponse();
                }
            }
            delete bus_pkt;
        }
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention.  If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used the temp block, check to see if it's valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter) first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = (blk->isDirty() || writebackClean) ?
            writebackBlk(blk) : cleanEvictBlk(blk);
        blk->invalidate();
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat * clockPeriod();
}


void
Cache::functionalAccess(PacketPtr pkt, bool fromCpuSide)
{
    if (system->bypassCaches()) {
        // Packets from the memory side are snoop requests and
        // shouldn't happen in bypass mode.
        assert(fromCpuSide);

        // The cache should be flushed if we are in cache bypass mode,
        // so we don't need to check if we need to update anything.
        memSidePort->sendFunctional(pkt);
        return;
    }

    Addr blk_addr = blockAlign(pkt->getAddr());
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found.  As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.

    // see if we have data at all (owned or otherwise)
    bool have_data = blk && blk->isValid()
        && pkt->checkFunctional(&cbpw, blk_addr, is_secure, blkSize,
                                blk->data);

    // data we have is dirty if marked as such or if valid & ownership
    // pending due to outstanding UpgradeReq
    bool have_dirty =
        have_data && (blk->isDirty() ||
                      (mshr && mshr->inService && mshr->isPendingDirty()));

    bool done = have_dirty
        || cpuSidePort->checkFunctional(pkt)
        || mshrQueue.checkFunctional(pkt, blk_addr)
        || writeBuffer.checkFunctional(pkt, blk_addr)
        || memSidePort->checkFunctional(pkt);

    DPRINTF(Cache, "functional %s %#llx (%s) %s%s%s\n",
            pkt->cmdString(), pkt->getAddr(), is_secure ? "s" : "ns",
            (blk && blk->isValid()) ? "valid " : "",
            have_data ? "data " : "", done ? "done " : "");

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (done) {
        pkt->makeResponse();
    } else {
        // if it came as a request from the CPU side then make sure it
        // continues towards the memory side
        if (fromCpuSide) {
            memSidePort->sendFunctional(pkt);
        } else if (forwardSnoops && cpuSidePort->isSnooping()) {
            // if it came from the memory side, it must be a snoop request
            // and we should only forward it if we are forwarding snoops
            cpuSidePort->sendFunctionalSnoop(pkt);
        }
    }
}


/////////////////////////////////////////////////////
//
// Response handling: responses from the memory side
//
/////////////////////////////////////////////////////


void
Cache::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    // all header delay should be paid for by the crossbar, unless
    // this is a prefetch response from above
    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
             "%s saw a non-zero packet delay\n", name());

    MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
    bool is_error = pkt->isError();

    assert(mshr);

    if (is_error) {
        DPRINTF(Cache, "Cache received packet with error for addr %#llx (%s), "
                "cmd: %s\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns",
                pkt->cmdString());
    }

    DPRINTF(Cache, "Handling response %s for addr %#llx size %d (%s)\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize(),
            pkt->isSecure() ? "s" : "ns");

    MSHRQueue *mq = mshr->queue;
    bool wasFull = mq->isFull();

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = NULL;
    }

    // Initial target is used just for stats
    MSHR::Target *initial_tgt = mshr->getTarget();
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick() - initial_tgt->recvTime;
    PacketList writebacks;
    // We need forward_time here because we have a call to
    // allocateWriteBuffer() that needs this parameter to specify the
    // time to request the bus.  In this case we use forward latency
    // because there is a writeback.  We also pay here for headerDelay
    // that accounts for the bus latencies if the packet comes from
    // the bus.
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    if (pkt->req->isUncacheable()) {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    } else {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    }

    // upgrade deferred targets if we got exclusive
    if (!pkt->sharedAsserted()) {
        mshr->promoteExclusive();
    }

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                pkt->getAddr());

        blk = handleFill(pkt, blk, writebacks, mshr->allocOnFill);
        assert(blk != NULL);
    }

    // allow invalidation responses originating from write-line
    // requests to be discarded
    bool is_invalidate = pkt->isInvalidate();

    // First offset for critical word first calculations
    int initial_offset = initial_tgt->pkt->getOffset(blkSize);

1320    while (mshr->hasTargets()) {
1321        MSHR::Target *target = mshr->getTarget();
1322        Packet *tgt_pkt = target->pkt;
1323
1324        switch (target->source) {
1325          case MSHR::Target::FromCPU:
1326            Tick completion_time;
1327            // Here we charge the xbar delay, carried in headerDelay, to
1328            // completion_time if the packet came through the xbar.
1329            completion_time = pkt->headerDelay;
1330
1331            // Software prefetch handling for cache closest to core
1332            if (tgt_pkt->cmd.isSWPrefetch()) {
1333                // a software prefetch would have already been ack'd
1334                // immediately with dummy data so the core could retire it.
1335                // This request completes right here, so we deallocate it.
1336                delete tgt_pkt->req;
1337                delete tgt_pkt;
1338                break; // skip response
1339            }
1340
1341            // unlike the other packet flows, where data is found in other
1342            // caches or memory and brought back, write-line requests always
1343            // have the data right away, so the above "is fill?" check
1344            // cannot actually be resolved until examining the stored MSHR
1345            // state. We "catch up" with that logic here, which is duplicated
1346            // from above.
1347            if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
1348                assert(!is_error);
1349                // we got the block in exclusive state, so promote any
1350                // deferred targets if possible
1351                mshr->promoteExclusive();
1352                // NB: we use the original packet here and not the response!
1353                blk = handleFill(tgt_pkt, blk, writebacks, mshr->allocOnFill);
1354                assert(blk != NULL);
1355
1356                // treat as a fill, and discard the invalidation
1357                // response
1358                is_fill = true;
1359                is_invalidate = false;
1360            }
1361
1362            if (is_fill) {
1363                satisfyCpuSideRequest(tgt_pkt, blk,
1364                                      true, mshr->hasPostDowngrade());
1365
1366                // How many bytes past the first request is this one
1367                int transfer_offset =
1368                    tgt_pkt->getOffset(blkSize) - initial_offset;
1369                if (transfer_offset < 0) {
1370                    transfer_offset += blkSize;
1371                }
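                // Worked example with assumed values: with blkSize = 64,
                // an initial target at offset 48 and this target at
                // offset 16, transfer_offset = 16 - 48 = -32, which wraps
                // to 32, i.e. this target's data arrives 32 bytes after
                // the critical word.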
1372
1373                // Unless this is the critical word (zero offset), also
1374                // charge payloadDelay. responseLatency is the latency of
1375                // the return path from lower level caches/memory to an
1376                // upper level cache or the core.
1377                completion_time += clockEdge(responseLatency) +
1378                    (transfer_offset ? pkt->payloadDelay : 0);
1379
1380                assert(!tgt_pkt->req->isUncacheable());
1381
1382                assert(tgt_pkt->req->masterId() < system->maxMasters());
1383                missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] +=
1384                    completion_time - target->recvTime;
1385            } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
1386                // failed StoreCond upgrade
1387                assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
1388                       tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
1389                       tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
1390                // responseLatency is the latency of the return path
1391                // from lower level caches/memory to an upper level cache or
1392                // the core.
1393                completion_time += clockEdge(responseLatency) +
1394                    pkt->payloadDelay;
1395                tgt_pkt->req->setExtraData(0);
1396            } else {
1397                // not a cache fill, just forwarding response
1398                // responseLatency is the latency of the return path
1399                // from lower level caches/memory to the core.
1400                completion_time += clockEdge(responseLatency) +
1401                    pkt->payloadDelay;
1402                if (pkt->isRead() && !is_error) {
1403                    // sanity check
1404                    assert(pkt->getAddr() == tgt_pkt->getAddr());
1405                    assert(pkt->getSize() >= tgt_pkt->getSize());
1406
1407                    tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
1408                }
1409            }
1410            tgt_pkt->makeTimingResponse();
1411            // if this packet is an error copy that to the new packet
1412            if (is_error)
1413                tgt_pkt->copyError(pkt);
1414            if (tgt_pkt->cmd == MemCmd::ReadResp &&
1415                (is_invalidate || mshr->hasPostInvalidate())) {
1416                // If intermediate cache got ReadRespWithInvalidate,
1417                // propagate that.  Response should not have
1418                // isInvalidate() set otherwise.
1419                tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
1420                DPRINTF(Cache, "%s updated cmd to %s for addr %#llx\n",
1421                        __func__, tgt_pkt->cmdString(), tgt_pkt->getAddr());
1422            }
1423            // Reset the bus additional time as it is now accounted for
1424            tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
1425            cpuSidePort->schedTimingResp(tgt_pkt, completion_time, true);
1426            break;
1427
1428          case MSHR::Target::FromPrefetcher:
1429            assert(tgt_pkt->cmd == MemCmd::HardPFReq);
1430            if (blk)
1431                blk->status |= BlkHWPrefetched;
1432            delete tgt_pkt->req;
1433            delete tgt_pkt;
1434            break;
1435
1436          case MSHR::Target::FromSnoop:
1437            // I don't believe that a snoop can be in an error state
1438            assert(!is_error);
1439            // response to snoop request
1440            DPRINTF(Cache, "processing deferred snoop...\n");
1441            assert(!(is_invalidate && !mshr->hasPostInvalidate()));
1442            handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
1443            break;
1444
1445          default:
1446            panic("Illegal target->source enum %d\n", target->source);
1447        }
1448
1449        mshr->popTarget();
1450    }
1451
1452    if (blk && blk->isValid()) {
1453        // an invalidate response stemming from a write line request
1454        // should not invalidate the block, so check if the
1455        // invalidation should be discarded
1456        if (is_invalidate || mshr->hasPostInvalidate()) {
1457            invalidateBlock(blk);
1458        } else if (mshr->hasPostDowngrade()) {
1459            blk->status &= ~BlkWritable;
1460        }
1461    }
1462
1463    if (mshr->promoteDeferredTargets()) {
1464        // avoid a later read getting stale data while a write miss is
1465        // outstanding; see the comment in timingAccess()
1466        if (blk) {
1467            blk->status &= ~BlkReadable;
1468        }
1469        mq = mshr->queue;
1470        mq->markPending(mshr);
1471        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
1472    } else {
1473        mq->deallocate(mshr);
1474        if (wasFull && !mq->isFull()) {
1475            clearBlocked((BlockedCause)mq->index);
1476        }
1477
1478        // Request the bus for a prefetch if this deallocation freed enough
1479        // MSHRs for a prefetch to take place
1480        if (prefetcher && mq == &mshrQueue && mshrQueue.canPrefetch()) {
1481            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
1482                                         clockEdge());
1483            if (next_pf_time != MaxTick)
1484                schedMemSideSendEvent(next_pf_time);
1485        }
1486    }
1487    // reset the xbar additional timing as it is now accounted for
1488    pkt->headerDelay = pkt->payloadDelay = 0;
1489
1490    // copy writebacks to write buffer
1491    doWritebacks(writebacks, forward_time);
1492
1493    // if we used the temp block, check if it's valid and then clear it out
1494    if (blk == tempBlock && tempBlock->isValid()) {
1495        // We use forwardLatency here because we are copying
1496        // Writebacks/CleanEvicts to write buffer. It specifies the latency to
1497        // allocate an internal buffer and to schedule an event to the
1498        // queued port.
1499        if (blk->isDirty() || writebackClean) {
1500            PacketPtr wbPkt = writebackBlk(blk);
1501            allocateWriteBuffer(wbPkt, forward_time);
1502            // Set BLOCK_CACHED flag if cached above.
1503            if (isCachedAbove(wbPkt))
1504                wbPkt->setBlockCached();
1505        } else {
1506            PacketPtr wcPkt = cleanEvictBlk(blk);
1507            // Check to see if block is cached above. If not allocate
1508            // write buffer
1509            if (isCachedAbove(wcPkt))
1510                delete wcPkt;
1511            else
1512                allocateWriteBuffer(wcPkt, forward_time);
1513        }
1514        blk->invalidate();
1515    }
1516
1517    DPRINTF(Cache, "Leaving %s with %s for addr %#llx\n", __func__,
1518            pkt->cmdString(), pkt->getAddr());
1519    delete pkt;
1520}
1521
1522PacketPtr
1523Cache::writebackBlk(CacheBlk *blk)
1524{
1525    chatty_assert(!isReadOnly || writebackClean,
1526                  "Writeback from read-only cache");
1527    assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));
1528
1529    writebacks[Request::wbMasterId]++;
1530
1531    Request *req = new Request(tags->regenerateBlkAddr(blk->tag, blk->set),
1532                               blkSize, 0, Request::wbMasterId);
1533    if (blk->isSecure())
1534        req->setFlags(Request::SECURE);
1535
1536    req->taskId(blk->task_id);
1537    blk->task_id = ContextSwitchTaskId::Unknown;
1538    blk->tickInserted = curTick();
1539
1540    PacketPtr pkt =
1541        new Packet(req, blk->isDirty() ?
1542                   MemCmd::WritebackDirty : MemCmd::WritebackClean);
1543
1544    DPRINTF(Cache, "Create Writeback %#llx writable: %d, dirty: %d\n",
1545            pkt->getAddr(), blk->isWritable(), blk->isDirty());
1546
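    // Sketch of the two cases below: if our copy is writable we pass the
    // block downstream without asserting shared (the receiver takes it in
    // modified state) and give up our own write permission; otherwise we
    // are in the owned state and assert shared so the receiver treats the
    // block as owned rather than modified. The local dirty bit is cleared
    // either way.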
1547    if (blk->isWritable()) {
1548        // not asserting shared means we pass the block in modified
1549        // state, mark our own block non-writeable
1550        blk->status &= ~BlkWritable;
1551    } else {
1552        // we are in the owned state, tell the receiver
1553        pkt->assertShared();
1554    }
1555
1556    // make sure the block is not marked dirty
1557    blk->status &= ~BlkDirty;
1558
1559    pkt->allocate();
1560    std::memcpy(pkt->getPtr<uint8_t>(), blk->data, blkSize);
1561
1562    return pkt;
1563}
1564
1565PacketPtr
1566Cache::cleanEvictBlk(CacheBlk *blk)
1567{
1568    assert(!writebackClean);
1569    assert(blk && blk->isValid() && !blk->isDirty());
1570    // Creating a zero-sized write, a message to the snoop filter
1571    Request *req =
1572        new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0,
1573                    Request::wbMasterId);
1574    if (blk->isSecure())
1575        req->setFlags(Request::SECURE);
1576
1577    req->taskId(blk->task_id);
1578    blk->task_id = ContextSwitchTaskId::Unknown;
1579    blk->tickInserted = curTick();
1580
1581    PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
1582    pkt->allocate();
1583    DPRINTF(Cache, "%s%s %x Create CleanEvict\n", pkt->cmdString(),
1584            pkt->req->isInstFetch() ? " (ifetch)" : "",
1585            pkt->getAddr());
1586
1587    return pkt;
1588}
1589
1590void
1591Cache::memWriteback()
1592{
1593    CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor);
1594    tags->forEachBlk(visitor);
1595}
1596
1597void
1598Cache::memInvalidate()
1599{
1600    CacheBlkVisitorWrapper visitor(*this, &Cache::invalidateVisitor);
1601    tags->forEachBlk(visitor);
1602}
1603
1604bool
1605Cache::isDirty() const
1606{
1607    CacheBlkIsDirtyVisitor visitor;
1608    tags->forEachBlk(visitor);
1609
1610    return visitor.isDirty();
1611}
1612
1613bool
1614Cache::writebackVisitor(CacheBlk &blk)
1615{
1616    if (blk.isDirty()) {
1617        assert(blk.isValid());
1618
1619        Request request(tags->regenerateBlkAddr(blk.tag, blk.set),
1620                        blkSize, 0, Request::funcMasterId);
1621        request.taskId(blk.task_id);
1622
1623        Packet packet(&request, MemCmd::WriteReq);
1624        packet.dataStatic(blk.data);
1625
1626        memSidePort->sendFunctional(&packet);
1627
1628        blk.status &= ~BlkDirty;
1629    }
1630
1631    return true;
1632}
1633
1634bool
1635Cache::invalidateVisitor(CacheBlk &blk)
1636{
1637
1638    if (blk.isDirty())
1639        warn_once("Invalidating dirty cache lines. Expect things to break.\n");
1640
1641    if (blk.isValid()) {
1642        assert(!blk.isDirty());
1643        tags->invalidate(&blk);
1644        blk.invalidate();
1645    }
1646
1647    return true;
1648}
1649
1650CacheBlk*
1651Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks)
1652{
1653    CacheBlk *blk = tags->findVictim(addr);
1654
1655    // It is valid to return NULL if there is no victim
1656    if (!blk)
1657        return nullptr;
1658
1659    if (blk->isValid()) {
1660        Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
1661        MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
1662        if (repl_mshr) {
1663            // must be an outstanding upgrade request
1664            // on a block we're about to replace...
1665            assert(!blk->isWritable() || blk->isDirty());
1666            assert(repl_mshr->needsExclusive());
1667            // too hard to replace block with transient state
1668            // allocation failed, block not inserted
1669            return NULL;
1670        } else {
1671            DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx (%s): %s\n",
1672                    repl_addr, blk->isSecure() ? "s" : "ns",
1673                    addr, is_secure ? "s" : "ns",
1674                    blk->isDirty() ? "writeback" : "clean");
1675
1676            // Will send up Writeback/CleanEvict snoops via isCachedAbove
1677            // when pushing this writeback list into the write buffer.
1678            if (blk->isDirty() || writebackClean) {
1679                // Save writeback packet for handling by caller
1680                writebacks.push_back(writebackBlk(blk));
1681            } else {
1682                writebacks.push_back(cleanEvictBlk(blk));
1683            }
1684        }
1685    }
1686
1687    return blk;
1688}
1689
1690void
1691Cache::invalidateBlock(CacheBlk *blk)
1692{
1693    if (blk != tempBlock)
1694        tags->invalidate(blk);
1695    blk->invalidate();
1696}
1697
1698// Note that the reason we return a list of writebacks rather than
1699// inserting them directly in the write buffer is that this function
1700// is called by both atomic and timing-mode accesses, and in atomic
1701// mode we don't mess with the write buffer (we just perform the
1702// writebacks atomically once the original request is complete).
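//
// Typical caller pattern (illustrative sketch, cf. recvTimingResp):
//
//     PacketList writebacks;
//     blk = handleFill(pkt, blk, writebacks, mshr->allocOnFill);
//     ...
//     doWritebacks(writebacks, forward_time);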
1703CacheBlk*
1704Cache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
1705                  bool allocate)
1706{
1707    assert(pkt->isResponse() || pkt->cmd == MemCmd::WriteLineReq);
1708    Addr addr = pkt->getAddr();
1709    bool is_secure = pkt->isSecure();
1710#if TRACING_ON
1711    CacheBlk::State old_state = blk ? blk->status : 0;
1712#endif
1713
1714    // When handling a fill, discard any CleanEvicts for the
1715    // same address in write buffer.
1716    Addr M5_VAR_USED blk_addr = blockAlign(pkt->getAddr());
1717    std::vector<MSHR *> M5_VAR_USED wbs;
1718    assert(!writeBuffer.findMatches(blk_addr, is_secure, wbs));
1719
1720    if (blk == NULL) {
1721        // better have read new data...
1722        assert(pkt->hasData());
1723
1724        // only read responses and write-line requests have data;
1725        // note that we don't write the data here for write-line - that
1726        // happens in the subsequent satisfyCpuSideRequest.
1727        assert(pkt->isRead() || pkt->cmd == MemCmd::WriteLineReq);
1728
1729        // need to do a replacement if allocating, otherwise we stick
1730        // with the temporary storage
1731        blk = allocate ? allocateBlock(addr, is_secure, writebacks) : NULL;
1732
1733        if (blk == NULL) {
1734            // No replaceable block or a mostly exclusive
1735            // cache... just use temporary storage to complete the
1736            // current request and then get rid of it
1737            assert(!tempBlock->isValid());
1738            blk = tempBlock;
1739            tempBlock->set = tags->extractSet(addr);
1740            tempBlock->tag = tags->extractTag(addr);
1741            // @todo: set security state as well...
1742            DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
1743                    is_secure ? "s" : "ns");
1744        } else {
1745            tags->insertBlock(pkt, blk);
1746        }
1747
1748        // we should never be overwriting a valid block
1749        assert(!blk->isValid());
1750    } else {
1751        // existing block... probably an upgrade
1752        assert(blk->tag == tags->extractTag(addr));
1753        // either we're getting new data or the block should already be valid
1754        assert(pkt->hasData() || blk->isValid());
1755        // don't clear block status... if block is already dirty we
1756        // don't want to lose that
1757    }
1758
1759    if (is_secure)
1760        blk->status |= BlkSecure;
1761    blk->status |= BlkValid | BlkReadable;
1762
1763    // sanity check for whole-line writes, which should always be
1764    // marked as writable as part of the fill, and then later marked
1765    // dirty as part of satisfyCpuSideRequest
1766    if (pkt->cmd == MemCmd::WriteLineReq) {
1767        assert(!pkt->sharedAsserted());
1768        // at the moment other caches do not respond to the
1769        // invalidation requests corresponding to a whole-line write
1770        assert(!pkt->memInhibitAsserted());
1771    }
1772
1773    if (!pkt->sharedAsserted()) {
1774        // we could get non-shared responses from memory (rather than
1775        // a cache) even in a read-only cache, note that we set this
1776        // bit even for a read-only cache as we use it to represent
1777        // the exclusive state
1778        blk->status |= BlkWritable;
1779
1780        // If we got this via cache-to-cache transfer (i.e., from a
1781        // cache that was an owner) and took away that owner's copy,
1782        // then we need to write it back.  Normally this happens
1783        // anyway as a side effect of getting a copy to write it, but
1784        // there are cases (such as failed store conditionals or
1785        // compare-and-swaps) where we'll demand an exclusive copy but
1786        // end up not writing it.
1787        if (pkt->memInhibitAsserted()) {
1788            blk->status |= BlkDirty;
1789
1790            chatty_assert(!isReadOnly, "Should never see dirty snoop response "
1791                          "in read-only cache %s\n", name());
1792        }
1793    }
1794
1795    DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
1796            addr, is_secure ? "s" : "ns", old_state, blk->print());
1797
1798    // if we got new data, copy it in (checking for a read response
1799    // and a response that has data is the same in the end)
1800    if (pkt->isRead()) {
1801        // sanity checks
1802        assert(pkt->hasData());
1803        assert(pkt->getSize() == blkSize);
1804
1805        std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
1806    }
1807    // We pay for fillLatency here.
1808    blk->whenReady = clockEdge() + fillLatency * clockPeriod() +
1809        pkt->payloadDelay;
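    // Illustrative sketch with assumed values: with a 500-tick clock and
    // fillLatency = 4 cycles, a payloadDelay of 250 ticks makes the block
    // usable at clockEdge() + 4 * 500 + 250 ticks.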
1810
1811    return blk;
1812}
1813
1814
1815/////////////////////////////////////////////////////
1816//
1817// Snoop path: requests coming in from the memory side
1818//
1819/////////////////////////////////////////////////////
1820
1821void
1822Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
1823                              bool already_copied, bool pending_inval)
1824{
1825    // sanity check
1826    assert(req_pkt->isRequest());
1827    assert(req_pkt->needsResponse());
1828
1829    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
1830            req_pkt->cmdString(), req_pkt->getAddr(), req_pkt->getSize());
1831    // timing-mode snoop responses require a new packet, unless we
1832    // already made a copy...
1833    PacketPtr pkt = req_pkt;
1834    if (!already_copied)
1835        // do not clear flags, and allocate space for data if the
1836        // packet needs it (the only packets that carry data are read
1837        // responses)
1838        pkt = new Packet(req_pkt, false, req_pkt->isRead());
1839
1840    assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
1841           pkt->sharedAsserted());
1842    pkt->makeTimingResponse();
1843    if (pkt->isRead()) {
1844        pkt->setDataFromBlock(blk_data, blkSize);
1845    }
1846    if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
1847        // Assume we defer a response to a read from a far-away cache
1848        // A, then later defer a ReadExcl from a cache B on the same
1849        // bus as us.  We'll assert MemInhibit in both cases, but in
1850        // the latter case MemInhibit will keep the invalidation from
1851        // reaching cache A.  This special response tells cache A that
1852        // it gets the block to satisfy its read, but must immediately
1853        // invalidate it.
1854        pkt->cmd = MemCmd::ReadRespWithInvalidate;
1855    }
1856    // Here we consider forward_time, paying for just forward latency and
1857    // also charging the delay provided by the xbar.
1858    // forward_time is used as send_time in next allocateWriteBuffer().
1859    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
1860    // Here we reset the timing of the packet.
1861    pkt->headerDelay = pkt->payloadDelay = 0;
1862    DPRINTF(Cache, "%s created response: %s addr %#llx size %d tick: %lu\n",
1863            __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize(),
1864            forward_time);
1865    memSidePort->schedTimingSnoopResp(pkt, forward_time, true);
1866}
1867
1868uint32_t
1869Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
1870                   bool is_deferred, bool pending_inval)
1871{
1872    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
1873            pkt->cmdString(), pkt->getAddr(), pkt->getSize());
1874    // deferred snoops can only happen in timing mode
1875    assert(!(is_deferred && !is_timing));
1876    // pending_inval only makes sense on deferred snoops
1877    assert(!(pending_inval && !is_deferred));
1878    assert(pkt->isRequest());
1879
1880    // the packet may get modified if we or a forwarded snooper
1881    // responds in atomic mode, so remember a few things about the
1882    // original packet up front
1883    bool invalidate = pkt->isInvalidate();
1884    bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
1885
1886    uint32_t snoop_delay = 0;
1887
1888    if (forwardSnoops) {
1889        // first propagate snoop upward to see if anyone above us wants to
1890        // handle it.  save & restore packet src since it will get
1891        // rewritten to be relative to cpu-side bus (if any)
1892        bool alreadyResponded = pkt->memInhibitAsserted();
1893        if (is_timing) {
1894            // copy the packet so that we can clear any flags before
1895            // forwarding it upwards, we also allocate data (passing
1896            // the pointer along in case of static data), in case
1897            // there is a snoop hit in upper levels
1898            Packet snoopPkt(pkt, true, true);
1899            snoopPkt.setExpressSnoop();
1900            snoopPkt.pushSenderState(new ForwardResponseRecord());
1901            // the snoop packet does not need to wait any additional
1902            // time
1903            snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
1904            cpuSidePort->sendTimingSnoopReq(&snoopPkt);
1905
1906            // add the header delay (including crossbar and snoop
1907            // delays) of the upward snoop to the snoop delay for this
1908            // cache
1909            snoop_delay += snoopPkt.headerDelay;
1910
1911            if (snoopPkt.memInhibitAsserted()) {
1912                // cache-to-cache response from some upper cache
1913                assert(!alreadyResponded);
1914                pkt->assertMemInhibit();
1915            } else {
1916                // no cache (or anyone else for that matter) will
1917                // respond, so delete the ForwardResponseRecord here
1918                delete snoopPkt.popSenderState();
1919            }
1920            if (snoopPkt.sharedAsserted()) {
1921                pkt->assertShared();
1922            }
1923            // If this request is a prefetch or clean evict and an upper level
1924            // signals block present, make sure to propagate the block
1925            // presence to the requester.
1926            if (snoopPkt.isBlockCached()) {
1927                pkt->setBlockCached();
1928            }
1929        } else {
1930            cpuSidePort->sendAtomicSnoop(pkt);
1931            if (!alreadyResponded && pkt->memInhibitAsserted()) {
1932                // cache-to-cache response from some upper cache:
1933                // forward response to original requester
1934                assert(pkt->isResponse());
1935            }
1936        }
1937    }
1938
1939    if (!blk || !blk->isValid()) {
1940        DPRINTF(Cache, "%s snoop miss for %s addr %#llx size %d\n",
1941                __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
1942        return snoop_delay;
1943    } else {
1944        DPRINTF(Cache, "%s snoop hit for %s addr %#llx size %d, "
1945                "old state is %s\n", __func__, pkt->cmdString(),
1946                pkt->getAddr(), pkt->getSize(), blk->print());
1947    }
1948
1949    chatty_assert(!(isReadOnly && blk->isDirty()),
1950                  "Should never have a dirty block in a read-only cache %s\n",
1951                  name());
1952
1953    // We may end up modifying both the block state and the packet (if
1954    // we respond in atomic mode), so just figure out what to do now
1955    // and then do it later. If we find dirty data while snooping for
1956    // an invalidate, we don't need to send a response. The
1957    // invalidation itself is taken care of below.
1958    bool respond = blk->isDirty() && pkt->needsResponse() &&
1959        pkt->cmd != MemCmd::InvalidateReq;
1960    bool have_exclusive = blk->isWritable();
1961
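    // Sketch of the outcomes decided below: if we hold dirty data, a
    // response is needed and this is not a plain invalidate, we supply the
    // data ourselves, with have_exclusive merely hinting that the copy can
    // be passed exclusively. A cacheable, non-invalidating read snoop also
    // downgrades our copy to shared (owned while it remains dirty), and an
    // invalidating snoop removes our copy at the end.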
1962    // Invalidate any prefetches from below that would strip write
1963    // permissions. MemCmd::HardPFReq is only observed by upstream caches.
1964    // After missing above and in its own cache, a new MemCmd::ReadReq is
1965    // created that downstream caches observe.
1966    if (pkt->mustCheckAbove()) {
1967        DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s from"
1968                " lower cache\n", pkt->getAddr(), pkt->cmdString());
1969        pkt->setBlockCached();
1970        return snoop_delay;
1971    }
1972
1973    if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
1974        // reading non-exclusive shared data, note that we retain
1975        // the block in owned state if it is dirty, with the response
1976        // taken care of below, and otherwise simply downgrade to
1977        // shared
1978        assert(!needs_exclusive);
1979        pkt->assertShared();
1980        blk->status &= ~BlkWritable;
1981    }
1982
1983    if (respond) {
1984        // prevent anyone else from responding, cache as well as
1985        // memory, and also prevent any memory from even seeing the
1986        // request (with current inhibited semantics), note that this
1987        // applies both to reads and writes and that for writes it
1988        // works thanks to the fact that we still have dirty data and
1989        // will write it back at a later point
1990        assert(!pkt->memInhibitAsserted());
1991        pkt->assertMemInhibit();
1992        if (have_exclusive) {
1993            // in the case of an uncacheable request there is no point
1994            // in setting the exclusive flag, but since the recipient
1995            // does not care there is no harm in doing so, in any case
1996            // it is just a hint
1997            pkt->setSupplyExclusive();
1998        }
1999        if (is_timing) {
2000            doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
2001        } else {
2002            pkt->makeAtomicResponse();
2003            pkt->setDataFromBlock(blk->data, blkSize);
2004        }
2005    }
2006
2007    if (!respond && is_timing && is_deferred) {
2008        // if it's a deferred timing snoop to which we are not
2009        // responding, then we've made a copy of both the request and
2010        // the packet, delete them here
2011        assert(pkt->needsResponse());
2012        delete pkt->req;
2013        delete pkt;
2014    }
2015
2016    // Do this last in case it deallocates block data or something
2017    // like that
2018    if (invalidate) {
2019        invalidateBlock(blk);
2020    }
2021
2022    DPRINTF(Cache, "new state is %s\n", blk->print());
2023
2024    return snoop_delay;
2025}
2026
2027
2028void
2029Cache::recvTimingSnoopReq(PacketPtr pkt)
2030{
2031    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
2032            pkt->cmdString(), pkt->getAddr(), pkt->getSize());
2033
2034    // Snoops shouldn't happen when bypassing caches
2035    assert(!system->bypassCaches());
2036
2037    // no need to snoop requests that are not in range
2038    if (!inRange(pkt->getAddr())) {
2039        return;
2040    }
2041
2042    bool is_secure = pkt->isSecure();
2043    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
2044
2045    Addr blk_addr = blockAlign(pkt->getAddr());
2046    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
2047
2048    // Update the latency cost of the snoop so that the crossbar can
2049    // account for it. Do not overwrite what other neighbouring caches
2050    // have already done, rather take the maximum. The update is
2051    // tentative, for cases where we return before an upward snoop
2052    // happens below.
2053    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
2054                                         lookupLatency * clockPeriod());
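    // Illustrative sketch with assumed values: lookupLatency = 2 cycles at
    // a 500-tick clock contributes 1000 ticks; if a neighbouring cache has
    // already recorded 1500 ticks in pkt->snoopDelay, the larger value is
    // kept.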
2055
2056    // Inform requests from below (Prefetch, CleanEvict or Writeback) of
2057    // an MSHR hit by calling setBlockCached.
2058    if (mshr && pkt->mustCheckAbove()) {
2059        DPRINTF(Cache, "Setting block cached for %s from"
2060                "lower cache on mshr hit %#x\n",
2061                pkt->cmdString(), pkt->getAddr());
2062        pkt->setBlockCached();
2063        return;
2064    }
2065
2066    // Let the MSHR itself track the snoop and decide whether we want
2067    // to go ahead and do the regular cache snoop
2068    if (mshr && mshr->handleSnoop(pkt, order++)) {
2069        DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s)."
2070                "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
2071                mshr->print());
2072
2073        if (mshr->getNumTargets() > numTarget)
2074            warn("allocating bonus target for snoop"); //handle later
2075        return;
2076    }
2077
2078    //We also need to check the writeback buffers and handle those
2079    std::vector<MSHR *> writebacks;
2080    if (writeBuffer.findMatches(blk_addr, is_secure, writebacks)) {
2081        DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
2082                pkt->getAddr(), is_secure ? "s" : "ns");
2083
2084        // Look through writebacks for any cacheable writes.
2085        // We should only ever find a single match
2086        assert(writebacks.size() == 1);
2087        MSHR *wb_entry = writebacks[0];
2088        // Expect to see only Writebacks and/or CleanEvicts here, both of
2089        // which should not be generated for uncacheable data.
2090        assert(!wb_entry->isUncacheable());
2091        // There should only be a single request responsible for generating
2092        // Writebacks/CleanEvicts.
2093        assert(wb_entry->getNumTargets() == 1);
2094        PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
2095        assert(wb_pkt->isEviction());
2096
2097        if (pkt->isEviction()) {
2098            // if the block is found in the write queue, set the BLOCK_CACHED
2099            // flag for Writeback/CleanEvict snoop. On return the snoop will
2100            // propagate the BLOCK_CACHED flag in Writeback packets and prevent
2101            // any CleanEvicts from travelling down the memory hierarchy.
2102            pkt->setBlockCached();
2103            DPRINTF(Cache, "Squashing %s from lower cache on writequeue hit"
2104                    " %#x\n", pkt->cmdString(), pkt->getAddr());
2105            return;
2106        }
2107
2108        if (wb_pkt->cmd == MemCmd::WritebackDirty) {
2109            assert(!pkt->memInhibitAsserted());
2110            pkt->assertMemInhibit();
2111            if (!pkt->needsExclusive()) {
2112                pkt->assertShared();
2113                // the writeback is no longer passing exclusivity (the
2114                // receiving cache should consider the block owned
2115                // rather than modified)
2116                wb_pkt->assertShared();
2117            } else {
2118                // if we're not asserting the shared line, we need to
2119                // invalidate our copy.  we'll do that below as long as
2120                // the packet's invalidate flag is set...
2121                assert(pkt->isInvalidate());
2122            }
2123            doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
2124                                   false, false);
2125        } else {
2126            // on hitting a clean writeback we play it safe and do not
2127            // provide a response, the block may be dirty somewhere
2128            // else
2129            assert(wb_pkt->isCleanEviction());
2130            // The cache technically holds the block until the
2131            // corresponding message reaches the crossbar
2132            // below. Therefore when a snoop encounters a CleanEvict
2133            // or WritebackClean message we must set assertShared
2134            // (just like when it encounters a Writeback) to avoid the
2135            // snoop filter prematurely clearing the holder bit in the
2136            // crossbar below
2137            if (!pkt->needsExclusive()) {
2138                pkt->assertShared();
2139                // the writeback is no longer passing exclusivity (the
2140                // receiving cache should consider the block owned
2141                // rather than modified)
2142                wb_pkt->assertShared();
2143            } else {
2144                assert(pkt->isInvalidate());
2145            }
2146        }
2147
2148        if (pkt->isInvalidate()) {
2149            // Invalidation trumps our writeback... discard here
2150            // Note: markInService will remove entry from writeback buffer.
2151            markInService(wb_entry, false);
2152            delete wb_pkt;
2153        }
2154    }
2155
2156    // If this was a shared writeback, there may still be
2157    // other shared copies above that require invalidation.
2158    // We could be more selective and return here if the
2159    // request is non-exclusive or if the writeback is
2160    // exclusive.
2161    uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false);
2162
2163    // Override what we did when we first saw the snoop, as we now
2164    // also have the cost of the upwards snoops to account for
2165    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay +
2166                                         lookupLatency * clockPeriod());
2167}
2168
2169bool
2170Cache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
2171{
2172    // Express snoop responses from master to slave, e.g., from L1 to L2
2173    cache->recvTimingSnoopResp(pkt);
2174    return true;
2175}
2176
2177Tick
2178Cache::recvAtomicSnoop(PacketPtr pkt)
2179{
2180    // Snoops shouldn't happen when bypassing caches
2181    assert(!system->bypassCaches());
2182
2183    // no need to snoop requests that are not in range.
2184    if (!inRange(pkt->getAddr())) {
2185        return 0;
2186    }
2187
2188    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
2189    uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
2190    return snoop_delay + lookupLatency * clockPeriod();
2191}
2192
2193
2194MSHR *
2195Cache::getNextMSHR()
2196{
2197    // Check both MSHR queue and write buffer for potential requests,
2198    // note that null does not mean there is no request, it could
2199    // simply be that it is not ready
2200    MSHR *miss_mshr  = mshrQueue.getNextMSHR();
2201    MSHR *write_mshr = writeBuffer.getNextMSHR();
2202
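    // Summary sketch of the arbitration below: a ready write buffer entry
    // is chosen only if the write buffer is full with nothing in service,
    // or if no miss is ready, and even then an older conflicting miss goes
    // first; a ready miss in turn defers to any conflicting writeback.
    // With neither queue ready we fall through and try a prefetch.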
2203    // If we got a write buffer request ready, first priority is a
2204    // full write buffer, otherwise we favour the miss requests
2205    if (write_mshr &&
2206        ((writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) ||
2207         !miss_mshr)) {
2208        // need to search MSHR queue for conflicting earlier miss.
2209        MSHR *conflict_mshr =
2210            mshrQueue.findPending(write_mshr->blkAddr,
2211                                  write_mshr->isSecure);
2212
2213        if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
2214            // Service misses in order until conflict is cleared.
2215            return conflict_mshr;
2216
2217            // @todo Note that we ignore the ready time of the conflict here
2218        }
2219
2220        // No conflicts; issue write
2221        return write_mshr;
2222    } else if (miss_mshr) {
2223        // need to check for conflicting earlier writeback
2224        MSHR *conflict_mshr =
2225            writeBuffer.findPending(miss_mshr->blkAddr,
2226                                    miss_mshr->isSecure);
2227        if (conflict_mshr) {
2228            // not sure why we don't check order here... it was in the
2229            // original code but commented out.
2230
2231            // The only way this happens is if we are
2232            // doing a write and we didn't have permissions
2233            // then subsequently saw a writeback (owned got evicted)
2234            // We need to make sure to perform the writeback first
2235            // To preserve the dirty data, then we can issue the write
2236
2237            // should we return write_mshr here instead?  I.e. do we
2238            // have to flush writes in order?  I don't think so... not
2239            // for Alpha anyway.  Maybe for x86?
2240            return conflict_mshr;
2241
2242            // @todo Note that we ignore the ready time of the conflict here
2243        }
2244
2245        // No conflicts; issue read
2246        return miss_mshr;
2247    }
2248
2249    // fall through... no pending requests.  Try a prefetch.
2250    assert(!miss_mshr && !write_mshr);
2251    if (prefetcher && mshrQueue.canPrefetch()) {
2252        // If we have a miss queue slot, we can try a prefetch
2253        PacketPtr pkt = prefetcher->getPacket();
2254        if (pkt) {
2255            Addr pf_addr = blockAlign(pkt->getAddr());
2256            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
2257                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
2258                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
2259                // Update statistic on number of prefetches issued
2260                // (hwpf_mshr_misses)
2261                assert(pkt->req->masterId() < system->maxMasters());
2262                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
2263
2264                // allocate an MSHR and return it, note
2265                // that we send the packet straight away, so do not
2266                // schedule the send
2267                return allocateMissBuffer(pkt, curTick(), false);
2268            } else {
2269                // free the request and packet
2270                delete pkt->req;
2271                delete pkt;
2272            }
2273        }
2274    }
2275
2276    return NULL;
2277}
2278
2279bool
2280Cache::isCachedAbove(PacketPtr pkt, bool is_timing) const
2281{
2282    if (!forwardSnoops)
2283        return false;
2284    // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
2285    // Writeback snoops into upper level caches to check for copies of the
2286    // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict
2287    // packet, the cache can inform the crossbar below of presence or absence
2288    // of the block.
2289    if (is_timing) {
2290        Packet snoop_pkt(pkt, true, false);
2291        snoop_pkt.setExpressSnoop();
2292        // Assert that packet is either Writeback or CleanEvict and not a
2293        // prefetch request because prefetch requests need an MSHR and may
2294        // generate a snoop response.
2295        assert(pkt->isEviction());
2296        snoop_pkt.senderState = NULL;
2297        cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
2298        // Writeback/CleanEvict snoops do not generate a snoop response.
2299        assert(!(snoop_pkt.memInhibitAsserted()));
2300        return snoop_pkt.isBlockCached();
2301    } else {
2302        cpuSidePort->sendAtomicSnoop(pkt);
2303        return pkt->isBlockCached();
2304    }
2305}
2306
2307PacketPtr
2308Cache::getTimingPacket()
2309{
2310    MSHR *mshr = getNextMSHR();
2311
2312    if (mshr == NULL) {
2313        return NULL;
2314    }
2315
2316    // use request from 1st target
2317    PacketPtr tgt_pkt = mshr->getTarget()->pkt;
2318    PacketPtr pkt = NULL;
2319
2320    DPRINTF(CachePort, "%s %s for addr %#llx size %d\n", __func__,
2321            tgt_pkt->cmdString(), tgt_pkt->getAddr(), tgt_pkt->getSize());
2322
2323    CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
2324
2325    if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
2326        // We need to check the caches above us to verify that
2327        // they don't have a copy of this block in the dirty state
2328        // at the moment. Without this check we could get a stale
2329        // copy from memory that might get used in place of the
2330        // dirty one.
2331        Packet snoop_pkt(tgt_pkt, true, false);
2332        snoop_pkt.setExpressSnoop();
2333        // We are sending this packet upwards, but if it hits we will
2334        // get a snoop response that we end up treating just like a
2335        // normal response, hence it needs the MSHR as its sender
2336        // state
2337        snoop_pkt.senderState = mshr;
2338        cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
2339
2340        // Check to see if the prefetch was squashed by an upper
2341        // cache (to prevent us from grabbing the line), or if a
2342        // writeback arrived between the time the prefetch was
2343        // placed in the MSHRs and when it was selected to be
2344        // sent.
2345
2346        // It is important to check memInhibitAsserted before
2347        // prefetchSquashed. If another cache has asserted MEM_INHIBIT, it
2348        // will be sending a response which will arrive at the MSHR
2349        // allocated for this request. Checking the prefetchSquash first
2350        // may result in the MSHR being prematurely deallocated.
2351
2352        if (snoop_pkt.memInhibitAsserted()) {
2353            // If we are getting a non-shared response it is dirty
2354            bool pending_dirty_resp = !snoop_pkt.sharedAsserted();
2355            markInService(mshr, pending_dirty_resp);
2356            DPRINTF(Cache, "Upward snoop of prefetch for addr"
2357                    " %#x (%s) hit\n",
2358                    tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
2359            return NULL;
2360        }
2361
2362        if (snoop_pkt.isBlockCached() || blk != NULL) {
2363            DPRINTF(Cache, "Block present, prefetch squashed by cache.  "
2364                    "Deallocating mshr target %#x.\n",
2365                    mshr->blkAddr);
2366
2367            // Deallocate the mshr target
2368            if (!tgt_pkt->isWriteback()) {
2369                if (mshr->queue->forceDeallocateTarget(mshr)) {
2370                    // Clear the blocked flag if this deallocation freed an
2371                    // MSHR when all had previously been utilized
2372                    clearBlocked((BlockedCause)(mshr->queue->index));
2373                }
2374                return NULL;
2375            } else {
2376                // If this is a Writeback, and the snoops indicate that the blk
2377                // is cached above, set the BLOCK_CACHED flag in the Writeback
2378                // packet, so that it does not reset the bits corresponding to
2379                // this block in the snoop filter below.
2380                tgt_pkt->setBlockCached();
2381            }
2382        }
2383    }
2384
2385    if (mshr->isForwardNoResponse()) {
2386        // no response expected, just forward packet as it is
2387        assert(tags->findBlock(mshr->blkAddr, mshr->isSecure) == NULL);
2388        pkt = tgt_pkt;
2389    } else {
2390        pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
2391
2392        mshr->isForward = (pkt == NULL);
2393
2394        if (mshr->isForward) {
2395            // not a cache block request, but a response is expected
2396            // make copy of current packet to forward, keep current
2397            // copy for response handling
2398            pkt = new Packet(tgt_pkt, false, true);
2399            if (pkt->isWrite()) {
2400                pkt->setData(tgt_pkt->getConstPtr<uint8_t>());
2401            }
2402        }
2403    }
2404
2405    assert(pkt != NULL);
2406    // play it safe and append (rather than set) the sender state, as
2407    // forwarded packets may already have existing state
2408    pkt->pushSenderState(mshr);
2409    return pkt;
2410}
2411
2412
2413Tick
2414Cache::nextMSHRReadyTime() const
2415{
2416    Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
2417                              writeBuffer.nextMSHRReadyTime());
2418
2419    // Don't signal prefetch ready time if no MSHRs available
2420    // Will signal once enough MSHRs are deallocated
2421    if (prefetcher && mshrQueue.canPrefetch()) {
2422        nextReady = std::min(nextReady,
2423                             prefetcher->nextPrefetchReadyTime());
2424    }
2425
2426    return nextReady;
2427}
2428
2429void
2430Cache::serialize(CheckpointOut &cp) const
2431{
2432    bool dirty(isDirty());
2433
2434    if (dirty) {
2435        warn("*** The cache still contains dirty data. ***\n");
2436        warn("    Make sure to drain the system using the correct flags.\n");
2437        warn("    This checkpoint will not restore correctly and dirty data in "
2438             "the cache will be lost!\n");
2439    }
2440
2441    // Since we don't checkpoint the data in the cache, any dirty data
2442    // will be lost when restoring from a checkpoint of a system that
2443    // wasn't drained properly. Flag the checkpoint as invalid if the
2444    // cache contains dirty data.
2445    bool bad_checkpoint(dirty);
2446    SERIALIZE_SCALAR(bad_checkpoint);
2447}
2448
2449void
2450Cache::unserialize(CheckpointIn &cp)
2451{
2452    bool bad_checkpoint;
2453    UNSERIALIZE_SCALAR(bad_checkpoint);
2454    if (bad_checkpoint) {
2455        fatal("Restoring from checkpoints with dirty caches is not supported "
2456              "in the classic memory system. Please remove any caches or "
2457              " drain them properly before taking checkpoints.\n");
2458    }
2459}
2460
2461///////////////
2462//
2463// CpuSidePort
2464//
2465///////////////
2466
2467AddrRangeList
2468Cache::CpuSidePort::getAddrRanges() const
2469{
2470    return cache->getAddrRanges();
2471}
2472
2473bool
2474Cache::CpuSidePort::recvTimingReq(PacketPtr pkt)
2475{
2476    assert(!cache->system->bypassCaches());
2477
2478    bool success = false;
2479
2480    // always let inhibited requests through, even if blocked,
2481    // ultimately we should check if this is an express snoop, but at
2482    // the moment that flag is only set in the cache itself
2483    if (pkt->memInhibitAsserted()) {
2484        // do not change the current retry state
2485        bool M5_VAR_USED bypass_success = cache->recvTimingReq(pkt);
2486        assert(bypass_success);
2487        return true;
2488    } else if (blocked || mustSendRetry) {
2489        // either already committed to send a retry, or blocked
2490        success = false;
2491    } else {
2492        // pass it on to the cache, and let the cache decide if we
2493        // have to retry or not
2494        success = cache->recvTimingReq(pkt);
2495    }
2496
2497    // remember if we have to retry
2498    mustSendRetry = !success;
2499    return success;
2500}
2501
2502Tick
2503Cache::CpuSidePort::recvAtomic(PacketPtr pkt)
2504{
2505    return cache->recvAtomic(pkt);
2506}
2507
2508void
2509Cache::CpuSidePort::recvFunctional(PacketPtr pkt)
2510{
2511    // functional request
2512    cache->functionalAccess(pkt, true);
2513}
2514
2515Cache::
2516CpuSidePort::CpuSidePort(const std::string &_name, Cache *_cache,
2517                         const std::string &_label)
2518    : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache)
2519{
2520}
2521
2522Cache*
2523CacheParams::create()
2524{
2525    assert(tags);
2526
2527    return new Cache(this);
2528}
2529///////////////
2530//
2531// MemSidePort
2532//
2533///////////////
2534
2535bool
2536Cache::MemSidePort::recvTimingResp(PacketPtr pkt)
2537{
2538    cache->recvTimingResp(pkt);
2539    return true;
2540}
2541
2542// Express snooping requests to memside port
2543void
2544Cache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
2545{
2546    // handle snooping requests
2547    cache->recvTimingSnoopReq(pkt);
2548}
2549
2550Tick
2551Cache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
2552{
2553    return cache->recvAtomicSnoop(pkt);
2554}
2555
2556void
2557Cache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
2558{
2559    // functional snoop (note that in contrast to atomic we don't have
2560    // a specific functionalSnoop method, as they have the same
2561    // behaviour regardless)
2562    cache->functionalAccess(pkt, false);
2563}
2564
2565void
2566Cache::CacheReqPacketQueue::sendDeferredPacket()
2567{
2568    // sanity check
2569    assert(!waitingOnRetry);
2570
2571    // there should never be any deferred request packets in the
2572    // queue, instead we rely on the cache to provide the packets
2573    // from the MSHR queue or write queue
2574    assert(deferredPacketReadyTime() == MaxTick);
2575
2576    // check for request packets (requests & writebacks)
2577    PacketPtr pkt = cache.getTimingPacket();
2578    if (pkt == NULL) {
2579        // can happen if e.g. we attempt a writeback and fail, but
2580        // before the retry, the writeback is eliminated because
2581        // we snoop another cache's ReadEx.
2582    } else {
2583        MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
2584        // in most cases getTimingPacket allocates a new packet, and
2585        // we must delete it unless it is successfully sent
2586        bool delete_pkt = !mshr->isForwardNoResponse();
2587
2588        // let our snoop responses go first if there are responses to
2589        // the same addresses we are about to write back, note that
2590        // this creates a dependency between requests and snoop
2591        // responses, but that should not be a problem since there is
2592        // a chain already and the key is that the snoop responses can
2593        // sink unconditionally
2594        if (snoopRespQueue.hasAddr(pkt->getAddr())) {
2595            DPRINTF(CachePort, "Waiting for snoop response to be sent\n");
2596            Tick when = snoopRespQueue.deferredPacketReadyTime();
2597            schedSendEvent(when);
2598
2599            if (delete_pkt)
2600                delete pkt;
2601
2602            return;
2603        }
2604
2605
2606        waitingOnRetry = !masterPort.sendTimingReq(pkt);
2607
2608        if (waitingOnRetry) {
2609            DPRINTF(CachePort, "now waiting on a retry\n");
2610            if (delete_pkt) {
2611                // we are awaiting a retry, but we delete this
2612                // packet and will create a new one when we get
2613                // the opportunity
2614                delete pkt;
2615            }
2616            // note that we have now masked any requestBus and
2617            // schedSendEvent (we will wait for a retry before
2618            // doing anything), and this is so even if we do not
2619            // care about this packet and might override it before
2620            // it gets retried
2621        } else {
2622            // As part of the call to sendTimingReq the packet is
2623            // forwarded to all neighbouring caches (and any
2624            // caches above them) as a snoop. The packet is also
2625            // sent to any potential cache below as the
2626            // interconnect is not allowed to buffer the
2627            // packet. Thus at this point we know if any of the
2628            // neighbouring, or the downstream cache is
2629            // responding, and if so, if it is with a dirty line
2630            // or not.
2631            bool pending_dirty_resp = !pkt->sharedAsserted() &&
2632                pkt->memInhibitAsserted();
2633
2634            cache.markInService(mshr, pending_dirty_resp);
2635        }
2636    }
2637
2638    // if we succeeded and are not waiting for a retry, schedule the
2639    // next send considering when the next MSHR is ready, note that
2640    // snoop responses have their own packet queue and thus schedule
2641    // their own events
2642    if (!waitingOnRetry) {
2643        schedSendEvent(cache.nextMSHRReadyTime());
2644    }
2645}
2646
2647Cache::
2648MemSidePort::MemSidePort(const std::string &_name, Cache *_cache,
2649                         const std::string &_label)
2650    : BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
2651      _reqQueue(*_cache, *this, _snoopRespQueue, _label),
2652      _snoopRespQueue(*_cache, *this, _label), cache(_cache)
2653{
2654}
2655