// cache.cc revision 11278:18411ccc4f3c
/*
 * Copyright (c) 2010-2015 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Sandberg
 */

/**
 * @file
 * Cache definitions.
 */

#include "mem/cache/cache.hh"

#include "base/misc.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheTags.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "sim/sim_exit.hh"

Cache::Cache(const CacheParams *p)
    : BaseCache(p, p->system->cacheLineSize()),
      tags(p->tags),
      prefetcher(p->prefetcher),
      doFastWrites(true),
      prefetchOnAccess(p->prefetch_on_access),
      clusivity(p->clusivity),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent(this, false,
                                    EventBase::Delayed_Writeback_Pri)
{
    tempBlock = new CacheBlk();
    tempBlock->data = new uint8_t[blkSize];

    cpuSidePort = new CpuSidePort(p->name + ".cpu_side", this,
                                  "CpuSidePort");
    memSidePort = new MemSidePort(p->name + ".mem_side", this,
                                  "MemSidePort");

    tags->setCache(this);
    if (prefetcher)
        prefetcher->setCache(this);
}

Cache::~Cache()
{
    delete [] tempBlock->data;
    delete tempBlock;

    delete cpuSidePort;
    delete memSidePort;
}

void
Cache::regStats()
{
    BaseCache::regStats();
}

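// Descriptive summary (added): handle an atomic swap (SwapReq). The
// packet's write value is copied aside, the current block contents are
// returned in the packet, and the block is overwritten (and marked dirty)
// either unconditionally or, for a conditional swap, only if the stored
// value matches the condition value carried in the request's extra data.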
void
Cache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = tags->extractBlkOffset(pkt->getAddr());
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}

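// Descriptive summary (added): fill in the response for a CPU-side
// request that hits in blk, covering swaps, writes, reads (including the
// coherence decision on whether to pass the block upstream exclusive
// and/or dirty) and upgrades/invalidations. deferred_response means we
// are responding from the MSHR after our own miss; pending_downgrade
// means a pending snoop from below will downgrade the block.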
void
Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
                             bool deferred_response, bool pending_downgrade)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsExclusive() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        cmpAndSwap(blk, pkt);
    } else if (pkt->isWrite()) {
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in Exclusive
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty even if we are a failed
        // StoreCond so we supply data to any snoops that have
        // appended themselves to this cache before knowing the store
        // will fail.
        blk->status |= BlkDirty;
        DPRINTF(Cache, "%s for %s addr %#llx size %d (write)\n", __func__,
                pkt->cmdString(), pkt->getAddr(), pkt->getSize());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }
        pkt->setDataFromBlock(blk->data, blkSize);
        // determine if this read is from a (coherent) cache, or not
        // by looking at the command type; we could potentially add a
        // packet attribute such as 'FromCache' to make this check a
        // bit cleaner
        if (pkt->cmd == MemCmd::ReadExReq ||
            pkt->cmd == MemCmd::ReadSharedReq ||
            pkt->cmd == MemCmd::ReadCleanReq ||
            pkt->cmd == MemCmd::SCUpgradeFailReq) {
            assert(pkt->getSize() == blkSize);
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsExclusive()) {
                // sanity check
                assert(pkt->cmd == MemCmd::ReadExReq ||
                       pkt->cmd == MemCmd::SCUpgradeFailReq);

                // if we have a dirty copy, make sure the recipient
                // keeps it marked dirty
                if (blk->isDirty()) {
                    pkt->assertMemInhibit();
                }
                // on ReadExReq we give up our copy unconditionally,
                // even if this cache is mostly inclusive, we may want
                // to revisit this
                invalidateBlock(blk);
            } else if (blk->isWritable() && !pending_downgrade &&
                       !pkt->sharedAsserted() &&
                       pkt->cmd != MemCmd::ReadCleanReq) {
                // we can give the requester an exclusive copy (by not
                // asserting shared line) on a read request if:
                // - we have an exclusive copy at this level (& below)
                // - we don't have a pending snoop from below
                //   signaling another read request
                // - no other cache above has a copy (otherwise it
                //   would have asserted shared line on request)
                // - we are not satisfying an instruction fetch (this
                //   prevents dirty data in the i-cache)

                if (blk->isDirty()) {
                    // special considerations if we're owner:
                    if (!deferred_response) {
                        // if we are responding immediately and can
                        // signal that we're transferring ownership
                        // (inhibit set) along with exclusivity
                        // (shared not set), do so
                        pkt->assertMemInhibit();

                        // if this cache is mostly inclusive, we keep
                        // the block as writable (exclusive), and pass
                        // it upwards as writable and dirty
                        // (modified), hence we have multiple caches
                        // considering the same block writable,
                        // something that we get away with due to the
                        // fact that: 1) this cache has been
                        // considered the ordering point and
                        // responded to all snoops up till now, and 2)
                        // we always snoop upwards before consulting
                        // the local cache, both on a normal request
                        // (snooping done by the crossbar), and on a
                        // snoop
                        blk->status &= ~BlkDirty;

                        // if this cache is mostly exclusive with
                        // respect to the cache above, drop the block
                        if (clusivity == Enums::mostly_excl) {
                            invalidateBlock(blk);
                        }
                    } else {
                        // if we're responding after our own miss,
                        // there's a window where the recipient didn't
                        // know it was getting ownership and may not
                        // have responded to snoops correctly, so we
                        // can't pass off ownership *or* exclusivity
                        pkt->assertShared();
                    }
                }
            } else {
                // otherwise only respond with a shared copy
                pkt->assertShared();
            }
        }
    } else {
        // Upgrade or Invalidate, since we have it Exclusively (E or
        // M), we ack then invalidate.
        assert(pkt->isUpgrade() || pkt->isInvalidate());

        // for invalidations we could be looking at the temp block
        // (for upgrades we always allocate)
        invalidateBlock(blk);
        DPRINTF(Cache, "%s for %s addr %#llx size %d (invalidation)\n",
                __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
    }
}


/////////////////////////////////////////////////////
//
// MSHR helper functions
//
/////////////////////////////////////////////////////


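// Descriptive summary (added): mark the MSHR as in service by delegating
// to markInServiceInternal().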
void
Cache::markInService(MSHR *mshr, bool pending_dirty_resp)
{
    markInServiceInternal(mshr, pending_dirty_resp);
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

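// Descriptive summary (added): perform the tag lookup and try to satisfy
// the request in place. Returns true if the packet has been dealt with
// here (a hit, a sunk writeback/CleanEvict, or a failed store
// conditional) and false if it must be handled as a miss or forwarded.
// blk is set to the block found or allocated, lat is updated with the
// access latency, and any evictions triggered are appended to writebacks.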
bool
Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
              PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());

    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    if (pkt->req->isUncacheable()) {
        DPRINTF(Cache, "%s%s addr %#llx uncacheable\n", pkt->cmdString(),
                pkt->req->isInstFetch() ? " (ifetch)" : "",
                pkt->getAddr());

        // flush and invalidate any existing block
        CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
        if (old_blk && old_blk->isValid()) {
            if (old_blk->isDirty() || writebackClean)
                writebacks.push_back(writebackBlk(old_blk));
            else
                writebacks.push_back(cleanEvictBlk(old_blk));
            tags->invalidate(old_blk);
            old_blk->invalidate();
        }

        blk = NULL;
        // lookupLatency is the latency in case the request is uncacheable.
        lat = lookupLatency;
        return false;
    }

    ContextID id = pkt->req->hasContextId() ?
        pkt->req->contextId() : InvalidContextID;
    // Note that lat is passed by reference here and may be modified by
    // the accessBlock() call below.
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat, id);

    DPRINTF(Cache, "%s%s addr %#llx size %d (%s) %s\n", pkt->cmdString(),
            pkt->req->isInstFetch() ? " (ifetch)" : "",
            pkt->getAddr(), pkt->getSize(), pkt->isSecure() ? "s" : "ns",
            blk ? "hit " + blk->print() : "miss");


    if (pkt->isEviction()) {
        // We check for presence of block in above caches before issuing
        // Writeback or CleanEvict to write buffer. Therefore the only
        // possible case is a CleanEvict packet coming from above and
        // encountering a Writeback generated in this cache that is
        // waiting in the write buffer. Cases of upper level peer caches
        // generating CleanEvict and Writeback or simply CleanEvict and
        // CleanEvict almost simultaneously will be caught by snoops sent out
        // by crossbar.
        std::vector<MSHR *> outgoing;
        if (writeBuffer.findMatches(pkt->getAddr(), pkt->isSecure(),
                                   outgoing)) {
            assert(outgoing.size() == 1);
            MSHR *wb_entry = outgoing[0];
            assert(wb_entry->getNumTargets() == 1);
            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
            assert(wbPkt->isWriteback());

            if (pkt->isCleanEviction()) {
                // The CleanEvict and WritebackClean snoops into other
                // peer caches of the same level while traversing the
                // crossbar. If a copy of the block is found, the
                // packet is deleted in the crossbar. Hence, none of
                // the other upper level caches connected to this
                // cache have the block, so we can clear the
                // BLOCK_CACHED flag in the Writeback if set and
                // discard the CleanEvict by returning true.
                wbPkt->clearBlockCached();
                return true;
            } else {
                assert(pkt->cmd == MemCmd::WritebackDirty);
                // Dirty writeback from above trumps our clean
                // writeback... discard here
                // Note: markInService will remove entry from writeback buffer.
                markInService(wb_entry, false);
                delete wbPkt;
            }
        }
    }

    // Writeback handling is special case.  We can write the block into
    // the cache without having a writeable copy (or any copy at all).
    if (pkt->isWriteback()) {
        assert(blkSize == pkt->getSize());

        // we could get a clean writeback while we are having
        // outstanding accesses to a block, do the simple thing for
        // now and drop the clean writeback so that we do not upset
        // any ordering/decisions about ownership already taken
        if (pkt->cmd == MemCmd::WritebackClean &&
            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
                    "dropping\n", pkt->getAddr());
            return true;
        }

        if (blk == NULL) {
            // need to do a replacement
            blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks);
            if (blk == NULL) {
                // no replaceable block available: give up, fwd to next level.
                incMissCount(pkt);
                return false;
            }
            tags->insertBlock(pkt, blk);

            blk->status = (BlkValid | BlkReadable);
            if (pkt->isSecure()) {
                blk->status |= BlkSecure;
            }
        }
        // only mark the block dirty if we got a writeback command,
        // and leave it as is for a clean writeback
        if (pkt->cmd == MemCmd::WritebackDirty) {
            blk->status |= BlkDirty;
        }
        // if shared is not asserted we got the writeback in modified
        // state, if it is asserted we are in the owned state
        if (!pkt->sharedAsserted()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
        incHitCount(pkt);
        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        if (blk != NULL) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and delete
            // it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the CleanEvict
        // like a Writeback which could not find a replaceable block so has to
        // go to next level.
        return false;
    } else if ((blk != NULL) &&
               (pkt->needsExclusive() ? blk->isWritable()
                                      : blk->isReadable())) {
        // OK to satisfy access
        incHitCount(pkt);
        satisfyCpuSideRequest(pkt, blk);
        return true;
    }

    // Can't satisfy access normally... either no block (blk == NULL)
    // or have block but need exclusive & only have shared.

    incMissCount(pkt);

    if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}

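// Descriptive summary (added): send the writebacks and CleanEvicts
// collected during an access to the write buffer (timing mode), first
// checking whether the block is still cached above and dropping or
// flagging the packets accordingly.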
void
Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // We use forwardLatency here because we are copying writebacks to
        // write buffer.  Call isCachedAbove for both Writebacks and
        // CleanEvicts. If isCachedAbove returns true we set BLOCK_CACHED flag
        // in Writebacks and discard CleanEvicts.
        if (isCachedAbove(wbPkt)) {
            if (wbPkt->cmd == MemCmd::CleanEvict) {
                // Delete CleanEvict because cached copies exist above. The
                // packet destructor will delete the request object because
                // this is a non-snoop request packet which does not require a
                // response.
                delete wbPkt;
            } else if (wbPkt->cmd == MemCmd::WritebackClean) {
                // clean writeback, do not send since the block is
                // still cached above
                assert(writebackClean);
                delete wbPkt;
            } else {
                assert(wbPkt->cmd == MemCmd::WritebackDirty);
                // Set BLOCK_CACHED flag in Writeback and send below, so that
                // the Writeback does not reset the bit corresponding to this
                // address in the snoop filter below.
                wbPkt->setBlockCached();
                allocateWriteBuffer(wbPkt, forward_time);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            allocateWriteBuffer(wbPkt, forward_time);
        }
        writebacks.pop_front();
    }
}

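// Descriptive summary (added): atomic-mode counterpart of doWritebacks();
// qualifying packets are sent directly through the memory-side port and
// then deleted.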
void
Cache::doWritebacksAtomic(PacketList& writebacks)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // Call isCachedAbove for both Writebacks and CleanEvicts. If
        // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
        // and discard CleanEvicts.
        if (isCachedAbove(wbPkt, false)) {
            if (wbPkt->cmd == MemCmd::WritebackDirty) {
                // Set BLOCK_CACHED flag in Writeback and send below,
                // so that the Writeback does not reset the bit
                // corresponding to this address in the snoop filter
                // below. We can discard CleanEvicts because cached
                // copies exist above. Atomic mode isCachedAbove
                // modifies packet to set BLOCK_CACHED flag
                memSidePort->sendAtomic(wbPkt);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            memSidePort->sendAtomic(wbPkt);
        }
        writebacks.pop_front();
        // In case of CleanEvicts, the packet destructor will delete the
        // request object because this is a non-snoop request packet which
        // does not require a response.
        delete wbPkt;
    }
}


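// Descriptive summary (added): handle a snoop response arriving on the
// CPU-side port, either sinking it here (if it answers a hardware
// prefetch snoop this cache issued) or forwarding it towards the memory
// side.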
void
Cache::recvTimingSnoopResp(PacketPtr pkt)
{
    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    assert(pkt->isResponse());
    assert(!system->bypassCaches());

    // determine if the response is from a snoop request we created
    // (in which case it should be in the outstandingSnoop), or if we
    // merely forwarded someone else's snoop request
    const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
        outstandingSnoop.end();

    if (!forwardAsSnoop) {
        // the packet came from this cache, so sink it here and do not
        // forward it
        assert(pkt->cmd == MemCmd::HardPFResp);

        outstandingSnoop.erase(pkt->req);

        DPRINTF(Cache, "Got prefetch response from above for addr "
                "%#llx (%s)\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
        recvTimingResp(pkt);
        return;
    }

    // forwardLatency is set here because there is a response from an
    // upper level cache.
    // To pay the delay that occurs if the packet comes from the bus,
    // we also charge the headerDelay.
    Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    memSidePort->schedTimingSnoopResp(pkt, snoop_resp_time);
}

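// Descriptive summary (added): if fast writes are enabled, promote an
// aligned whole-line WriteReq to a WriteLineReq so it can be handled as a
// full cache-line write.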
void
Cache::promoteWholeLineWrites(PacketPtr pkt)
{
    // Cache line clearing instructions
    if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) {
        pkt->cmd = MemCmd::WriteLineReq;
        DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
    }
}

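// Descriptive summary (added): main timing-mode entry point for requests
// from the CPU side; performs the access, schedules a response on a hit,
// and on a miss allocates MSHR or write buffer entries and notifies the
// prefetcher.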
bool
Cache::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(CacheTags, "%s tags: %s\n", __func__, tags->print());

    assert(pkt->isRequest());

    // Just forward the packet if caches are disabled.
    if (system->bypassCaches()) {
        // @todo This should really enqueue the packet rather
        bool M5_VAR_USED success = memSidePort->sendTimingReq(pkt);
        assert(success);
        return true;
    }

    promoteWholeLineWrites(pkt);

    if (pkt->memInhibitAsserted()) {
        // a cache above us (but not where the packet came from) is
        // responding to the request
        DPRINTF(Cache, "mem inhibited on addr %#llx (%s): not responding\n",
                pkt->getAddr(), pkt->isSecure() ? "s" : "ns");

        // if the packet needs exclusive, and the cache that has
        // promised to respond (setting the inhibit flag) is not
        // providing exclusive (it is in O vs M state), we know that
        // there may be other shared copies in the system; go out and
        // invalidate them all
        if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
            // create a downstream express snoop with cleared packet
            // flags, there is no need to allocate any data as the
            // packet is merely used to co-ordinate state transitions
            Packet *snoop_pkt = new Packet(pkt, true, false);

            // also reset the bus time that the original packet has
            // not yet paid for
            snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;

            // make this an instantaneous express snoop, and let the
            // other caches in the system know that the packet is
            // inhibited, because we have found the authoritative copy
            // (O) that will supply the right data
            snoop_pkt->setExpressSnoop();
            snoop_pkt->assertMemInhibit();

            // this express snoop travels towards the memory, and at
            // every crossbar it is snooped upwards thus reaching
            // every cache in the system
            bool M5_VAR_USED success = memSidePort->sendTimingReq(snoop_pkt);
            // express snoops always succeed
            assert(success);

            // main memory will delete the packet
        }

        // queue for deletion, as the sending cache is still relying
        // on the packet
        pendingDelete.reset(pkt);

        // no need to take any action in this particular cache as the
        // caches along the path to memory are allowed to keep lines
        // in a shared state, and a cache above us already committed
        // to responding
        return true;
    }

    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    // We use lookupLatency here because it is used to specify the latency
    // to access.
    Cycles lat = lookupLatency;
    CacheBlk *blk = NULL;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() calls accessBlock() which can modify lat value.
        satisfied = access(pkt, blk, lat, writebacks);

        // copy writebacks to write buffer here to ensure they logically
        // precede anything happening below
        doWritebacks(writebacks, forward_time);
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is just lat, i.e. lookupLatency, possibly
    // modified by the access() call above.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;

    // track time of availability of next prefetch, if any
    Tick next_pf_time = MaxTick;

    bool needsResponse = pkt->needsResponse();

    if (satisfied) {
        // should never be satisfying an uncacheable access as we
        // flush and invalidate any existing block as part of the
        // lookup
        assert(!pkt->req->isUncacheable());

        // hit (for all other request types)

        if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
            if (blk)
                blk->status &= ~BlkHWPrefetched;

            // Don't notify on SWPrefetch
            if (!pkt->cmd.isSWPrefetch())
                next_pf_time = prefetcher->notify(pkt);
        }

        if (needsResponse) {
            pkt->makeTimingResponse();
            // @todo: Make someone pay for this
            pkt->headerDelay = pkt->payloadDelay = 0;

            // In this case we are considering request_time that takes
            // into account the delay of the xbar, if any, and just
            // lat, neglecting responseLatency, modelling hit latency
            // just as lookupLatency or the value of lat overridden
            // by access(), which calls accessBlock().
            cpuSidePort->schedTimingResp(pkt, request_time, true);
        } else {
            DPRINTF(Cache, "%s satisfied %s addr %#llx, no response needed\n",
                    __func__, pkt->cmdString(), pkt->getAddr(),
                    pkt->getSize());

            // queue the packet for deletion, as the sending cache is
            // still relying on it; if the block is found in access(),
            // CleanEvict and Writeback messages will be deleted
            // here as well
            pendingDelete.reset(pkt);
        }
    } else {
        // miss

        Addr blk_addr = blockAlign(pkt->getAddr());

        // ignore any existing MSHR if we are dealing with an
        // uncacheable request
        MSHR *mshr = pkt->req->isUncacheable() ? nullptr :
            mshrQueue.findMatch(blk_addr, pkt->isSecure());

        // Software prefetch handling:
        // To keep the core from waiting on data it won't look at
        // anyway, send back a response with dummy data. Miss handling
        // will continue asynchronously. Unfortunately, the core will
        // insist upon freeing original Packet/Request, so we have to
        // create a new pair with a different lifecycle. Note that this
        // processing happens before any MSHR munging on the behalf of
        // this request because this new Request will be the one stored
        // into the MSHRs, not the original.
        if (pkt->cmd.isSWPrefetch()) {
            assert(needsResponse);
            assert(pkt->req->hasPaddr());
            assert(!pkt->req->isUncacheable());

            // There's no reason to add a prefetch as an additional target
            // to an existing MSHR. If an outstanding request is already
            // in progress, there is nothing for the prefetch to do.
            // If this is the case, we don't even create a request at all.
            PacketPtr pf = nullptr;

            if (!mshr) {
                // copy the request and create a new SoftPFReq packet
                RequestPtr req = new Request(pkt->req->getPaddr(),
                                             pkt->req->getSize(),
                                             pkt->req->getFlags(),
                                             pkt->req->masterId());
                pf = new Packet(req, pkt->cmd);
                pf->allocate();
                assert(pf->getAddr() == pkt->getAddr());
                assert(pf->getSize() == pkt->getSize());
            }

            pkt->makeTimingResponse();
            // for debugging, set all the bits in the response data
            // (also keeps valgrind from complaining when debugging settings
            //  print out instruction results)
            std::memset(pkt->getPtr<uint8_t>(), 0xFF, pkt->getSize());
            // request_time is used here, taking into account lat and the delay
            // charged if the packet comes from the xbar.
            cpuSidePort->schedTimingResp(pkt, request_time, true);

            // If an outstanding request is in progress (we found an
            // MSHR) this is set to null
            pkt = pf;
        }

        if (mshr) {
            /// MSHR hit
            /// @note writebacks will be checked in getNextMSHR()
            /// for any conflicting requests to the same block

            //@todo remove hw_pf here

            // Coalesce unless it was a software prefetch (see above).
            if (pkt) {
                assert(!pkt->isWriteback());
                // CleanEvicts corresponding to blocks which have
                // outstanding requests in MSHRs are simply sunk here
                if (pkt->cmd == MemCmd::CleanEvict) {
                    pendingDelete.reset(pkt);
                } else {
                    DPRINTF(Cache, "%s coalescing MSHR for %s addr %#llx size %d\n",
                            __func__, pkt->cmdString(), pkt->getAddr(),
                            pkt->getSize());

                    assert(pkt->req->masterId() < system->maxMasters());
                    mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
                    // We use forward_time here because it is the same
                    // considering new targets. We have multiple
                    // requests for the same address here. It
                    // specifies the latency to allocate an internal
                    // buffer and to schedule an event to the queued
                    // port and also takes into account the additional
                    // delay of the xbar.
                    mshr->allocateTarget(pkt, forward_time, order++,
                                         allocOnFill(pkt->cmd));
                    if (mshr->getNumTargets() == numTarget) {
                        noTargetMSHR = mshr;
                        setBlocked(Blocked_NoTargets);
                        // need to be careful with this... if this mshr isn't
                        // ready yet (i.e. time > curTick()), we don't want to
                        // move it ahead of mshrs that are ready
                        // mshrQueue.moveToFront(mshr);
                    }
                }
                // We should call the prefetcher regardless of whether the
                // request is satisfied or not, and regardless of whether it
                // is in the MSHR or not. The request could be a ReadReq hit,
                // but still not satisfied (potentially because of a prior
                // write to the same cache line). So, even when not satisfied
                // and an MSHR is already allocated for this block, we need
                // to let the prefetcher know about the request.
                if (prefetcher) {
                    // Don't notify on SWPrefetch
                    if (!pkt->cmd.isSWPrefetch())
                        next_pf_time = prefetcher->notify(pkt);
                }
            }
        } else {
            // no MSHR
            assert(pkt->req->masterId() < system->maxMasters());
            if (pkt->req->isUncacheable()) {
                mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;
            } else {
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
            }

            if (pkt->isEviction() ||
                (pkt->req->isUncacheable() && pkt->isWrite())) {
                // We use forward_time here because there is an
                // uncached memory write, forwarded to WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                if (blk && blk->isValid()) {
                    // should have flushed and have no valid block
                    assert(!pkt->req->isUncacheable());

                    // If we have a write miss to a valid block, we
                    // need to mark the block non-readable.  Otherwise
                    // if we allow reads while there's an outstanding
                    // write miss, the read could return stale data
                    // out of the cache block... a more aggressive
                    // system could detect the overlap (if any) and
                    // forward data out of the MSHRs, but we don't do
                    // that yet.  Note that we do need to leave the
                    // block valid so that it stays in the cache, in
                    // case we get an upgrade response (and hence no
                    // new data) when the write miss completes.
                    // As long as CPUs do proper store/load forwarding
                    // internally, and have a sufficiently weak memory
                    // model, this is probably unnecessary, but at some
                    // point it must have seemed like we needed it...
                    assert(pkt->needsExclusive());
                    assert(!blk->isWritable());
                    blk->status &= ~BlkReadable;
                }
                // Here we are using forward_time, modelling the latency of
                // a miss (outbound) just as forwardLatency, neglecting the
                // lookupLatency component.
                allocateMissBuffer(pkt, forward_time);
            }

            if (prefetcher) {
                // Don't notify on SWPrefetch
                if (!pkt->cmd.isSWPrefetch())
                    next_pf_time = prefetcher->notify(pkt);
            }
        }
    }

    if (next_pf_time != MaxTick)
        schedMemSideSendEvent(next_pf_time);

    return true;
}


// See comment in cache.hh.
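// Descriptive summary (added): create the memory-side packet needed to
// service a miss for cpu_pkt, or return NULL if the original packet
// should simply be forwarded (e.g. uncacheable requests, or
// upgrades/evictions that missed entirely).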
PacketPtr
Cache::getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                    bool needsExclusive) const
{
    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable()) {
        // note that at the point we see the uncacheable request we
        // flush any block, but there could be an outstanding MSHR,
        // and the cache could have filled again before we actually
        // send out the forwarded uncacheable request (blk could thus
        // be non-null)
        return NULL;
    }

    if (!blkValid &&
        (cpu_pkt->isUpgrade() ||
         cpu_pkt->isEviction())) {
        // Writebacks that weren't allocated in access() and upgrades
        // from upper-level caches that missed completely just go
        // through.
        return NULL;
    }

    assert(cpu_pkt->needsResponse());

    MemCmd cmd;
    // @TODO make useUpgrades a parameter.
    // Note that ownership protocols require upgrade, otherwise a
    // write miss on a shared owned block will generate a ReadExcl,
    // which will clobber the owned copy.
    const bool useUpgrades = true;
    if (blkValid && useUpgrades) {
        // only reason to be here is that blk is shared
        // (read-only) and we need exclusive
        assert(needsExclusive);
        assert(!blk->isWritable());
        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
               cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
        // Even though this SC will fail, we still need to send out the
        // request and get the data to supply it to other snoopers in the case
        // where the determination that the StoreCond fails is delayed due to
        // all caches not being on the same local bus.
        cmd = MemCmd::SCUpgradeFailReq;
    } else if (cpu_pkt->cmd == MemCmd::WriteLineReq) {
        // forward as invalidate to all other caches, this gives us
        // the line in exclusive state, and invalidates all other
        // copies
        cmd = MemCmd::InvalidateReq;
    } else {
        // block is invalid
        cmd = needsExclusive ? MemCmd::ReadExReq :
            (isReadOnly ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
    }
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);

    // if there are sharers in the upper levels, pass that info downstream
    if (cpu_pkt->sharedAsserted()) {
        // note that cpu_pkt may have spent a considerable time in the
        // MSHR queue and that the information could possibly be out
        // of date, however, there is no harm in conservatively
        // assuming the block is shared
        pkt->assertShared();
        DPRINTF(Cache, "%s passing shared from %s to %s addr %#llx size %d\n",
                __func__, cpu_pkt->cmdString(), pkt->cmdString(),
                pkt->getAddr(), pkt->getSize());
    }

    // the packet should be block aligned
    assert(pkt->getAddr() == blockAlign(pkt->getAddr()));

    pkt->allocate();
    DPRINTF(Cache, "%s created %s from %s for  addr %#llx size %d\n",
            __func__, pkt->cmdString(), cpu_pkt->cmdString(), pkt->getAddr(),
            pkt->getSize());
    return pkt;
}


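// Descriptive summary (added): atomic-mode access path; performs the
// lookup, sends any required request downstream atomically, handles the
// fill and response in line, and returns the total latency in ticks.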
Tick
Cache::recvAtomic(PacketPtr pkt)
{
    // We are in atomic mode so we pay just for lookupLatency here.
    Cycles lat = lookupLatency;
    // @TODO: make this a parameter
    bool last_level_cache = false;

    // Forward the request if the system is in cache bypass mode.
    if (system->bypassCaches())
        return ticksToCycles(memSidePort->sendAtomic(pkt));

    promoteWholeLineWrites(pkt);

    if (pkt->memInhibitAsserted()) {
        // have to invalidate ourselves and any lower caches even if
        // upper cache will be responding
        if (pkt->isInvalidate()) {
            CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
            if (blk && blk->isValid()) {
                tags->invalidate(blk);
                blk->invalidate();
                DPRINTF(Cache, "rcvd mem-inhibited %s on %#llx (%s):"
                        " invalidating\n",
                        pkt->cmdString(), pkt->getAddr(),
                        pkt->isSecure() ? "s" : "ns");
            }
            if (!last_level_cache) {
                DPRINTF(Cache, "forwarding mem-inhibited %s on %#llx (%s)\n",
                        pkt->cmdString(), pkt->getAddr(),
                        pkt->isSecure() ? "s" : "ns");
                lat += ticksToCycles(memSidePort->sendAtomic(pkt));
            }
        } else {
            DPRINTF(Cache, "rcvd mem-inhibited %s on %#llx: not responding\n",
                    pkt->cmdString(), pkt->getAddr());
        }

        return lat * clockPeriod();
    }

    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    CacheBlk *blk = NULL;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);

    if (!satisfied) {
        // MISS

        PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());

        bool is_forward = (bus_pkt == NULL);

        if (is_forward) {
            // just forwarding the same request to the next level
            // no local cache operation involved
            bus_pkt = pkt;
        }

        DPRINTF(Cache, "Sending an atomic %s for %#llx (%s)\n",
                bus_pkt->cmdString(), bus_pkt->getAddr(),
                bus_pkt->isSecure() ? "s" : "ns");

#if TRACING_ON
        CacheBlk::State old_state = blk ? blk->status : 0;
#endif

        lat += ticksToCycles(memSidePort->sendAtomic(bus_pkt));

        // We are now dealing with the response handling
        DPRINTF(Cache, "Receive response: %s for addr %#llx (%s) in state %i\n",
                bus_pkt->cmdString(), bus_pkt->getAddr(),
                bus_pkt->isSecure() ? "s" : "ns",
                old_state);

        // If packet was a forward, the response (if any) is already
        // in place in the bus_pkt == pkt structure, so we don't need
        // to do anything.  Otherwise, use the separate bus_pkt to
        // generate response to pkt and then delete it.
        if (!is_forward) {
            if (pkt->needsResponse()) {
                assert(bus_pkt->isResponse());
                if (bus_pkt->isError()) {
                    pkt->makeAtomicResponse();
                    pkt->copyError(bus_pkt);
                } else if (pkt->cmd == MemCmd::InvalidateReq) {
                    if (blk) {
                        // invalidate response to a cache that received
                        // an invalidate request
                        satisfyCpuSideRequest(pkt, blk);
                    }
                } else if (pkt->cmd == MemCmd::WriteLineReq) {
                    // note the use of pkt, not bus_pkt here.

                    // write-line request to the cache that promoted
                    // the write to a whole line
                    blk = handleFill(pkt, blk, writebacks,
                                     allocOnFill(pkt->cmd));
                    satisfyCpuSideRequest(pkt, blk);
                } else if (bus_pkt->isRead() ||
                           bus_pkt->cmd == MemCmd::UpgradeResp) {
                    // we're updating cache state to allow us to
                    // satisfy the upstream request from the cache
                    blk = handleFill(bus_pkt, blk, writebacks,
                                     allocOnFill(pkt->cmd));
                    satisfyCpuSideRequest(pkt, blk);
                } else {
                    // we're satisfying the upstream request without
                    // modifying cache state, e.g., a write-through
                    pkt->makeAtomicResponse();
                }
            }
            delete bus_pkt;
        }
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention.  If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used temp block, check to see if it's valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter) first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
1128            // do not schedule any new event
1129            writebackTempBlockAtomic();
1130        } else {
1131            // the writeback/clean eviction happens after the call to
1132            // recvAtomic has finished (but before any successive
1133            // calls), so that the response handling from the fill is
1134            // allowed to happen first
1135            schedule(writebackTempBlockAtomicEvent, curTick());
1136        }
1137
1138        tempBlockWriteback = (blk->isDirty() || writebackClean) ?
1139            writebackBlk(blk) : cleanEvictBlk(blk);
1140        blk->invalidate();
1141    }
1142
1143    if (pkt->needsResponse()) {
1144        pkt->makeAtomicResponse();
1145    }
1146
1147    return lat * clockPeriod();
1148}
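// Illustrative example of the latency returned above (numbers assumed, not
// taken from any particular configuration): with a 1 GHz cache clock and the
// default 1 ps tick resolution, clockPeriod() is 1000 ticks, so an atomic
// access that accumulates lat == 20 cycles reports 20 * 1000 = 20000 ticks
// to its caller.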
1149
1150
1151void
1152Cache::functionalAccess(PacketPtr pkt, bool fromCpuSide)
1153{
1154    if (system->bypassCaches()) {
1155        // Packets from the memory side are snoop requests and
1156        // shouldn't happen in bypass mode.
1157        assert(fromCpuSide);
1158
1159        // The cache should be flushed if we are in cache bypass mode,
1160        // so we don't need to check if we need to update anything.
1161        memSidePort->sendFunctional(pkt);
1162        return;
1163    }
1164
1165    Addr blk_addr = blockAlign(pkt->getAddr());
1166    bool is_secure = pkt->isSecure();
1167    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
1168    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
1169
1170    pkt->pushLabel(name());
1171
1172    CacheBlkPrintWrapper cbpw(blk);
1173
1174    // Note that just because an L2/L3 has valid data doesn't mean an
1175    // L1 doesn't have a more up-to-date modified copy that still
1176    // needs to be found.  As a result we always update the request if
1177    // we have it, but only declare it satisfied if we are the owner.
1178
1179    // see if we have data at all (owned or otherwise)
1180    bool have_data = blk && blk->isValid()
1181        && pkt->checkFunctional(&cbpw, blk_addr, is_secure, blkSize,
1182                                blk->data);
1183
1184    // data we have is dirty if marked as such or if valid & ownership
1185    // pending due to outstanding UpgradeReq
1186    bool have_dirty =
1187        have_data && (blk->isDirty() ||
1188                      (mshr && mshr->inService && mshr->isPendingDirty()));
1189
1190    bool done = have_dirty
1191        || cpuSidePort->checkFunctional(pkt)
1192        || mshrQueue.checkFunctional(pkt, blk_addr)
1193        || writeBuffer.checkFunctional(pkt, blk_addr)
1194        || memSidePort->checkFunctional(pkt);
1195
1196    DPRINTF(Cache, "functional %s %#llx (%s) %s%s%s\n",
1197            pkt->cmdString(), pkt->getAddr(), is_secure ? "s" : "ns",
1198            (blk && blk->isValid()) ? "valid " : "",
1199            have_data ? "data " : "", done ? "done " : "");
1200
1201    // We're leaving the cache, so pop cache->name() label
1202    pkt->popLabel();
1203
1204    if (done) {
1205        pkt->makeResponse();
1206    } else {
1207        // if it came as a request from the CPU side then make sure it
1208        // continues towards the memory side
1209        if (fromCpuSide) {
1210            memSidePort->sendFunctional(pkt);
1211        } else if (forwardSnoops && cpuSidePort->isSnooping()) {
1212            // if it came from the memory side, it must be a snoop request
1213            // and we should only forward it if we are forwarding snoops
1214            cpuSidePort->sendFunctionalSnoop(pkt);
1215        }
1216    }
1217}
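// Informal sketch of the functional-access policy above, assuming a simple
// two-level hierarchy: if this cache holds the line dirty (or ownership is
// pending in an in-service MSHR), have_dirty is true and the access is
// declared done here; if it only holds a clean shared copy, the data is
// still merged into the packet but the access keeps propagating, towards
// memory for CPU-side requests or towards the CPU side for snoops, until an
// owner or memory itself satisfies it.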
1218
1219
1220/////////////////////////////////////////////////////
1221//
1222// Response handling: responses from the memory side
1223//
1224/////////////////////////////////////////////////////
1225
1226
1227void
1228Cache::recvTimingResp(PacketPtr pkt)
1229{
1230    assert(pkt->isResponse());
1231
1232    // all header delay should be paid for by the crossbar, unless
1233    // this is a prefetch response from above
1234    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
1235             "%s saw a non-zero packet delay\n", name());
1236
1237    MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
1238    bool is_error = pkt->isError();
1239
1240    assert(mshr);
1241
1242    if (is_error) {
1243        DPRINTF(Cache, "Cache received packet with error for addr %#llx (%s), "
1244                "cmd: %s\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns",
1245                pkt->cmdString());
1246    }
1247
1248    DPRINTF(Cache, "Handling response %s for addr %#llx size %d (%s)\n",
1249            pkt->cmdString(), pkt->getAddr(), pkt->getSize(),
1250            pkt->isSecure() ? "s" : "ns");
1251
1252    MSHRQueue *mq = mshr->queue;
1253    bool wasFull = mq->isFull();
1254
1255    if (mshr == noTargetMSHR) {
1256        // we always clear at least one target
1257        clearBlocked(Blocked_NoTargets);
1258        noTargetMSHR = NULL;
1259    }
1260
1261    // Initial target is used just for stats
1262    MSHR::Target *initial_tgt = mshr->getTarget();
1263    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
1264    Tick miss_latency = curTick() - initial_tgt->recvTime;
1265    PacketList writebacks;
1266    // We need forward_time here because we have a call to
1267    // allocateWriteBuffer() that needs this parameter to specify the
1268    // time to request the bus.  In this case we use forward latency
1269    // because there is a writeback.  We also pay here for headerDelay,
1270    // which accounts for the bus latencies if the packet comes from the
1271    // bus.
1272    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
1273
1274    if (pkt->req->isUncacheable()) {
1275        assert(pkt->req->masterId() < system->maxMasters());
1276        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
1277            miss_latency;
1278    } else {
1279        assert(pkt->req->masterId() < system->maxMasters());
1280        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
1281            miss_latency;
1282    }
1283
1284    // upgrade deferred targets if we got exclusive
1285    if (!pkt->sharedAsserted()) {
1286        mshr->promoteExclusive();
1287    }
1288
1289    bool is_fill = !mshr->isForward &&
1290        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
1291
1292    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
1293
1294    if (is_fill && !is_error) {
1295        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
1296                pkt->getAddr());
1297
1298        blk = handleFill(pkt, blk, writebacks, mshr->allocOnFill);
1299        assert(blk != NULL);
1300    }
1301
1302    // allow invalidation responses originating from write-line
1303    // requests to be discarded
1304    bool is_invalidate = pkt->isInvalidate();
1305
1306    // First offset for critical word first calculations
1307    int initial_offset = initial_tgt->pkt->getOffset(blkSize);
1308
1309    while (mshr->hasTargets()) {
1310        MSHR::Target *target = mshr->getTarget();
1311        Packet *tgt_pkt = target->pkt;
1312
1313        switch (target->source) {
1314          case MSHR::Target::FromCPU:
1315            Tick completion_time;
1316            // Here we charge the xbar delay, carried in headerDelay, to
1317            // completion_time if the packet came through the xbar.
1318            completion_time = pkt->headerDelay;
1319
1320            // Software prefetch handling for cache closest to core
1321            if (tgt_pkt->cmd.isSWPrefetch()) {
1322                // a software prefetch would have already been ack'd immediately
1323                // with dummy data so the core would be able to retire it.
1324                // this request completes right here, so we deallocate it.
1325                delete tgt_pkt->req;
1326                delete tgt_pkt;
1327                break; // skip response
1328            }
1329
1330            // unlike the other packet flows, where data is found in other
1331            // caches or memory and brought back, write-line requests always
1332            // have the data right away, so the "is fill?" determination above
1333            // cannot actually be made until examining the stored MSHR
1334            // state. We "catch up" with that logic here, which is duplicated
1335            // from above.
1336            if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
1337                assert(!is_error);
1338                // we got the block in exclusive state, so promote any
1339                // deferred targets if possible
1340                mshr->promoteExclusive();
1341                // NB: we use the original packet here and not the response!
1342                blk = handleFill(tgt_pkt, blk, writebacks, mshr->allocOnFill);
1343                assert(blk != NULL);
1344
1345                // treat as a fill, and discard the invalidation
1346                // response
1347                is_fill = true;
1348                is_invalidate = false;
1349            }
1350
1351            if (is_fill) {
1352                satisfyCpuSideRequest(tgt_pkt, blk,
1353                                      true, mshr->hasPostDowngrade());
1354
1355                // How many bytes past the first request is this one
1356                int transfer_offset =
1357                    tgt_pkt->getOffset(blkSize) - initial_offset;
1358                if (transfer_offset < 0) {
1359                    transfer_offset += blkSize;
1360                }
1361
1362                // If this is not the critical word, also charge payloadDelay.
1363                // responseLatency is the latency of the return path
1364                // from lower level caches/memory to an upper level cache or
1365                // the core.
1366                completion_time += clockEdge(responseLatency) +
1367                    (transfer_offset ? pkt->payloadDelay : 0);
1368
1369                assert(!tgt_pkt->req->isUncacheable());
1370
1371                assert(tgt_pkt->req->masterId() < system->maxMasters());
1372                missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] +=
1373                    completion_time - target->recvTime;
1374            } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
1375                // failed StoreCond upgrade
1376                assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
1377                       tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
1378                       tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
1379                // responseLatency is the latency of the return path
1380                // from lower level caches/memory to an upper level cache or
1381                // the core.
1382                completion_time += clockEdge(responseLatency) +
1383                    pkt->payloadDelay;
1384                tgt_pkt->req->setExtraData(0);
1385            } else {
1386                // not a cache fill, just forwarding response
1387                // responseLatency is the latency of the return path
1388                // from lower level caches/memory to the core.
1389                completion_time += clockEdge(responseLatency) +
1390                    pkt->payloadDelay;
1391                if (pkt->isRead() && !is_error) {
1392                    // sanity check
1393                    assert(pkt->getAddr() == tgt_pkt->getAddr());
1394                    assert(pkt->getSize() >= tgt_pkt->getSize());
1395
1396                    tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
1397                }
1398            }
1399            tgt_pkt->makeTimingResponse();
1400            // if this packet is an error copy that to the new packet
1401            if (is_error)
1402                tgt_pkt->copyError(pkt);
1403            if (tgt_pkt->cmd == MemCmd::ReadResp &&
1404                (is_invalidate || mshr->hasPostInvalidate())) {
1405                // If intermediate cache got ReadRespWithInvalidate,
1406                // propagate that.  Response should not have
1407                // isInvalidate() set otherwise.
1408                tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
1409                DPRINTF(Cache, "%s updated cmd to %s for addr %#llx\n",
1410                        __func__, tgt_pkt->cmdString(), tgt_pkt->getAddr());
1411            }
1412            // Reset the bus additional time as it is now accounted for
1413            tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
1414            cpuSidePort->schedTimingResp(tgt_pkt, completion_time, true);
1415            break;
1416
1417          case MSHR::Target::FromPrefetcher:
1418            assert(tgt_pkt->cmd == MemCmd::HardPFReq);
1419            if (blk)
1420                blk->status |= BlkHWPrefetched;
1421            delete tgt_pkt->req;
1422            delete tgt_pkt;
1423            break;
1424
1425          case MSHR::Target::FromSnoop:
1426            // I don't believe that a snoop can be in an error state
1427            assert(!is_error);
1428            // response to snoop request
1429            DPRINTF(Cache, "processing deferred snoop...\n");
1430            assert(!(is_invalidate && !mshr->hasPostInvalidate()));
1431            handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
1432            break;
1433
1434          default:
1435            panic("Illegal target->source enum %d\n", target->source);
1436        }
1437
1438        mshr->popTarget();
1439    }
1440
1441    if (blk && blk->isValid()) {
1442        // an invalidate response stemming from a write line request
1443        // should not invalidate the block, so check if the
1444        // invalidation should be discarded
1445        if (is_invalidate || mshr->hasPostInvalidate()) {
1446            invalidateBlock(blk);
1447        } else if (mshr->hasPostDowngrade()) {
1448            blk->status &= ~BlkWritable;
1449        }
1450    }
1451
1452    if (mshr->promoteDeferredTargets()) {
1453        // avoid later read getting stale data while write miss is
1454        // outstanding.. see comment in timingAccess()
1455        if (blk) {
1456            blk->status &= ~BlkReadable;
1457        }
1458        mq = mshr->queue;
1459        mq->markPending(mshr);
1460        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
1461    } else {
1462        mq->deallocate(mshr);
1463        if (wasFull && !mq->isFull()) {
1464            clearBlocked((BlockedCause)mq->index);
1465        }
1466
1467        // Request the bus for a prefetch if this deallocation freed enough
1468        // MSHRs for a prefetch to take place
1469        if (prefetcher && mq == &mshrQueue && mshrQueue.canPrefetch()) {
1470            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
1471                                         clockEdge());
1472            if (next_pf_time != MaxTick)
1473                schedMemSideSendEvent(next_pf_time);
1474        }
1475    }
1476    // reset the xbar additional timing as it is now accounted for
1477    pkt->headerDelay = pkt->payloadDelay = 0;
1478
1479    // copy writebacks to write buffer
1480    doWritebacks(writebacks, forward_time);
1481
1482    // if we used temp block, check to see if it's valid and then clear it out
1483    if (blk == tempBlock && tempBlock->isValid()) {
1484        // We use forwardLatency here because we are copying
1485        // Writebacks/CleanEvicts to write buffer. It specifies the latency to
1486        // allocate an internal buffer and to schedule an event to the
1487        // queued port.
1488        if (blk->isDirty() || writebackClean) {
1489            PacketPtr wbPkt = writebackBlk(blk);
1490            allocateWriteBuffer(wbPkt, forward_time);
1491            // Set BLOCK_CACHED flag if cached above.
1492            if (isCachedAbove(wbPkt))
1493                wbPkt->setBlockCached();
1494        } else {
1495            PacketPtr wcPkt = cleanEvictBlk(blk);
1496            // Check to see if block is cached above. If not allocate
1497            // write buffer
1498            if (isCachedAbove(wcPkt))
1499                delete wcPkt;
1500            else
1501                allocateWriteBuffer(wcPkt, forward_time);
1502        }
1503        blk->invalidate();
1504    }
1505
1506    DPRINTF(Cache, "Leaving %s with %s for addr %#llx\n", __func__,
1507            pkt->cmdString(), pkt->getAddr());
1508    delete pkt;
1509}
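// Worked example of the completion_time accounting above (illustrative
// numbers only): if a fill response arrives with headerDelay == 1000 ticks
// and payloadDelay == 4000 ticks, a CPU target that did not request the
// critical word is scheduled at clockEdge(responseLatency) + 1000 + 4000
// ticks, while the target for the critical (first-requested) word omits the
// payloadDelay term.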
1510
1511PacketPtr
1512Cache::writebackBlk(CacheBlk *blk)
1513{
1514    chatty_assert(!isReadOnly || writebackClean,
1515                  "Writeback from read-only cache");
1516    assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));
1517
1518    writebacks[Request::wbMasterId]++;
1519
1520    Request *req = new Request(tags->regenerateBlkAddr(blk->tag, blk->set),
1521                               blkSize, 0, Request::wbMasterId);
1522    if (blk->isSecure())
1523        req->setFlags(Request::SECURE);
1524
1525    req->taskId(blk->task_id);
1526    blk->task_id = ContextSwitchTaskId::Unknown;
1527    blk->tickInserted = curTick();
1528
1529    PacketPtr pkt =
1530        new Packet(req, blk->isDirty() ?
1531                   MemCmd::WritebackDirty : MemCmd::WritebackClean);
1532
1533    DPRINTF(Cache, "Create Writeback %#llx writable: %d, dirty: %d\n",
1534            pkt->getAddr(), blk->isWritable(), blk->isDirty());
1535
1536    if (blk->isWritable()) {
1537        // not asserting shared means we pass the block in modified
1538        // state, mark our own block non-writeable
1539        blk->status &= ~BlkWritable;
1540    } else {
1541        // we are in the owned state, tell the receiver
1542        pkt->assertShared();
1543    }
1544
1545    // make sure the block is not marked dirty
1546    blk->status &= ~BlkDirty;
1547
1548    pkt->allocate();
1549    std::memcpy(pkt->getPtr<uint8_t>(), blk->data, blkSize);
1550
1551    return pkt;
1552}
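// Usage sketch based on the call sites in this file: the packet returned by
// writebackBlk() is either queued on a writeback list for atomic-mode
// handling or handed to the write buffer in timing mode, e.g.:
//     PacketPtr wb_pkt = writebackBlk(blk);
//     allocateWriteBuffer(wb_pkt, forward_time);
// A writable (modified) block is passed downstream without the shared bit,
// whereas an owned block has assertShared() set so the receiver does not
// treat its copy as the only up-to-date one.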
1553
1554PacketPtr
1555Cache::cleanEvictBlk(CacheBlk *blk)
1556{
1557    assert(!writebackClean);
1558    assert(blk && blk->isValid() && !blk->isDirty());
1559    // Creating a zero sized write, a message to the snoop filter
1560    Request *req =
1561        new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0,
1562                    Request::wbMasterId);
1563    if (blk->isSecure())
1564        req->setFlags(Request::SECURE);
1565
1566    req->taskId(blk->task_id);
1567    blk->task_id = ContextSwitchTaskId::Unknown;
1568    blk->tickInserted = curTick();
1569
1570    PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
1571    pkt->allocate();
1572    DPRINTF(Cache, "%s%s %x Create CleanEvict\n", pkt->cmdString(),
1573            pkt->req->isInstFetch() ? " (ifetch)" : "",
1574            pkt->getAddr());
1575
1576    return pkt;
1577}
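// Note summarising the behaviour above: a CleanEvict carries no data; it is
// a zero-sized message whose only purpose is to let a snoop filter below
// clear its holder bit once the clean block has actually left this cache.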
1578
1579void
1580Cache::memWriteback()
1581{
1582    CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor);
1583    tags->forEachBlk(visitor);
1584}
1585
1586void
1587Cache::memInvalidate()
1588{
1589    CacheBlkVisitorWrapper visitor(*this, &Cache::invalidateVisitor);
1590    tags->forEachBlk(visitor);
1591}
1592
1593bool
1594Cache::isDirty() const
1595{
1596    CacheBlkIsDirtyVisitor visitor;
1597    tags->forEachBlk(visitor);
1598
1599    return visitor.isDirty();
1600}
1601
1602bool
1603Cache::writebackVisitor(CacheBlk &blk)
1604{
1605    if (blk.isDirty()) {
1606        assert(blk.isValid());
1607
1608        Request request(tags->regenerateBlkAddr(blk.tag, blk.set),
1609                        blkSize, 0, Request::funcMasterId);
1610        request.taskId(blk.task_id);
1611
1612        Packet packet(&request, MemCmd::WriteReq);
1613        packet.dataStatic(blk.data);
1614
1615        memSidePort->sendFunctional(&packet);
1616
1617        blk.status &= ~BlkDirty;
1618    }
1619
1620    return true;
1621}
1622
1623bool
1624Cache::invalidateVisitor(CacheBlk &blk)
1625{
1626
1627    if (blk.isDirty())
1628        warn_once("Invalidating dirty cache lines. Expect things to break.\n");
1629
1630    if (blk.isValid()) {
1631        assert(!blk.isDirty());
1632        tags->invalidate(&blk);
1633        blk.invalidate();
1634    }
1635
1636    return true;
1637}
1638
1639CacheBlk*
1640Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks)
1641{
1642    CacheBlk *blk = tags->findVictim(addr);
1643
1644    // It is valid to return NULL if there is no victim
1645    if (!blk)
1646        return nullptr;
1647
1648    if (blk->isValid()) {
1649        Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
1650        MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
1651        if (repl_mshr) {
1652            // must be an outstanding upgrade request
1653            // on a block we're about to replace...
1654            assert(!blk->isWritable() || blk->isDirty());
1655            assert(repl_mshr->needsExclusive());
1656            // too hard to replace block with transient state
1657            // allocation failed, block not inserted
1658            return NULL;
1659        } else {
1660            DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx (%s): %s\n",
1661                    repl_addr, blk->isSecure() ? "s" : "ns",
1662                    addr, is_secure ? "s" : "ns",
1663                    blk->isDirty() ? "writeback" : "clean");
1664
1665            // Will send up Writeback/CleanEvict snoops via isCachedAbove
1666            // when pushing this writeback list into the write buffer.
1667            if (blk->isDirty() || writebackClean) {
1668                // Save writeback packet for handling by caller
1669                writebacks.push_back(writebackBlk(blk));
1670            } else {
1671                writebacks.push_back(cleanEvictBlk(blk));
1672            }
1673        }
1674    }
1675
1676    return blk;
1677}
1678
1679void
1680Cache::invalidateBlock(CacheBlk *blk)
1681{
1682    if (blk != tempBlock)
1683        tags->invalidate(blk);
1684    blk->invalidate();
1685}
1686
1687// Note that the reason we return a list of writebacks rather than
1688// inserting them directly in the write buffer is that this function
1689// is called by both atomic and timing-mode accesses, and in atomic
1690// mode we don't mess with the write buffer (we just perform the
1691// writebacks atomically once the original request is complete).
1692CacheBlk*
1693Cache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
1694                  bool allocate)
1695{
1696    assert(pkt->isResponse() || pkt->cmd == MemCmd::WriteLineReq);
1697    Addr addr = pkt->getAddr();
1698    bool is_secure = pkt->isSecure();
1699#if TRACING_ON
1700    CacheBlk::State old_state = blk ? blk->status : 0;
1701#endif
1702
1703    // When handling a fill, discard any CleanEvicts for the
1704    // same address in write buffer.
1705    Addr M5_VAR_USED blk_addr = blockAlign(pkt->getAddr());
1706    std::vector<MSHR *> M5_VAR_USED wbs;
1707    assert(!writeBuffer.findMatches(blk_addr, is_secure, wbs));
1708
1709    if (blk == NULL) {
1710        // better have read new data...
1711        assert(pkt->hasData());
1712
1713        // only read responses and write-line requests have data;
1714        // note that we don't write the data here for write-line - that
1715        // happens in the subsequent satisfyCpuSideRequest.
1716        assert(pkt->isRead() || pkt->cmd == MemCmd::WriteLineReq);
1717
1718        // need to do a replacement if allocating, otherwise we stick
1719        // with the temporary storage
1720        blk = allocate ? allocateBlock(addr, is_secure, writebacks) : NULL;
1721
1722        if (blk == NULL) {
1723            // No replaceable block or a mostly exclusive
1724            // cache... just use temporary storage to complete the
1725            // current request and then get rid of it
1726            assert(!tempBlock->isValid());
1727            blk = tempBlock;
1728            tempBlock->set = tags->extractSet(addr);
1729            tempBlock->tag = tags->extractTag(addr);
1730            // @todo: set security state as well...
1731            DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
1732                    is_secure ? "s" : "ns");
1733        } else {
1734            tags->insertBlock(pkt, blk);
1735        }
1736
1737        // we should never be overwriting a valid block
1738        assert(!blk->isValid());
1739    } else {
1740        // existing block... probably an upgrade
1741        assert(blk->tag == tags->extractTag(addr));
1742        // either we're getting new data or the block should already be valid
1743        assert(pkt->hasData() || blk->isValid());
1744        // don't clear block status... if block is already dirty we
1745        // don't want to lose that
1746    }
1747
1748    if (is_secure)
1749        blk->status |= BlkSecure;
1750    blk->status |= BlkValid | BlkReadable;
1751
1752    // sanity check for whole-line writes, which should always be
1753    // marked as writable as part of the fill, and then later marked
1754    // dirty as part of satisfyCpuSideRequest
1755    if (pkt->cmd == MemCmd::WriteLineReq) {
1756        assert(!pkt->sharedAsserted());
1757        // at the moment other caches do not respond to the
1758        // invalidation requests corresponding to a whole-line write
1759        assert(!pkt->memInhibitAsserted());
1760    }
1761
1762    if (!pkt->sharedAsserted()) {
1763        // we could get non-shared responses from memory (rather than
1764        // a cache) even in a read-only cache, note that we set this
1765        // bit even for a read-only cache as we use it to represent
1766        // the exclusive state
1767        blk->status |= BlkWritable;
1768
1769        // If we got this via cache-to-cache transfer (i.e., from a
1770        // cache that was an owner) and took away that owner's copy,
1771        // then we need to write it back.  Normally this happens
1772        // anyway as a side effect of getting a copy to write it, but
1773        // there are cases (such as failed store conditionals or
1774        // compare-and-swaps) where we'll demand an exclusive copy but
1775        // end up not writing it.
1776        if (pkt->memInhibitAsserted()) {
1777            blk->status |= BlkDirty;
1778
1779            chatty_assert(!isReadOnly, "Should never see dirty snoop response "
1780                          "in read-only cache %s\n", name());
1781        }
1782    }
1783
1784    DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
1785            addr, is_secure ? "s" : "ns", old_state, blk->print());
1786
1787    // if we got new data, copy it in (checking for a read response
1788    // and a response that has data is the same in the end)
1789    if (pkt->isRead()) {
1790        // sanity checks
1791        assert(pkt->hasData());
1792        assert(pkt->getSize() == blkSize);
1793
1794        std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
1795    }
1796    // We pay for fillLatency here.
1797    blk->whenReady = clockEdge() + fillLatency * clockPeriod() +
1798        pkt->payloadDelay;
1799
1800    return blk;
1801}
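// For reference, the status bits set above map roughly onto the usual
// coherence states (a simplification of the classic-cache protocol):
// valid+writable+dirty ~ Modified, valid+writable ~ Exclusive, valid+dirty
// without writable ~ Owned, and valid alone ~ Shared. A response without
// sharedAsserted() therefore fills the block in Exclusive, or in Modified
// if memInhibitAsserted() indicated a dirty cache-to-cache transfer.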
1802
1803
1804/////////////////////////////////////////////////////
1805//
1806// Snoop path: requests coming in from the memory side
1807//
1808/////////////////////////////////////////////////////
1809
1810void
1811Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
1812                              bool already_copied, bool pending_inval)
1813{
1814    // sanity check
1815    assert(req_pkt->isRequest());
1816    assert(req_pkt->needsResponse());
1817
1818    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
1819            req_pkt->cmdString(), req_pkt->getAddr(), req_pkt->getSize());
1820    // timing-mode snoop responses require a new packet, unless we
1821    // already made a copy...
1822    PacketPtr pkt = req_pkt;
1823    if (!already_copied)
1824        // do not clear flags, and allocate space for data if the
1825        // packet needs it (the only packets that carry data are read
1826        // responses)
1827        pkt = new Packet(req_pkt, false, req_pkt->isRead());
1828
1829    assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
1830           pkt->sharedAsserted());
1831    pkt->makeTimingResponse();
1832    if (pkt->isRead()) {
1833        pkt->setDataFromBlock(blk_data, blkSize);
1834    }
1835    if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
1836        // Assume we defer a response to a read from a far-away cache
1837        // A, then later defer a ReadExcl from a cache B on the same
1838        // bus as us.  We'll assert MemInhibit in both cases, but in
1839        // the latter case MemInhibit will keep the invalidation from
1840        // reaching cache A.  This special response tells cache A that
1841        // it gets the block to satisfy its read, but must immediately
1842        // invalidate it.
1843        pkt->cmd = MemCmd::ReadRespWithInvalidate;
1844    }
1845    // Here we consider forward_time, paying for just forward latency and
1846    // also charging the delay provided by the xbar.
1847    // forward_time is used as the send time for the snoop response below.
1848    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
1849    // Here we reset the timing of the packet.
1850    pkt->headerDelay = pkt->payloadDelay = 0;
1851    DPRINTF(Cache, "%s created response: %s addr %#llx size %d tick: %lu\n",
1852            __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize(),
1853            forward_time);
1854    memSidePort->schedTimingSnoopResp(pkt, forward_time, true);
1855}
1856
1857uint32_t
1858Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
1859                   bool is_deferred, bool pending_inval)
1860{
1861    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
1862            pkt->cmdString(), pkt->getAddr(), pkt->getSize());
1863    // deferred snoops can only happen in timing mode
1864    assert(!(is_deferred && !is_timing));
1865    // pending_inval only makes sense on deferred snoops
1866    assert(!(pending_inval && !is_deferred));
1867    assert(pkt->isRequest());
1868
1869    // the packet may get modified if we or a forwarded snooper
1870    // responds in atomic mode, so remember a few things about the
1871    // original packet up front
1872    bool invalidate = pkt->isInvalidate();
1873    bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
1874
1875    uint32_t snoop_delay = 0;
1876
1877    if (forwardSnoops) {
1878        // first propagate snoop upward to see if anyone above us wants to
1879        // handle it.  save & restore packet src since it will get
1880        // rewritten to be relative to cpu-side bus (if any)
1881        bool alreadyResponded = pkt->memInhibitAsserted();
1882        if (is_timing) {
1883            // copy the packet so that we can clear any flags before
1884            // forwarding it upwards, we also allocate data (passing
1885            // the pointer along in case of static data), in case
1886            // there is a snoop hit in upper levels
1887            Packet snoopPkt(pkt, true, true);
1888            snoopPkt.setExpressSnoop();
1889            // the snoop packet does not need to wait any additional
1890            // time
1891            snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
1892            cpuSidePort->sendTimingSnoopReq(&snoopPkt);
1893
1894            // add the header delay (including crossbar and snoop
1895            // delays) of the upward snoop to the snoop delay for this
1896            // cache
1897            snoop_delay += snoopPkt.headerDelay;
1898
1899            if (snoopPkt.memInhibitAsserted()) {
1900                // cache-to-cache response from some upper cache
1901                assert(!alreadyResponded);
1902                pkt->assertMemInhibit();
1903            }
1904            if (snoopPkt.sharedAsserted()) {
1905                pkt->assertShared();
1906            }
1907            // If this request is a prefetch or clean evict and an upper level
1908            // signals block present, make sure to propagate the block
1909            // presence to the requester.
1910            if (snoopPkt.isBlockCached()) {
1911                pkt->setBlockCached();
1912            }
1913        } else {
1914            cpuSidePort->sendAtomicSnoop(pkt);
1915            if (!alreadyResponded && pkt->memInhibitAsserted()) {
1916                // cache-to-cache response from some upper cache:
1917                // forward response to original requester
1918                assert(pkt->isResponse());
1919            }
1920        }
1921    }
1922
1923    if (!blk || !blk->isValid()) {
1924        DPRINTF(Cache, "%s snoop miss for %s addr %#llx size %d\n",
1925                __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
1926        return snoop_delay;
1927    } else {
1928       DPRINTF(Cache, "%s snoop hit for %s for addr %#llx size %d, "
1929               "old state is %s\n", __func__, pkt->cmdString(),
1930               pkt->getAddr(), pkt->getSize(), blk->print());
1931    }
1932
1933    chatty_assert(!(isReadOnly && blk->isDirty()),
1934                  "Should never have a dirty block in a read-only cache %s\n",
1935                  name());
1936
1937    // We may end up modifying both the block state and the packet (if
1938    // we respond in atomic mode), so just figure out what to do now
1939    // and then do it later. If we find dirty data while snooping for
1940    // an invalidate, we don't need to send a response. The
1941    // invalidation itself is taken care of below.
1942    bool respond = blk->isDirty() && pkt->needsResponse() &&
1943        pkt->cmd != MemCmd::InvalidateReq;
1944    bool have_exclusive = blk->isWritable();
1945
1946    // Invalidate any prefetch's from below that would strip write permissions
1947    // MemCmd::HardPFReq is only observed by upstream caches.  After missing
1948    // above and in its own cache, a new MemCmd::ReadReq is created that
1949    // downstream caches observe.
1950    if (pkt->mustCheckAbove()) {
1951        DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s from"
1952                " lower cache\n", pkt->getAddr(), pkt->cmdString());
1953        pkt->setBlockCached();
1954        return snoop_delay;
1955    }
1956
1957    if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
1958        // reading non-exclusive shared data, note that we retain
1959        // the block in owned state if it is dirty, with the response
1960        // taken care of below, and otherwise simply downgrade to
1961        // shared
1962        assert(!needs_exclusive);
1963        pkt->assertShared();
1964        blk->status &= ~BlkWritable;
1965    }
1966
1967    if (respond) {
1968        // prevent anyone else from responding, cache as well as
1969        // memory, and also prevent any memory from even seeing the
1970        // request (with current inhibited semantics), note that this
1971        // applies both to reads and writes and that for writes it
1972        // works thanks to the fact that we still have dirty data and
1973        // will write it back at a later point
1974        assert(!pkt->memInhibitAsserted());
1975        pkt->assertMemInhibit();
1976        if (have_exclusive) {
1977            // in the case of an uncacheable request there is no point
1978            // in setting the exclusive flag, but since the recipient
1979            // does not care there is no harm in doing so, in any case
1980            // it is just a hint
1981            pkt->setSupplyExclusive();
1982        }
1983        if (is_timing) {
1984            doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
1985        } else {
1986            pkt->makeAtomicResponse();
1987            pkt->setDataFromBlock(blk->data, blkSize);
1988        }
1989    }
1990
1991    if (!respond && is_timing && is_deferred) {
1992        // if it's a deferred timing snoop to which we are not
1993        // responding, then we've made a copy of both the request and
1994        // the packet, delete them here
1995        assert(pkt->needsResponse());
1996        delete pkt->req;
1997        delete pkt;
1998    }
1999
2000    // Do this last in case it deallocates block data or something
2001    // like that
2002    if (invalidate) {
2003        invalidateBlock(blk);
2004    }
2005
2006    DPRINTF(Cache, "new state is %s\n", blk->print());
2007
2008    return snoop_delay;
2009}
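// Informal summary of the snoop outcomes above: a dirty block makes this
// cache the responder (memInhibit asserted and data supplied in timing or
// atomic fashion), a non-invalidating cacheable read downgrades the block
// by clearing BlkWritable, and an invalidating request ends with
// invalidateBlock(). The returned snoop_delay only covers the forwarded
// upward snoop; the callers add this cache's own lookup latency.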
2010
2011
2012void
2013Cache::recvTimingSnoopReq(PacketPtr pkt)
2014{
2015    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
2016            pkt->cmdString(), pkt->getAddr(), pkt->getSize());
2017
2018    // Snoops shouldn't happen when bypassing caches
2019    assert(!system->bypassCaches());
2020
2021    // no need to snoop requests that are not in range
2022    if (!inRange(pkt->getAddr())) {
2023        return;
2024    }
2025
2026    bool is_secure = pkt->isSecure();
2027    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
2028
2029    Addr blk_addr = blockAlign(pkt->getAddr());
2030    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
2031
2032    // Update the latency cost of the snoop so that the crossbar can
2033    // account for it. Do not overwrite what other neighbouring caches
2034    // have already done, rather take the maximum. The update is
2035    // tentative, for cases where we return before an upward snoop
2036    // happens below.
2037    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
2038                                         lookupLatency * clockPeriod());
2039
2040    // Inform requests from below (Prefetch, CleanEvict or Writeback) of
2041    // an MSHR hit by calling setBlockCached.
2042    if (mshr && pkt->mustCheckAbove()) {
2043        DPRINTF(Cache, "Setting block cached for %s from"
2044                "lower cache on mshr hit %#x\n",
2045                pkt->cmdString(), pkt->getAddr());
2046        pkt->setBlockCached();
2047        return;
2048    }
2049
2050    // Let the MSHR itself track the snoop and decide whether we want
2051    // to go ahead and do the regular cache snoop
2052    if (mshr && mshr->handleSnoop(pkt, order++)) {
2053        DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s)."
2054                "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
2055                mshr->print());
2056
2057        if (mshr->getNumTargets() > numTarget)
2058            warn("allocating bonus target for snoop"); //handle later
2059        return;
2060    }
2061
2062    //We also need to check the writeback buffers and handle those
2063    std::vector<MSHR *> writebacks;
2064    if (writeBuffer.findMatches(blk_addr, is_secure, writebacks)) {
2065        DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
2066                pkt->getAddr(), is_secure ? "s" : "ns");
2067
2068        // Look through writebacks for any cacheable writes.
2069        // We should only ever find a single match
2070        assert(writebacks.size() == 1);
2071        MSHR *wb_entry = writebacks[0];
2072        // Expect to see only Writebacks and/or CleanEvicts here, both of
2073        // which should not be generated for uncacheable data.
2074        assert(!wb_entry->isUncacheable());
2075        // There should only be a single request responsible for generating
2076        // Writebacks/CleanEvicts.
2077        assert(wb_entry->getNumTargets() == 1);
2078        PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
2079        assert(wb_pkt->isEviction());
2080
2081        if (pkt->isEviction()) {
2082            // if the block is found in the write queue, set the BLOCK_CACHED
2083            // flag for Writeback/CleanEvict snoop. On return the snoop will
2084            // propagate the BLOCK_CACHED flag in Writeback packets and prevent
2085            // any CleanEvicts from travelling down the memory hierarchy.
2086            pkt->setBlockCached();
2087            DPRINTF(Cache, "Squashing %s from lower cache on writequeue hit"
2088                    " %#x\n", pkt->cmdString(), pkt->getAddr());
2089            return;
2090        }
2091
2092        if (wb_pkt->cmd == MemCmd::WritebackDirty) {
2093            assert(!pkt->memInhibitAsserted());
2094            pkt->assertMemInhibit();
2095            if (!pkt->needsExclusive()) {
2096                pkt->assertShared();
2097                // the writeback is no longer passing exclusivity (the
2098                // receiving cache should consider the block owned
2099                // rather than modified)
2100                wb_pkt->assertShared();
2101            } else {
2102                // if we're not asserting the shared line, we need to
2103                // invalidate our copy.  we'll do that below as long as
2104                // the packet's invalidate flag is set...
2105                assert(pkt->isInvalidate());
2106            }
2107            doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
2108                                   false, false);
2109        } else {
2110            // on hitting a clean writeback we play it safe and do not
2111            // provide a response, the block may be dirty somewhere
2112            // else
2113            assert(wb_pkt->isCleanEviction());
2114            // The cache technically holds the block until the
2115            // corresponding message reaches the crossbar
2116            // below. Therefore when a snoop encounters a CleanEvict
2117            // or WritebackClean message we must set assertShared
2118            // (just like when it encounters a Writeback) to avoid the
2119            // snoop filter prematurely clearing the holder bit in the
2120            // crossbar below
2121            if (!pkt->needsExclusive()) {
2122                pkt->assertShared();
2123                // the writeback is no longer passing exclusivity (the
2124                // receiving cache should consider the block owned
2125                // rather than modified)
2126                wb_pkt->assertShared();
2127            } else {
2128                assert(pkt->isInvalidate());
2129            }
2130        }
2131
2132        if (pkt->isInvalidate()) {
2133            // Invalidation trumps our writeback... discard here
2134            // Note: markInService will remove entry from writeback buffer.
2135            markInService(wb_entry, false);
2136            delete wb_pkt;
2137        }
2138    }
2139
2140    // If this was a shared writeback, there may still be
2141    // other shared copies above that require invalidation.
2142    // We could be more selective and return here if the
2143    // request is non-exclusive or if the writeback is
2144    // exclusive.
2145    uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false);
2146
2147    // Override what we did when we first saw the snoop, as we now
2148    // also have the cost of the upwards snoops to account for
2149    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay +
2150                                         lookupLatency * clockPeriod());
2151}
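// Note on the snoopDelay accounting above: the value left in pkt->snoopDelay
// is the maximum, over the caches that saw the snoop, of the upward snoop's
// header delay plus this cache's lookupLatency converted to ticks, which the
// crossbar then uses when timing the snoop response.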
2152
2153bool
2154Cache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
2155{
2156    // Express snoop responses from master to slave, e.g., from L1 to L2
2157    cache->recvTimingSnoopResp(pkt);
2158    return true;
2159}
2160
2161Tick
2162Cache::recvAtomicSnoop(PacketPtr pkt)
2163{
2164    // Snoops shouldn't happen when bypassing caches
2165    assert(!system->bypassCaches());
2166
2167    // no need to snoop requests that are not in range.
2168    if (!inRange(pkt->getAddr())) {
2169        return 0;
2170    }
2171
2172    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
2173    uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
2174    return snoop_delay + lookupLatency * clockPeriod();
2175}
2176
2177
2178MSHR *
2179Cache::getNextMSHR()
2180{
2181    // Check both MSHR queue and write buffer for potential requests,
2182    // note that null does not mean there is no request, it could
2183    // simply be that it is not ready
2184    MSHR *miss_mshr  = mshrQueue.getNextMSHR();
2185    MSHR *write_mshr = writeBuffer.getNextMSHR();
2186
2187    // If we got a write buffer request ready, first priority is a
2188    // full write buffer, otherwise we favour the miss requests
2189    if (write_mshr &&
2190        ((writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) ||
2191         !miss_mshr)) {
2192        // need to search MSHR queue for conflicting earlier miss.
2193        MSHR *conflict_mshr =
2194            mshrQueue.findPending(write_mshr->blkAddr,
2195                                  write_mshr->isSecure);
2196
2197        if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
2198            // Service misses in order until conflict is cleared.
2199            return conflict_mshr;
2200
2201            // @todo Note that we ignore the ready time of the conflict here
2202        }
2203
2204        // No conflicts; issue write
2205        return write_mshr;
2206    } else if (miss_mshr) {
2207        // need to check for conflicting earlier writeback
2208        MSHR *conflict_mshr =
2209            writeBuffer.findPending(miss_mshr->blkAddr,
2210                                    miss_mshr->isSecure);
2211        if (conflict_mshr) {
2212            // not sure why we don't check order here... it was in the
2213            // original code but commented out.
2214
2215            // The only way this happens is if we are
2216            // doing a write and we didn't have permissions
2217            // then subsequently saw a writeback (owned got evicted)
2218            // We need to make sure to perform the writeback first
2219            // To preserve the dirty data, then we can issue the write
2220
2221            // should we return write_mshr here instead?  I.e. do we
2222            // have to flush writes in order?  I don't think so... not
2223            // for Alpha anyway.  Maybe for x86?
2224            return conflict_mshr;
2225
2226            // @todo Note that we ignore the ready time of the conflict here
2227        }
2228
2229        // No conflicts; issue read
2230        return miss_mshr;
2231    }
2232
2233    // fall through... no pending requests.  Try a prefetch.
2234    assert(!miss_mshr && !write_mshr);
2235    if (prefetcher && mshrQueue.canPrefetch()) {
2236        // If we have a miss queue slot, we can try a prefetch
2237        PacketPtr pkt = prefetcher->getPacket();
2238        if (pkt) {
2239            Addr pf_addr = blockAlign(pkt->getAddr());
2240            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
2241                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
2242                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
2243                // Update statistic on number of prefetches issued
2244                // (hwpf_mshr_misses)
2245                assert(pkt->req->masterId() < system->maxMasters());
2246                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
2247
2248                // allocate an MSHR and return it, note
2249                // that we send the packet straight away, so do not
2250                // schedule the send
2251                return allocateMissBuffer(pkt, curTick(), false);
2252            } else {
2253                // free the request and packet
2254                delete pkt->req;
2255                delete pkt;
2256            }
2257        }
2258    }
2259
2260    return NULL;
2261}
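// Informal description of the priority order implemented above: (1) a ready
// write-buffer entry when the write buffer is full with nothing in service,
// or when there is no ready miss, unless an older conflicting miss exists;
// (2) otherwise a ready miss, unless an older conflicting writeback must
// drain first; (3) failing both, a prefetch candidate, provided the line is
// not already present in the tags, the MSHRs or the write buffer.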
2262
2263bool
2264Cache::isCachedAbove(PacketPtr pkt, bool is_timing) const
2265{
2266    if (!forwardSnoops)
2267        return false;
2268    // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
2269    // Writeback snoops into upper level caches to check for copies of the
2270    // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict
2271    // packet, the cache can inform the crossbar below of presence or absence
2272    // of the block.
2273    if (is_timing) {
2274        Packet snoop_pkt(pkt, true, false);
2275        snoop_pkt.setExpressSnoop();
2276        // Assert that packet is either Writeback or CleanEvict and not a
2277        // prefetch request because prefetch requests need an MSHR and may
2278        // generate a snoop response.
2279        assert(pkt->isEviction());
2280        snoop_pkt.senderState = NULL;
2281        cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
2282        // Writeback/CleanEvict snoops do not generate a snoop response.
2283        assert(!(snoop_pkt.memInhibitAsserted()));
2284        return snoop_pkt.isBlockCached();
2285    } else {
2286        cpuSidePort->sendAtomicSnoop(pkt);
2287        return pkt->isBlockCached();
2288    }
2289}
2290
2291PacketPtr
2292Cache::getTimingPacket()
2293{
2294    MSHR *mshr = getNextMSHR();
2295
2296    if (mshr == NULL) {
2297        return NULL;
2298    }
2299
2300    // use request from 1st target
2301    PacketPtr tgt_pkt = mshr->getTarget()->pkt;
2302    PacketPtr pkt = NULL;
2303
2304    DPRINTF(CachePort, "%s %s for addr %#llx size %d\n", __func__,
2305            tgt_pkt->cmdString(), tgt_pkt->getAddr(), tgt_pkt->getSize());
2306
2307    CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
2308
2309    if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
2310        // We need to check the caches above us to verify that
2311        // they don't have a copy of this block in the dirty state
2312        // at the moment. Without this check we could get a stale
2313        // copy from memory that might get used in place of the
2314        // dirty one.
2315        Packet snoop_pkt(tgt_pkt, true, false);
2316        snoop_pkt.setExpressSnoop();
2317        // We are sending this packet upwards, but if it hits we will
2318        // get a snoop response that we end up treating just like a
2319        // normal response, hence it needs the MSHR as its sender
2320        // state
2321        snoop_pkt.senderState = mshr;
2322        cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
2323
2324        // Check to see if the prefetch was squashed by an upper cache (to
2325        // prevent us from grabbing the line), or if a writeback arrived
2326        // between the time the prefetch was placed in the MSHRs and when
2327        // it was selected to be sent.
2329
2330        // It is important to check memInhibitAsserted before
2331        // prefetchSquashed. If another cache has asserted MEM_INHIBIT, it
2332        // will be sending a response which will arrive at the MSHR
2333        // allocated for this request. Checking the prefetchSquash first
2334        // may result in the MSHR being prematurely deallocated.
2335
2336        if (snoop_pkt.memInhibitAsserted()) {
2337            auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
2338            assert(r.second);
2339            // If we are getting a non-shared response it is dirty
2340            bool pending_dirty_resp = !snoop_pkt.sharedAsserted();
2341            markInService(mshr, pending_dirty_resp);
2342            DPRINTF(Cache, "Upward snoop of prefetch for addr"
2343                    " %#x (%s) hit\n",
2344                    tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
2345            return NULL;
2346        }
2347
2348        if (snoop_pkt.isBlockCached() || blk != NULL) {
2349            DPRINTF(Cache, "Block present, prefetch squashed by cache.  "
2350                    "Deallocating mshr target %#x.\n",
2351                    mshr->blkAddr);
2352            // Deallocate the mshr target
2353            if (mshr->queue->forceDeallocateTarget(mshr)) {
2354                // Clear the blocked flag if this deallocation freed an
2355                // MSHR when all had previously been utilized
2356                clearBlocked((BlockedCause)(mshr->queue->index));
2357            }
2358            return NULL;
2359        }
2360    }
2361
2362    if (mshr->isForwardNoResponse()) {
2363        // no response expected, just forward packet as it is
2364        assert(tags->findBlock(mshr->blkAddr, mshr->isSecure) == NULL);
2365        pkt = tgt_pkt;
2366    } else {
2367        pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
2368
2369        mshr->isForward = (pkt == NULL);
2370
2371        if (mshr->isForward) {
2372            // not a cache block request, but a response is expected
2373            // make copy of current packet to forward, keep current
2374            // copy for response handling
2375            pkt = new Packet(tgt_pkt, false, true);
2376            if (pkt->isWrite()) {
2377                pkt->setData(tgt_pkt->getConstPtr<uint8_t>());
2378            }
2379        }
2380    }
2381
2382    assert(pkt != NULL);
2383    // play it safe and append (rather than set) the sender state, as
2384    // forwarded packets may already have existing state
2385    pkt->pushSenderState(mshr);
2386    return pkt;
2387}
2388
2389
2390Tick
2391Cache::nextMSHRReadyTime() const
2392{
2393    Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
2394                              writeBuffer.nextMSHRReadyTime());
2395
2396    // Don't signal prefetch ready time if no MSHRs available
2397    // Will signal once enough MSHRs are deallocated
2398    if (prefetcher && mshrQueue.canPrefetch()) {
2399        nextReady = std::min(nextReady,
2400                             prefetcher->nextPrefetchReadyTime());
2401    }
2402
2403    return nextReady;
2404}
2405
2406void
2407Cache::serialize(CheckpointOut &cp) const
2408{
2409    bool dirty(isDirty());
2410
2411    if (dirty) {
2412        warn("*** The cache still contains dirty data. ***\n");
2413        warn("    Make sure to drain the system using the correct flags.\n");
2414        warn("    This checkpoint will not restore correctly and dirty data in "
2415             "the cache will be lost!\n");
2416    }
2417
2418    // Since we don't checkpoint the data in the cache, any dirty data
2419    // will be lost when restoring from a checkpoint of a system that
2420    // wasn't drained properly. Flag the checkpoint as invalid if the
2421    // cache contains dirty data.
2422    bool bad_checkpoint(dirty);
2423    SERIALIZE_SCALAR(bad_checkpoint);
2424}
2425
2426void
2427Cache::unserialize(CheckpointIn &cp)
2428{
2429    bool bad_checkpoint;
2430    UNSERIALIZE_SCALAR(bad_checkpoint);
2431    if (bad_checkpoint) {
2432        fatal("Restoring from checkpoints with dirty caches is not supported "
2433              "in the classic memory system. Please remove any caches or "
2434              " drain them properly before taking checkpoints.\n");
2435    }
2436}
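// Practical note (an assumption about typical usage, not enforced here): to
// checkpoint a system that still contains caches, configuration scripts
// normally write dirty data back first so that isDirty() is false, e.g.:
//     m5.memWriteback(root)
//     m5.checkpoint(ckpt_dir)
// where root and ckpt_dir stand for the script's Root object and the chosen
// checkpoint directory.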
2437
2438///////////////
2439//
2440// CpuSidePort
2441//
2442///////////////
2443
2444AddrRangeList
2445Cache::CpuSidePort::getAddrRanges() const
2446{
2447    return cache->getAddrRanges();
2448}
2449
2450bool
2451Cache::CpuSidePort::recvTimingReq(PacketPtr pkt)
2452{
2453    assert(!cache->system->bypassCaches());
2454
2455    bool success = false;
2456
2457    // always let inhibited requests through, even if blocked,
2458    // ultimately we should check if this is an express snoop, but at
2459    // the moment that flag is only set in the cache itself
2460    if (pkt->memInhibitAsserted()) {
2461        // do not change the current retry state
2462        bool M5_VAR_USED bypass_success = cache->recvTimingReq(pkt);
2463        assert(bypass_success);
2464        return true;
2465    } else if (blocked || mustSendRetry) {
2466        // either already committed to send a retry, or blocked
2467        success = false;
2468    } else {
2469        // pass it on to the cache, and let the cache decide if we
2470        // have to retry or not
2471        success = cache->recvTimingReq(pkt);
2472    }
2473
2474    // remember if we have to retry
2475    mustSendRetry = !success;
2476    return success;
2477}
2478
2479Tick
2480Cache::CpuSidePort::recvAtomic(PacketPtr pkt)
2481{
2482    return cache->recvAtomic(pkt);
2483}
2484
2485void
2486Cache::CpuSidePort::recvFunctional(PacketPtr pkt)
2487{
2488    // functional request
2489    cache->functionalAccess(pkt, true);
2490}
2491
2492Cache::
2493CpuSidePort::CpuSidePort(const std::string &_name, Cache *_cache,
2494                         const std::string &_label)
2495    : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache)
2496{
2497}
2498
2499Cache*
2500CacheParams::create()
2501{
2502    assert(tags);
2503
2504    return new Cache(this);
2505}
2506///////////////
2507//
2508// MemSidePort
2509//
2510///////////////
2511
2512bool
2513Cache::MemSidePort::recvTimingResp(PacketPtr pkt)
2514{
2515    cache->recvTimingResp(pkt);
2516    return true;
2517}
2518
2519// Express snooping requests to memside port
2520void
2521Cache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
2522{
2523    // handle snooping requests
2524    cache->recvTimingSnoopReq(pkt);
2525}
2526
Tick
Cache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
{
    return cache->recvAtomicSnoop(pkt);
}

void
Cache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
{
    // functional snoop (note that in contrast to atomic we don't have
    // a specific functionalSnoop method, as they have the same
    // behaviour regardless)
    cache->functionalAccess(pkt, false);
}

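// Called by the request packet queue when it is time to send a deferred
// packet; rather than storing packets in the queue itself, the cache is
// asked for the next MSHR or write-queue entry to service.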
void
Cache::CacheReqPacketQueue::sendDeferredPacket()
{
    // sanity check
    assert(!waitingOnRetry);

    // there should never be any deferred request packets in the
    // queue, instead we rely on the cache to provide the packets
    // from the MSHR queue or write queue
    assert(deferredPacketReadyTime() == MaxTick);

    // check for request packets (requests & writebacks)
    PacketPtr pkt = cache.getTimingPacket();
    if (pkt == NULL) {
        // can happen if e.g. we attempt a writeback and fail, but
        // before the retry, the writeback is eliminated because
        // we snoop another cache's ReadEx.
    } else {
        MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
        // in most cases getTimingPacket allocates a new packet, and
        // we must delete it unless it is successfully sent
        bool delete_pkt = !mshr->isForwardNoResponse();

        // let our snoop responses go first if there are responses to
        // the same addresses we are about to write back; note that
        // this creates a dependency between requests and snoop
        // responses, but that should not be a problem since there is
        // a chain already and the key is that the snoop responses can
        // sink unconditionally
        if (snoopRespQueue.hasAddr(pkt->getAddr())) {
            DPRINTF(CachePort, "Waiting for snoop response to be sent\n");
            Tick when = snoopRespQueue.deferredPacketReadyTime();
            schedSendEvent(when);

            if (delete_pkt)
                delete pkt;

            return;
        }

        waitingOnRetry = !masterPort.sendTimingReq(pkt);

        if (waitingOnRetry) {
            DPRINTF(CachePort, "now waiting on a retry\n");
            if (delete_pkt) {
                // we are awaiting a retry, but we delete the packet
                // and will create a new one when we get the
                // opportunity
                delete pkt;
            }
            // note that we have now masked any requestBus and
            // schedSendEvent (we will wait for a retry before
            // doing anything), and this is so even if we do not
            // care about this packet and might override it before
            // it gets retried
        } else {
            // As part of the call to sendTimingReq the packet is
            // forwarded to all neighbouring caches (and any caches
            // above them) as a snoop. The packet is also sent to
            // any potential cache below as the interconnect is not
            // allowed to buffer the packet. Thus at this point we
            // know if any of the neighbouring caches or the
            // downstream cache is responding, and if so, whether it
            // is with a dirty line or not.
            bool pending_dirty_resp = !pkt->sharedAsserted() &&
                pkt->memInhibitAsserted();

            cache.markInService(mshr, pending_dirty_resp);
        }
    }

    // if we succeeded and are not waiting for a retry, schedule the
    // next send considering when the next MSHR is ready; note that
    // snoop responses have their own packet queue and thus schedule
    // their own events
    if (!waitingOnRetry) {
        schedSendEvent(cache.nextMSHRReadyTime());
    }
}

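// The memory-side port owns its request and snoop-response queues: the
// former asks the cache for packets to send (see sendDeferredPacket
// above), while the latter delivers snoop responses independently.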
Cache::
MemSidePort::MemSidePort(const std::string &_name, Cache *_cache,
                         const std::string &_label)
    : BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
      _reqQueue(*_cache, *this, _snoopRespQueue, _label),
      _snoopRespQueue(*_cache, *this, _label), cache(_cache)
{
}