/*
 * Copyright (c) 2010-2019 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Sandberg
 *          Nikos Nikoleris
 */

/**
 * @file
 * Cache definitions.
 */

#include "mem/cache/cache.hh"

#include <cassert>

#include "base/compiler.hh"
#include "base/logging.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CacheTags.hh"
#include "debug/CacheVerbose.hh"
#include "enums/Clusivity.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/tags/base.hh"
#include "mem/cache/write_queue_entry.hh"
#include "mem/request.hh"
#include "params/Cache.hh"

Cache::Cache(const CacheParams *p)
    : BaseCache(p, p->system->cacheLineSize()),
      doFastWrites(true)
{
}

void
Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                      bool deferred_response, bool pending_downgrade)
{
    BaseCache::satisfyRequest(pkt, blk);

    if (pkt->isRead()) {
        // determine if this read is from a (coherent) cache or not
        if (pkt->fromCache()) {
            assert(pkt->getSize() == blkSize);
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsWritable()) {
                // sanity check
                assert(pkt->cmd == MemCmd::ReadExReq ||
                       pkt->cmd == MemCmd::SCUpgradeFailReq);
                assert(!pkt->hasSharers());

                // if we have a dirty copy, make sure the recipient
                // keeps it marked dirty (in the modified state)
                if (blk->isDirty()) {
                    pkt->setCacheResponding();
                    blk->status &= ~BlkDirty;
                }
            } else if (blk->isWritable() && !pending_downgrade &&
                       !pkt->hasSharers() &&
                       pkt->cmd != MemCmd::ReadCleanReq) {
                // we can give the requester a writable copy on a read
read 1085135Sgblack@eecs.umich.edu // request if: 1095135Sgblack@eecs.umich.edu // - we have a writable copy at this level (& below) 1105141Sgblack@eecs.umich.edu // - we don't have a pending snoop from below 1115141Sgblack@eecs.umich.edu // signaling another read request 1125141Sgblack@eecs.umich.edu // - no other cache above has a copy (otherwise it 1135141Sgblack@eecs.umich.edu // would have set hasSharers flag when 1145141Sgblack@eecs.umich.edu // snooping the packet) 1155141Sgblack@eecs.umich.edu // - the read has explicitly asked for a clean 1165141Sgblack@eecs.umich.edu // copy of the line 1175141Sgblack@eecs.umich.edu if (blk->isDirty()) { 1185141Sgblack@eecs.umich.edu // special considerations if we're owner: 1195182Sgblack@eecs.umich.edu if (!deferred_response) { 1205141Sgblack@eecs.umich.edu // respond with the line in Modified state 1215141Sgblack@eecs.umich.edu // (cacheResponding set, hasSharers not set) 1225141Sgblack@eecs.umich.edu pkt->setCacheResponding(); 1235141Sgblack@eecs.umich.edu 1245141Sgblack@eecs.umich.edu // if this cache is mostly inclusive, we 1255141Sgblack@eecs.umich.edu // keep the block in the Exclusive state, 1265135Sgblack@eecs.umich.edu // and pass it upwards as Modified 1275141Sgblack@eecs.umich.edu // (writable and dirty), hence we have 1285141Sgblack@eecs.umich.edu // multiple caches, all on the same path 1295141Sgblack@eecs.umich.edu // towards memory, all considering the 1305141Sgblack@eecs.umich.edu // same block writable, but only one 1315141Sgblack@eecs.umich.edu // considering it Modified 1325141Sgblack@eecs.umich.edu 1335141Sgblack@eecs.umich.edu // we get away with multiple caches (on 1345141Sgblack@eecs.umich.edu // the same path to memory) considering 1355141Sgblack@eecs.umich.edu // the block writeable as we always enter 1365141Sgblack@eecs.umich.edu // the cache hierarchy through a cache, 1375141Sgblack@eecs.umich.edu // and first snoop upwards in all other 1385141Sgblack@eecs.umich.edu // branches 1395135Sgblack@eecs.umich.edu blk->status &= ~BlkDirty; 1405141Sgblack@eecs.umich.edu } else { 1415141Sgblack@eecs.umich.edu // if we're responding after our own miss, 1425135Sgblack@eecs.umich.edu // there's a window where the recipient didn't 1435141Sgblack@eecs.umich.edu // know it was getting ownership and may not 1445141Sgblack@eecs.umich.edu // have responded to snoops correctly, so we 1455141Sgblack@eecs.umich.edu // have to respond with a shared line 1465141Sgblack@eecs.umich.edu pkt->setHasSharers(); 1475135Sgblack@eecs.umich.edu } 1485141Sgblack@eecs.umich.edu } 1495141Sgblack@eecs.umich.edu } else { 1505141Sgblack@eecs.umich.edu // otherwise only respond with a shared copy 1515141Sgblack@eecs.umich.edu pkt->setHasSharers(); 1525141Sgblack@eecs.umich.edu } 1535141Sgblack@eecs.umich.edu } 1545141Sgblack@eecs.umich.edu } 1555141Sgblack@eecs.umich.edu} 1565141Sgblack@eecs.umich.edu 1575141Sgblack@eecs.umich.edu///////////////////////////////////////////////////// 1585141Sgblack@eecs.umich.edu// 1595141Sgblack@eecs.umich.edu// Access path: requests coming in from the CPU side 1605264Sgblack@eecs.umich.edu// 1615141Sgblack@eecs.umich.edu///////////////////////////////////////////////////// 1625141Sgblack@eecs.umich.edu 1635141Sgblack@eecs.umich.edubool 1645141Sgblack@eecs.umich.eduCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, 1655141Sgblack@eecs.umich.edu PacketList &writebacks) 1665141Sgblack@eecs.umich.edu{ 1675141Sgblack@eecs.umich.edu 1685141Sgblack@eecs.umich.edu if (pkt->req->isUncacheable()) { 
        assert(pkt->isRequest());

        chatty_assert(!(isReadOnly && pkt->isWrite()),
                      "Should never see a write in a read-only cache %s\n",
                      name());

        DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

        // flush and invalidate any existing block
        CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
        if (old_blk && old_blk->isValid()) {
            BaseCache::evictBlock(old_blk, writebacks);
        }

        blk = nullptr;
        // lookupLatency is the latency in case the request is uncacheable.
        lat = lookupLatency;
        return false;
    }

    return BaseCache::access(pkt, blk, lat, writebacks);
}

void
Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // We use forwardLatency here because we are copying writebacks to
        // write buffer.

        // Call isCachedAbove for Writebacks, CleanEvicts and
        // WriteCleans to discover if the block is cached above.
        if (isCachedAbove(wbPkt)) {
            if (wbPkt->cmd == MemCmd::CleanEvict) {
                // Delete CleanEvict because cached copies exist above. The
                // packet destructor will delete the request object because
                // this is a non-snoop request packet which does not require a
                // response.
                delete wbPkt;
            } else if (wbPkt->cmd == MemCmd::WritebackClean) {
                // clean writeback, do not send since the block is
                // still cached above
                assert(writebackClean);
                delete wbPkt;
            } else {
                assert(wbPkt->cmd == MemCmd::WritebackDirty ||
                       wbPkt->cmd == MemCmd::WriteClean);
                // Set BLOCK_CACHED flag in Writeback and send below, so that
                // the Writeback does not reset the bit corresponding to this
                // address in the snoop filter below.
                wbPkt->setBlockCached();
                allocateWriteBuffer(wbPkt, forward_time);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            allocateWriteBuffer(wbPkt, forward_time);
        }
        writebacks.pop_front();
    }
}

void
Cache::doWritebacksAtomic(PacketList& writebacks)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // Call isCachedAbove for both Writebacks and CleanEvicts. If
        // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
        // and discard CleanEvicts.
        if (isCachedAbove(wbPkt, false)) {
            if (wbPkt->cmd == MemCmd::WritebackDirty ||
                wbPkt->cmd == MemCmd::WriteClean) {
                // Set BLOCK_CACHED flag in Writeback and send below,
                // so that the Writeback does not reset the bit
                // corresponding to this address in the snoop filter
                // below. We can discard CleanEvicts because cached
                // copies exist above. Atomic mode isCachedAbove
                // modifies packet to set BLOCK_CACHED flag
                memSidePort.sendAtomic(wbPkt);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            memSidePort.sendAtomic(wbPkt);
        }
        writebacks.pop_front();
        // In case of CleanEvicts, the packet destructor will delete the
        // request object because this is a non-snoop request packet which
        // does not require a response.
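        // Unlike the timing path, the atomic variant completes the
        // writeback inline, so the packet can be freed as soon as
        // sendAtomic has returned.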
        delete wbPkt;
    }
}


void
Cache::recvTimingSnoopResp(PacketPtr pkt)
{
    DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

    // determine if the response is from a snoop request we created
    // (in which case it should be in the outstandingSnoop), or if we
    // merely forwarded someone else's snoop request
    const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
        outstandingSnoop.end();

    if (!forwardAsSnoop) {
        // the packet came from this cache, so sink it here and do not
        // forward it
        assert(pkt->cmd == MemCmd::HardPFResp);

        outstandingSnoop.erase(pkt->req);

        DPRINTF(Cache, "Got prefetch response from above for addr "
                "%#llx (%s)\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
        recvTimingResp(pkt);
        return;
    }

    // forwardLatency is set here because there is a response from an
    // upper level cache.
    // To pay the delay that occurs if the packet comes from the bus,
    // we charge also headerDelay.
    Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Reset the timing of the packet.
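    // The header delay has already been folded into snoop_resp_time
    // above, so clear it (and the payload delay) to avoid charging
    // the crossbar cost twice for the same packet.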
    pkt->headerDelay = pkt->payloadDelay = 0;
    memSidePort.schedTimingSnoopResp(pkt, snoop_resp_time);
}

void
Cache::promoteWholeLineWrites(PacketPtr pkt)
{
    // Cache line clearing instructions
    if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0) &&
        !pkt->isMaskedWrite()) {
        pkt->cmd = MemCmd::WriteLineReq;
        DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
    }
}

void
Cache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    // should never be satisfying an uncacheable access as we
    // flush and invalidate any existing block as part of the
    // lookup
    assert(!pkt->req->isUncacheable());

    BaseCache::handleTimingReqHit(pkt, blk, request_time);
}

void
Cache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time,
                           Tick request_time)
{
    if (pkt->req->isUncacheable()) {
        // ignore any existing MSHR if we are dealing with an
        // uncacheable request

        // should have flushed and have no valid block
        assert(!blk || !blk->isValid());

        mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;

        if (pkt->isWrite()) {
            allocateWriteBuffer(pkt, forward_time);
        } else {
            assert(pkt->isRead());

            // uncacheable accesses always allocate a new MSHR

            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
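            // note: allocateMissBuffer (in BaseCache) also schedules
            // the memory-side send event once the MSHR is in place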
            allocateMissBuffer(pkt, forward_time);
        }

        return;
    }

    Addr blk_addr = pkt->getBlockAddr(blkSize);

    MSHR *mshr = mshrQueue.findMatch(blk_addr, pkt->isSecure());

    // Software prefetch handling:
    // To keep the core from waiting on data it won't look at
    // anyway, send back a response with dummy data. Miss handling
    // will continue asynchronously. Unfortunately, the core will
    // insist upon freeing original Packet/Request, so we have to
    // create a new pair with a different lifecycle. Note that this
    // processing happens before any MSHR munging on behalf of
    // this request because this new Request will be the one stored
    // into the MSHRs, not the original.
    if (pkt->cmd.isSWPrefetch()) {
        assert(pkt->needsResponse());
        assert(pkt->req->hasPaddr());
        assert(!pkt->req->isUncacheable());

        // There's no reason to add a prefetch as an additional target
        // to an existing MSHR. If an outstanding request is already
        // in progress, there is nothing for the prefetch to do.
        // If this is the case, we don't even create a request at all.
        PacketPtr pf = nullptr;

        if (!mshr) {
            // copy the request and create a new SoftPFReq packet
            RequestPtr req = std::make_shared<Request>(pkt->req->getPaddr(),
                                                       pkt->req->getSize(),
                                                       pkt->req->getFlags(),
                                                       pkt->req->masterId());
            pf = new Packet(req, pkt->cmd);
            pf->allocate();
            assert(pf->matchAddr(pkt));
            assert(pf->getSize() == pkt->getSize());
        }

        pkt->makeTimingResponse();

        // request_time is used here, taking into account lat and the delay
        // charged if the packet comes from the xbar.
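        // the dummy response goes back immediately so the core can
        // retire the prefetch instruction; the copied packet (if any)
        // continues down the regular miss path below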
        cpuSidePort.schedTimingResp(pkt, request_time);

        // If an outstanding request is in progress (we found an
        // MSHR) this is set to null
        pkt = pf;
    }

    BaseCache::handleTimingReqMiss(pkt, mshr, blk, forward_time, request_time);
}

void
Cache::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(CacheTags, "%s tags:\n%s\n", __func__, tags->print());

    promoteWholeLineWrites(pkt);

    if (pkt->cacheResponding()) {
        // a cache above us (but not where the packet came from) is
        // responding to the request, in other words it has the line
        // in Modified or Owned state
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if the packet needs the block to be writable, and the cache
        // that has promised to respond (setting the cache responding
        // flag) is not providing writable (it is in Owned rather than
        // the Modified state), we know that there may be other Shared
        // copies in the system; go out and invalidate them all
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        // an upstream cache that had the line in Owned state
        // (dirty, but not writable), is responding and thus
        // transferring the dirty line from one branch of the
        // cache hierarchy to another

        // send out an express snoop and invalidate all other
        // copies (snooping a packet that needs writable is the
        // same as an invalidation), thus turning the Owned line
        // into a Modified line, note that we don't invalidate the
        // block in the current cache or any other cache on the
        // path to memory

        // create a downstream express snoop with cleared packet
        // flags, there is no need to allocate any data as the
        // packet is merely used to co-ordinate state transitions
        Packet *snoop_pkt = new Packet(pkt, true, false);

        // also reset the bus time that the original packet has
        // not yet paid for
        snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;

        // make this an instantaneous express snoop, and let the
        // other caches in the system know that another cache
        // is responding, because we have found the authoritative
        // copy (Modified or Owned) that will supply the right
        // data
        snoop_pkt->setExpressSnoop();
        snoop_pkt->setCacheResponding();

        // this express snoop travels towards the memory, and at
        // every crossbar it is snooped upwards thus reaching
        // every cache in the system
        bool M5_VAR_USED success = memSidePort.sendTimingReq(snoop_pkt);
        // express snoops always succeed
        assert(success);

        // main memory will delete the snoop packet

        // queue for deletion, as opposed to immediate deletion, as
        // the sending cache is still relying on the packet
        pendingDelete.reset(pkt);

        // no need to take any further action in this particular cache
        // as an upstream cache has already committed to responding,
        // and we have already sent out any express snoops in the
        // section above to ensure all other copies in the system are
        // invalidated
        return;
    }

    BaseCache::recvTimingReq(pkt);
}

PacketPtr
Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                        bool needsWritable,
                        bool is_whole_line_write) const
{
    // should never see evictions here
    assert(!cpu_pkt->isEviction());

    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable() ||
        (!blkValid && cpu_pkt->isUpgrade()) ||
        cpu_pkt->cmd == MemCmd::InvalidateReq || cpu_pkt->isClean()) {
        // uncacheable requests and upgrades from upper-level caches
        // that missed completely just go through as is
        return nullptr;
    }

    assert(cpu_pkt->needsResponse());

    MemCmd cmd;
    // @TODO make useUpgrades a parameter.
    // Note that ownership protocols require upgrade, otherwise a
    // write miss on a shared owned block will generate a ReadExcl,
    // which will clobber the owned copy.
    const bool useUpgrades = true;
    assert(cpu_pkt->cmd != MemCmd::WriteLineReq || is_whole_line_write);
    if (is_whole_line_write) {
        assert(!blkValid || !blk->isWritable());
        // forward as invalidate to all other caches, this gives us
        // the line in Exclusive state, and invalidates all other
        // copies
        cmd = MemCmd::InvalidateReq;
    } else if (blkValid && useUpgrades) {
        // only reason to be here is that blk is read only and we need
        // it to be writable
        assert(needsWritable);
        assert(!blk->isWritable());
        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
               cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
        // Even though this SC will fail, we still need to send out the
        // request and get the data to supply it to other snoopers in the
        // case where the determination that the StoreCond fails is delayed
        // due to all caches not being on the same local bus.
        cmd = MemCmd::SCUpgradeFailReq;
    } else {
        // block is invalid

        // If the request does not need a writable there are two cases
        // where we need to ensure the response will not fetch the
        // block in dirty state:
        // * this cache is read only and it does not perform
        //   writebacks,
        // * this cache is mostly exclusive and will not fill (since
        //   it does not fill it will have to writeback the dirty data
        //   immediately which generates unnecessary writebacks).
        bool force_clean_rsp = isReadOnly || clusivity == Enums::mostly_excl;
        cmd = needsWritable ? MemCmd::ReadExReq :
            (force_clean_rsp ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
    }
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);

    // if there are upstream caches that have already marked the
    // packet as having sharers (not passing writable), pass that info
    // downstream
    if (cpu_pkt->hasSharers() && !needsWritable) {
        // note that cpu_pkt may have spent a considerable time in the
        // MSHR queue and that the information could possibly be out
        // of date, however, there is no harm in conservatively
        // assuming the block has sharers
        pkt->setHasSharers();
        DPRINTF(Cache, "%s: passing hasSharers from %s to %s\n",
                __func__, cpu_pkt->print(), pkt->print());
    }

    // the packet should be block aligned
    assert(pkt->getAddr() == pkt->getBlockAddr(blkSize));

    pkt->allocate();
    DPRINTF(Cache, "%s: created %s from %s\n", __func__, pkt->print(),
            cpu_pkt->print());
    return pkt;
}


Cycles
Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
                           PacketList &writebacks)
{
    // deal with the packets that go through the write path of
    // the cache, i.e. any evictions and writes
    if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
        (pkt->req->isUncacheable() && pkt->isWrite())) {
        Cycles latency = ticksToCycles(memSidePort.sendAtomic(pkt));

        // at this point, if the request was an uncacheable write
        // request, it has been satisfied by a memory below and the
        // packet carries the response back
        assert(!(pkt->req->isUncacheable() && pkt->isWrite()) ||
               pkt->isResponse());

        return latency;
    }

    // only misses left

    PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable(),
                                         pkt->isWholeLineWrite(blkSize));

    bool is_forward = (bus_pkt == nullptr);

    if (is_forward) {
        // just forwarding the same request to the next level
        // no local cache operation involved
        bus_pkt = pkt;
    }

    DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__,
            bus_pkt->print());

#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    Cycles latency = ticksToCycles(memSidePort.sendAtomic(bus_pkt));

    bool is_invalidate = bus_pkt->isInvalidate();

    // We are now dealing with the response handling
    DPRINTF(Cache, "%s: Receive response: %s in state %i\n", __func__,
            bus_pkt->print(), old_state);

    // If packet was a forward, the response (if any) is already
    // in place in the bus_pkt == pkt structure, so we don't need
    // to do anything. Otherwise, use the separate bus_pkt to
    // generate response to pkt and then delete it.
    if (!is_forward) {
        if (pkt->needsResponse()) {
            assert(bus_pkt->isResponse());
            if (bus_pkt->isError()) {
                pkt->makeAtomicResponse();
                pkt->copyError(bus_pkt);
            } else if (pkt->isWholeLineWrite(blkSize)) {
                // note the use of pkt, not bus_pkt here.

                // write-line request to the cache that promoted
                // the write to a whole line
                const bool allocate = allocOnFill(pkt->cmd) &&
                    (!writeAllocator || writeAllocator->allocate());
                blk = handleFill(bus_pkt, blk, writebacks, allocate);
                assert(blk != nullptr);
                is_invalidate = false;
                satisfyRequest(pkt, blk);
            } else if (bus_pkt->isRead() ||
                       bus_pkt->cmd == MemCmd::UpgradeResp) {
                // we're updating cache state to allow us to
                // satisfy the upstream request from the cache
                blk = handleFill(bus_pkt, blk, writebacks,
                                 allocOnFill(pkt->cmd));
                satisfyRequest(pkt, blk);
                maintainClusivity(pkt->fromCache(), blk);
            } else {
                // we're satisfying the upstream request without
                // modifying cache state, e.g., a write-through
                pkt->makeAtomicResponse();
            }
        }
        delete bus_pkt;
    }

    if (is_invalidate && blk && blk->isValid()) {
        invalidateBlock(blk);
    }

    return latency;
}

Tick
Cache::recvAtomic(PacketPtr pkt)
{
    promoteWholeLineWrites(pkt);

    // follow the same flow as in recvTimingReq, and check if a cache
    // above us is responding
    if (pkt->cacheResponding()) {
        assert(!pkt->req->isCacheInvalidate());
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if a cache is responding, and it had the line in Owned
        // rather than Modified state, we need to invalidate any
        // copies that are not on the same path to memory
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        return memSidePort.sendAtomic(pkt);
    }

    return BaseCache::recvAtomic(pkt);
}


/////////////////////////////////////////////////////
//
// Response handling: responses from the memory side
//
/////////////////////////////////////////////////////


void
Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk)
{
    QueueEntry::Target *initial_tgt = mshr->getTarget();
    // First offset for critical word first calculations
    const int initial_offset = initial_tgt->pkt->getOffset(blkSize);

    const bool is_error = pkt->isError();
    // allow invalidation responses originating from write-line
    // requests to be discarded
    bool is_invalidate = pkt->isInvalidate() &&
        !mshr->wasWholeLineWrite;

    MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
    for (auto &target: targets) {
        Packet *tgt_pkt = target.pkt;
        switch (target.source) {
          case MSHR::Target::FromCPU:
            Tick completion_time;
            // Here we charge on completion_time the delay of the xbar if the
            // packet comes from it, charged on headerDelay.
            completion_time = pkt->headerDelay;

            // Software prefetch handling for cache closest to core
            if (tgt_pkt->cmd.isSWPrefetch()) {
                // a software prefetch would have already been ack'd
                // immediately with dummy data so the core would be able to
                // retire it. This request completes right here, so we
                // deallocate it.
                delete tgt_pkt;
                break; // skip response
            }

            // unlike the other packet flows, where data is found in other
            // caches or memory and brought back, write-line requests always
            // have the data right away, so the above check for "is fill?"
            // cannot actually be determined until examining the stored MSHR
            // state. We "catch up" with that logic here, which is duplicated
            // from above.
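            // a WriteLineReq target means the MSHR performed a
            // whole-line fill on its behalf, so the block must now be
            // present and writable, as the asserts below verify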
            if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
                assert(!is_error);
                assert(blk);
                assert(blk->isWritable());
            }

            if (blk && blk->isValid() && !mshr->isForward) {
                satisfyRequest(tgt_pkt, blk, true, mshr->hasPostDowngrade());

                // How many bytes past the first request is this one
                int transfer_offset =
                    tgt_pkt->getOffset(blkSize) - initial_offset;
                if (transfer_offset < 0) {
                    transfer_offset += blkSize;
                }

                // If not critical word (offset) return payloadDelay.
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache or
                // the core.
                completion_time += clockEdge(responseLatency) +
                    (transfer_offset ? pkt->payloadDelay : 0);

                assert(!tgt_pkt->req->isUncacheable());

                assert(tgt_pkt->req->masterId() < system->maxMasters());
                missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] +=
                    completion_time - target.recvTime;
            } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
                // failed StoreCond upgrade
                assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
                       tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
                       tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache or
                // the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                tgt_pkt->req->setExtraData(0);
            } else {
                // We are about to send a response to a cache above
                // that asked for an invalidation; we need to
                // invalidate our copy immediately as the most
                // up-to-date copy of the block will now be in the
                // cache above. It will also prevent this cache from
                // responding (if the block was previously dirty) to
                // snoops as they should snoop the caches above where
                // they will get the response from.
                if (is_invalidate && blk && blk->isValid()) {
                    invalidateBlock(blk);
                }
                // not a cache fill, just forwarding response
                // responseLatency is the latency of the return path
                // from lower level caches/memory to the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                if (pkt->isRead() && !is_error) {
                    // sanity check
                    assert(pkt->matchAddr(tgt_pkt));
                    assert(pkt->getSize() >= tgt_pkt->getSize());

                    tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
                }

                // this response did not allocate here and therefore
                // it was not consumed, make sure that any flags are
                // carried over to the cache above
                tgt_pkt->copyResponderFlags(pkt);
            }
            tgt_pkt->makeTimingResponse();
            // if this packet is an error copy that to the new packet
            if (is_error)
                tgt_pkt->copyError(pkt);
            if (tgt_pkt->cmd == MemCmd::ReadResp &&
                (is_invalidate || mshr->hasPostInvalidate())) {
                // If intermediate cache got ReadRespWithInvalidate,
                // propagate that. Response should not have
                // isInvalidate() set otherwise.
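                // the cache above must drop its copy once it has
                // consumed the data, mirroring the invalidation we
                // apply to our own copy after the target loop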
                tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
                DPRINTF(Cache, "%s: updated cmd to %s\n", __func__,
                        tgt_pkt->print());
            }
            // Reset the bus additional time as it is now accounted for
            tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
            cpuSidePort.schedTimingResp(tgt_pkt, completion_time);
            break;

          case MSHR::Target::FromPrefetcher:
            assert(tgt_pkt->cmd == MemCmd::HardPFReq);
            if (blk)
                blk->status |= BlkHWPrefetched;
            delete tgt_pkt;
            break;

          case MSHR::Target::FromSnoop:
            // I don't believe that a snoop can be in an error state
            assert(!is_error);
            // response to snoop request
            DPRINTF(Cache, "processing deferred snoop...\n");
            // If the response is invalidating, a snooping target can
            // be satisfied if it is also invalidating. If the response
            // is not only invalidating, but more specifically an
            // InvalidateResp, and the MSHR was created due to an
            // InvalidateReq, then a cache above is waiting to satisfy a
            // WriteLineReq. In this case even a non-invalidating snoop
            // is added as a target here since this is the ordering
            // point. When the InvalidateResp reaches this cache, the
            // snooping target will further snoop the cache above with
            // the WriteLineReq.
            assert(!is_invalidate || pkt->cmd == MemCmd::InvalidateResp ||
                   pkt->req->isCacheMaintenance() ||
                   mshr->hasPostInvalidate());
            handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
            break;

          default:
            panic("Illegal target->source enum %d\n", target.source);
        }
    }

    maintainClusivity(targets.hasFromCache, blk);

    if (blk && blk->isValid()) {
        // an invalidate response stemming from a write line request
        // should not invalidate the block, so check if the
        // invalidation should be discarded
        if (is_invalidate || mshr->hasPostInvalidate()) {
            invalidateBlock(blk);
        } else if (mshr->hasPostDowngrade()) {
            blk->status &= ~BlkWritable;
        }
    }
}

PacketPtr
Cache::evictBlock(CacheBlk *blk)
{
    PacketPtr pkt = (blk->isDirty() || writebackClean) ?
        writebackBlk(blk) : cleanEvictBlk(blk);

    invalidateBlock(blk);

    return pkt;
}

PacketPtr
Cache::cleanEvictBlk(CacheBlk *blk)
{
    assert(!writebackClean);
    assert(blk && blk->isValid() && !blk->isDirty());

    // Creating a zero sized write, a message to the snoop filter
    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);

    PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
    pkt->allocate();
    DPRINTF(Cache, "Create CleanEvict %s\n", pkt->print());

    return pkt;
}

/////////////////////////////////////////////////////
//
// Snoop path: requests coming in from the memory side
//
/////////////////////////////////////////////////////

void
Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
                              bool already_copied, bool pending_inval)
{
    // sanity check
    assert(req_pkt->isRequest());
    assert(req_pkt->needsResponse());

    DPRINTF(Cache, "%s: for %s\n", __func__, req_pkt->print());
    // timing-mode snoop responses require a new packet, unless we
    // already made a copy...
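    // (deferred snoops pass already_copied as true, since a copy was
    // made by MSHR::handleSnoop when the snoop was deferred)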
    PacketPtr pkt = req_pkt;
    if (!already_copied)
        // do not clear flags, and allocate space for data if the
        // packet needs it (the only packets that carry data are read
        // responses)
        pkt = new Packet(req_pkt, false, req_pkt->isRead());

    assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
           pkt->hasSharers());
    pkt->makeTimingResponse();
    if (pkt->isRead()) {
        pkt->setDataFromBlock(blk_data, blkSize);
    }
    if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
        // Assume we defer a response to a read from a far-away cache
        // A, then later defer a ReadExcl from a cache B on the same
        // bus as us. We'll assert cacheResponding in both cases, but
        // in the latter case cacheResponding will keep the
        // invalidation from reaching cache A. This special response
        // tells cache A that it gets the block to satisfy its read,
        // but must immediately invalidate it.
        pkt->cmd = MemCmd::ReadRespWithInvalidate;
    }
    // Here we consider forward_time, paying for just forward latency and
    // also charging the delay provided by the xbar.
    // forward_time is used as send_time in next allocateWriteBuffer().
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    DPRINTF(CacheVerbose, "%s: created response: %s tick: %lu\n", __func__,
            pkt->print(), forward_time);
    memSidePort.schedTimingSnoopResp(pkt, forward_time);
}

uint32_t
Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
                   bool is_deferred, bool pending_inval)
{
    DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());
    // deferred snoops can only happen in timing mode
    assert(!(is_deferred && !is_timing));
    // pending_inval only makes sense on deferred snoops
    assert(!(pending_inval && !is_deferred));
    assert(pkt->isRequest());

    // the packet may get modified if we or a forwarded snooper
    // responds in atomic mode, so remember a few things about the
    // original packet up front
    bool invalidate = pkt->isInvalidate();
    bool M5_VAR_USED needs_writable = pkt->needsWritable();

    // at the moment we could get an uncacheable write which does not
    // have the invalidate flag, and we need a suitable way of dealing
    // with this case
    panic_if(invalidate && pkt->req->isUncacheable(),
             "%s got an invalidating uncacheable snoop request %s",
             name(), pkt->print());

    uint32_t snoop_delay = 0;

    if (forwardSnoops) {
        // first propagate snoop upward to see if anyone above us wants to
        // handle it.
        // Save & restore packet src since it will get
        // rewritten to be relative to cpu-side bus (if any)
        if (is_timing) {
            // copy the packet so that we can clear any flags before
            // forwarding it upwards, we also allocate data (passing
            // the pointer along in case of static data), in case
            // there is a snoop hit in upper levels
            Packet snoopPkt(pkt, true, true);
            snoopPkt.setExpressSnoop();
            // the snoop packet does not need to wait any additional
            // time
            snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
            cpuSidePort.sendTimingSnoopReq(&snoopPkt);

            // add the header delay (including crossbar and snoop
            // delays) of the upward snoop to the snoop delay for this
            // cache
            snoop_delay += snoopPkt.headerDelay;

            // If this request is a prefetch or clean evict and an upper level
            // signals block present, make sure to propagate the block
            // presence to the requester.
            if (snoopPkt.isBlockCached()) {
                pkt->setBlockCached();
            }
            // If the request was satisfied by snooping the cache
            // above, mark the original packet as satisfied too.
            if (snoopPkt.satisfied()) {
                pkt->setSatisfied();
            }

            // Copy over flags from the snoop response to make sure we
            // inform the final destination
            pkt->copyResponderFlags(&snoopPkt);
        } else {
            bool already_responded = pkt->cacheResponding();
            cpuSidePort.sendAtomicSnoop(pkt);
            if (!already_responded && pkt->cacheResponding()) {
                // cache-to-cache response from some upper cache:
                // forward response to original requester
                assert(pkt->isResponse());
            }
        }
    }

    bool respond = false;
    bool blk_valid = blk && blk->isValid();
    if (pkt->isClean()) {
        if (blk_valid && blk->isDirty()) {
            DPRINTF(CacheVerbose, "%s: packet (snoop) %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
            PacketList writebacks;
            writebacks.push_back(wb_pkt);

            if (is_timing) {
                // anything that is merely forwarded pays for the forward
                // latency and the delay provided by the crossbar
                Tick forward_time = clockEdge(forwardLatency) +
                    pkt->headerDelay;
                doWritebacks(writebacks, forward_time);
            } else {
                doWritebacksAtomic(writebacks);
            }
            pkt->setSatisfied();
        }
    } else if (!blk_valid) {
        DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__,
                pkt->print());
        if (is_deferred) {
            // we no longer have the block, and will not respond, but a
            // packet was allocated in MSHR::handleSnoop and we have
            // to delete it
            assert(pkt->needsResponse());

            // we have passed the block to a cache upstream, that
            // cache should be responding
            assert(pkt->cacheResponding());

            delete pkt;
        }
        return snoop_delay;
    } else {
        DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__,
                pkt->print(), blk->print());

        // We may end up modifying both the block state and the packet (if
        // we respond in atomic mode), so just figure out what to do now
        // and then do it later. We respond to all snoops that need
        // responses provided we have the block in dirty state. The
        // invalidation itself is taken care of below. We don't respond to
        // cache maintenance operations as this is done by the destination
        // xbar.
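        // only a dirty copy obliges this cache to supply the data;
        // for clean copies the response can come from below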
        respond = blk->isDirty() && pkt->needsResponse();

        chatty_assert(!(isReadOnly && blk->isDirty()), "Should never have "
                      "a dirty block in a read-only cache %s\n", name());
    }

    // Invalidate any prefetches from below that would strip write permissions
    // MemCmd::HardPFReq is only observed by upstream caches. After missing
    // above and in its own cache, a new MemCmd::ReadReq is created that
    // downstream caches observe.
    if (pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
                "from lower cache\n", pkt->getAddr(), pkt->print());
        pkt->setBlockCached();
        return snoop_delay;
    }

    if (pkt->isRead() && !invalidate) {
        // reading without requiring the line in a writable state
        assert(!needs_writable);
        pkt->setHasSharers();

        // if the requesting packet is uncacheable, retain the line in
        // the current state, otherwise unset the writable flag,
        // which means we go from Modified to Owned (and will respond
        // below), remain in Owned (and will respond below), from
        // Exclusive to Shared, or remain in Shared
        if (!pkt->req->isUncacheable())
            blk->status &= ~BlkWritable;
        DPRINTF(Cache, "new state is %s\n", blk->print());
    }

    if (respond) {
        // prevent anyone else from responding, cache as well as
        // memory, and also prevent any memory from even seeing the
        // request
        pkt->setCacheResponding();
        if (!pkt->isClean() && blk->isWritable()) {
            // inform the cache hierarchy that this cache had the line
            // in the Modified state so that we avoid unnecessary
            // invalidations (see Packet::setResponderHadWritable)
            pkt->setResponderHadWritable();

            // in the case of an uncacheable request there is no point
            // in setting the responderHadWritable flag, but since the
            // recipient does not care there is no harm in doing so
        } else {
            // if the packet has needsWritable set we invalidate our
            // copy below and all other copies will be invalidated
            // through express snoops, and if needsWritable is not set
            // we already called setHasSharers above
        }

        // if we are returning a writable and dirty (Modified) line,
        // we should be invalidating the line
        panic_if(!invalidate && !pkt->hasSharers(),
                 "%s is passing a Modified line through %s, "
                 "but keeping the block", name(), pkt->print());

        if (is_timing) {
            doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
        } else {
            pkt->makeAtomicResponse();
            // packets such as upgrades do not actually have any data
            // payload
            if (pkt->hasData())
                pkt->setDataFromBlock(blk->data, blkSize);
        }

        // When a block is compressed, it must first be decompressed before
        // being read, and this increases the snoop delay.
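        // compressor is a BaseCache member that is only non-null when
        // a cache compressor has been configured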
        if (compressor && pkt->isRead()) {
            snoop_delay += compressor->getDecompressionLatency(blk);
        }
    }

    if (!respond && is_deferred) {
        assert(pkt->needsResponse());
        delete pkt;
    }

    // Do this last in case it deallocates block data or something
    // like that
    if (blk_valid && invalidate) {
        invalidateBlock(blk);
        DPRINTF(Cache, "new state is %s\n", blk->print());
    }

    return snoop_delay;
}


void
Cache::recvTimingSnoopReq(PacketPtr pkt)
{
    DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());

    // no need to snoop requests that are not in range
    if (!inRange(pkt->getAddr())) {
        return;
    }

    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);

    Addr blk_addr = pkt->getBlockAddr(blkSize);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    // Update the latency cost of the snoop so that the crossbar can
    // account for it. Do not overwrite what other neighbouring caches
    // have already done, rather take the maximum. The update is
    // tentative, for cases where we return before an upward snoop
    // happens below.
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
                                         lookupLatency * clockPeriod());

    // Inform request (Prefetch, CleanEvict or Writeback) from below of
    // MSHR hit via setBlockCached.
    if (mshr && pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Setting block cached for %s from lower cache on "
                "mshr hit\n", pkt->print());
        pkt->setBlockCached();
        return;
    }

    // Let the MSHR itself track the snoop and decide whether we want
    // to go ahead and do the regular cache snoop
    if (mshr && mshr->handleSnoop(pkt, order++)) {
        DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s)."
                " mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
                mshr->print());

        if (mshr->getNumTargets() > numTarget)
            warn("allocating bonus target for snoop"); //handle later
        return;
    }

    // We also need to check the writeback buffers and handle those
    WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure);
    if (wb_entry) {
        DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
                pkt->getAddr(), is_secure ? "s" : "ns");
        // Expect to see only Writebacks and/or CleanEvicts here, both of
        // which should not be generated for uncacheable data.
        assert(!wb_entry->isUncacheable());
        // There should only be a single request responsible for generating
        // Writebacks/CleanEvicts.
        assert(wb_entry->getNumTargets() == 1);
        PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
        assert(wb_pkt->isEviction() || wb_pkt->cmd == MemCmd::WriteClean);

        if (pkt->isEviction()) {
            // if the block is found in the write queue, set the BLOCK_CACHED
            // flag for Writeback/CleanEvict snoop. On return the snoop will
            // propagate the BLOCK_CACHED flag in Writeback packets and prevent
            // any CleanEvicts from travelling down the memory hierarchy.
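            // the data still lives in our write buffer, so from the
            // snooper's point of view the block is cached at this level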
            pkt->setBlockCached();
            DPRINTF(Cache, "%s: Squashing %s from lower cache on writequeue "
                    "hit\n", __func__, pkt->print());
            return;
        }

        // conceptually writebacks are no different to other blocks in
        // this cache, so the behaviour is modelled after handleSnoop,
        // the difference being that instead of querying the block
        // state to determine if it is dirty and writable, we use the
        // command and fields of the writeback packet
        bool respond = wb_pkt->cmd == MemCmd::WritebackDirty &&
            pkt->needsResponse();
        bool have_writable = !wb_pkt->hasSharers();
        bool invalidate = pkt->isInvalidate();

        if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
            assert(!pkt->needsWritable());
            pkt->setHasSharers();
            wb_pkt->setHasSharers();
        }

        if (respond) {
            pkt->setCacheResponding();

            if (have_writable) {
                pkt->setResponderHadWritable();
            }

            doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
                                   false, false);
        }

        if (invalidate && wb_pkt->cmd != MemCmd::WriteClean) {
            // Invalidation trumps our writeback... discard here
            // Note: markInService will remove entry from writeback buffer.
            markInService(wb_entry);
            delete wb_pkt;
        }
    }

    // If this was a shared writeback, there may still be
    // other shared copies above that require invalidation.
    // We could be more selective and return here if the
    // request is non-exclusive or if the writeback is
    // exclusive.
    uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false);

    // Override what we did when we first saw the snoop, as we now
    // also have the cost of the upwards snoops to account for
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay +
                                         lookupLatency * clockPeriod());
}

Tick
Cache::recvAtomicSnoop(PacketPtr pkt)
{
    // no need to snoop requests that are not in range.
    if (!inRange(pkt->getAddr())) {
        return 0;
    }

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
    uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
    return snoop_delay + lookupLatency * clockPeriod();
}

bool
Cache::isCachedAbove(PacketPtr pkt, bool is_timing)
{
    if (!forwardSnoops)
        return false;
    // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
    // Writeback snoops into upper level caches to check for copies of the
    // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict
    // packet, the cache can inform the crossbar below of presence or absence
    // of the block.
    if (is_timing) {
        Packet snoop_pkt(pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // Assert that packet is either Writeback or CleanEvict and not a
        // prefetch request because prefetch requests need an MSHR and may
        // generate a snoop response.
        assert(pkt->isEviction() || pkt->cmd == MemCmd::WriteClean);
        snoop_pkt.senderState = nullptr;
        cpuSidePort.sendTimingSnoopReq(&snoop_pkt);
        // Writeback/CleanEvict snoops do not generate a snoop response.
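        // express snoops complete within the sendTimingSnoopReq call,
        // so the flags on snoop_pkt are valid as soon as it returns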
        assert(!(snoop_pkt.cacheResponding()));
        return snoop_pkt.isBlockCached();
    } else {
        cpuSidePort.sendAtomicSnoop(pkt);
        return pkt->isBlockCached();
    }
}

bool
Cache::sendMSHRQueuePacket(MSHR* mshr)
{
    assert(mshr);

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;

    if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
        DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());

        // we should never have hardware prefetches to allocated
        // blocks
        assert(!tags->findBlock(mshr->blkAddr, mshr->isSecure));

        // We need to check the caches above us to verify that
        // they don't have a copy of this block in the dirty state
        // at the moment. Without this check we could get a stale
        // copy from memory that might get used in place of the
        // dirty one.
        Packet snoop_pkt(tgt_pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // We are sending this packet upwards, but if it hits we will
        // get a snoop response that we end up treating just like a
        // normal response, hence it needs the MSHR as its sender
        // state
        snoop_pkt.senderState = mshr;
        cpuSidePort.sendTimingSnoopReq(&snoop_pkt);

        // Check to see if the prefetch was squashed by an upper cache
        // (to prevent us from grabbing the line), or if a writeback
        // arrived between the time the prefetch was placed in the
        // MSHRs and when it was selected to be sent.

        // It is important to check cacheResponding before
        // prefetchSquashed. If another cache has committed to
        // responding, it will be sending a dirty response which will
        // arrive at the MSHR allocated for this request. Checking the
        // prefetchSquash first may result in the MSHR being
        // prematurely deallocated.
        if (snoop_pkt.cacheResponding()) {
            auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
            assert(r.second);

            // if we are getting a snoop response with no sharers it
            // will be allocated as Modified
            bool pending_modified_resp = !snoop_pkt.hasSharers();
            markInService(mshr, pending_modified_resp);

            DPRINTF(Cache, "Upward snoop of prefetch for addr"
                    " %#x (%s) hit\n",
                    tgt_pkt->getAddr(), tgt_pkt->isSecure() ? "s" : "ns");
            return false;
        }

        if (snoop_pkt.isBlockCached()) {
            DPRINTF(Cache, "Block present, prefetch squashed by cache. "
                    "Deallocating mshr target %#x.\n",
                    mshr->blkAddr);

            // Deallocate the mshr target
            if (mshrQueue.forceDeallocateTarget(mshr)) {
                // Clear block if this deallocation resulted in freeing an
                // mshr when all had previously been utilized
                clearBlocked(Blocked_NoMSHRs);
            }

            // given that no response is expected, delete Request and Packet
            delete tgt_pkt;

            return false;
        }
    }

    return BaseCache::sendMSHRQueuePacket(mshr);
}

Cache*
CacheParams::create()
{
    assert(tags);
    assert(replacement_policy);

    return new Cache(this);
}