/*
 * Copyright (c) 2012-2016 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Hansson
 */

/**
 * @file
 * Describes a cache based on template policies.
502810Srdreslin@umich.edu */ 512810Srdreslin@umich.edu 5211051Sandreas.hansson@arm.com#ifndef __MEM_CACHE_CACHE_HH__ 5311051Sandreas.hansson@arm.com#define __MEM_CACHE_CACHE_HH__ 542810Srdreslin@umich.edu 5511859Sandreas.hansson@arm.com#include <unordered_set> 5611859Sandreas.hansson@arm.com 572810Srdreslin@umich.edu#include "base/misc.hh" // fatal, panic, and warn 5811197Sandreas.hansson@arm.com#include "enums/Clusivity.hh" 595338Sstever@gmail.com#include "mem/cache/base.hh" 605338Sstever@gmail.com#include "mem/cache/blk.hh" 615338Sstever@gmail.com#include "mem/cache/mshr.hh" 6210815Sdavid.guillen@arm.com#include "mem/cache/tags/base.hh" 6311053Sandreas.hansson@arm.com#include "params/Cache.hh" 644458Sstever@eecs.umich.edu#include "sim/eventq.hh" 654458Sstever@eecs.umich.edu 662813Srdreslin@umich.edu//Forward decleration 673861Sstever@eecs.umich.educlass BasePrefetcher; 682810Srdreslin@umich.edu 692810Srdreslin@umich.edu/** 702810Srdreslin@umich.edu * A template-policy based cache. The behavior of the cache can be altered by 712810Srdreslin@umich.edu * supplying different template policies. TagStore handles all tag and data 729264Sdjordje.kovacevic@arm.com * storage @sa TagStore, \ref gem5MemorySystem "gem5 Memory System" 732810Srdreslin@umich.edu */ 742810Srdreslin@umich.educlass Cache : public BaseCache 752810Srdreslin@umich.edu{ 762810Srdreslin@umich.edu public: 7710815Sdavid.guillen@arm.com 7810815Sdavid.guillen@arm.com /** A typedef for a list of CacheBlk pointers. */ 7910815Sdavid.guillen@arm.com typedef std::list<CacheBlk*> BlkList; 802810Srdreslin@umich.edu 812810Srdreslin@umich.edu protected: 822810Srdreslin@umich.edu 838856Sandreas.hansson@arm.com /** 848856Sandreas.hansson@arm.com * The CPU-side port extends the base cache slave port with access 858856Sandreas.hansson@arm.com * functions for functional, atomic and timing requests. 
868856Sandreas.hansson@arm.com */ 878856Sandreas.hansson@arm.com class CpuSidePort : public CacheSlavePort 883738Sstever@eecs.umich.edu { 898856Sandreas.hansson@arm.com private: 903738Sstever@eecs.umich.edu 918856Sandreas.hansson@arm.com // a pointer to our specific cache implementation 9210815Sdavid.guillen@arm.com Cache *cache; 933738Sstever@eecs.umich.edu 948856Sandreas.hansson@arm.com protected: 954478Sstever@eecs.umich.edu 968975Sandreas.hansson@arm.com virtual bool recvTimingSnoopResp(PacketPtr pkt); 978948Sandreas.hansson@arm.com 988975Sandreas.hansson@arm.com virtual bool recvTimingReq(PacketPtr pkt); 993738Sstever@eecs.umich.edu 1003738Sstever@eecs.umich.edu virtual Tick recvAtomic(PacketPtr pkt); 1013738Sstever@eecs.umich.edu 1023738Sstever@eecs.umich.edu virtual void recvFunctional(PacketPtr pkt); 1038856Sandreas.hansson@arm.com 1049090Sandreas.hansson@arm.com virtual AddrRangeList getAddrRanges() const; 1058856Sandreas.hansson@arm.com 1068856Sandreas.hansson@arm.com public: 1078856Sandreas.hansson@arm.com 10810815Sdavid.guillen@arm.com CpuSidePort(const std::string &_name, Cache *_cache, 1098856Sandreas.hansson@arm.com const std::string &_label); 1108856Sandreas.hansson@arm.com 1113738Sstever@eecs.umich.edu }; 1123738Sstever@eecs.umich.edu 1138856Sandreas.hansson@arm.com /** 1148914Sandreas.hansson@arm.com * Override the default behaviour of sendDeferredPacket to enable 1158914Sandreas.hansson@arm.com * the memory-side cache port to also send requests based on the 1168914Sandreas.hansson@arm.com * current MSHR status. This queue has a pointer to our specific 1178914Sandreas.hansson@arm.com * cache implementation and is used by the MemSidePort. 
1188914Sandreas.hansson@arm.com */ 11910713Sandreas.hansson@arm.com class CacheReqPacketQueue : public ReqPacketQueue 1208914Sandreas.hansson@arm.com { 1218914Sandreas.hansson@arm.com 1228914Sandreas.hansson@arm.com protected: 1238914Sandreas.hansson@arm.com 12410815Sdavid.guillen@arm.com Cache &cache; 12510713Sandreas.hansson@arm.com SnoopRespPacketQueue &snoopRespQueue; 1268914Sandreas.hansson@arm.com 1278914Sandreas.hansson@arm.com public: 1288914Sandreas.hansson@arm.com 12910815Sdavid.guillen@arm.com CacheReqPacketQueue(Cache &cache, MasterPort &port, 13010713Sandreas.hansson@arm.com SnoopRespPacketQueue &snoop_resp_queue, 13110713Sandreas.hansson@arm.com const std::string &label) : 13210713Sandreas.hansson@arm.com ReqPacketQueue(cache, port, label), cache(cache), 13310713Sandreas.hansson@arm.com snoopRespQueue(snoop_resp_queue) { } 1348914Sandreas.hansson@arm.com 1358914Sandreas.hansson@arm.com /** 1368914Sandreas.hansson@arm.com * Override the normal sendDeferredPacket and do not only 1378914Sandreas.hansson@arm.com * consider the transmit list (used for responses), but also 1388914Sandreas.hansson@arm.com * requests. 1398914Sandreas.hansson@arm.com */ 1408914Sandreas.hansson@arm.com virtual void sendDeferredPacket(); 1418914Sandreas.hansson@arm.com 14211375Sandreas.hansson@arm.com /** 14311375Sandreas.hansson@arm.com * Check if there is a conflicting snoop response about to be 14411375Sandreas.hansson@arm.com * send out, and if so simply stall any requests, and schedule 14511375Sandreas.hansson@arm.com * a send event at the same time as the next snoop response is 14611375Sandreas.hansson@arm.com * being sent out. 
14711375Sandreas.hansson@arm.com */ 14811375Sandreas.hansson@arm.com bool checkConflictingSnoop(Addr addr) 14911375Sandreas.hansson@arm.com { 15011375Sandreas.hansson@arm.com if (snoopRespQueue.hasAddr(addr)) { 15111375Sandreas.hansson@arm.com DPRINTF(CachePort, "Waiting for snoop response to be " 15211375Sandreas.hansson@arm.com "sent\n"); 15311375Sandreas.hansson@arm.com Tick when = snoopRespQueue.deferredPacketReadyTime(); 15411375Sandreas.hansson@arm.com schedSendEvent(when); 15511375Sandreas.hansson@arm.com return true; 15611375Sandreas.hansson@arm.com } 15711375Sandreas.hansson@arm.com return false; 15811375Sandreas.hansson@arm.com } 1598914Sandreas.hansson@arm.com }; 1608914Sandreas.hansson@arm.com 1618914Sandreas.hansson@arm.com /** 1628856Sandreas.hansson@arm.com * The memory-side port extends the base cache master port with 1638856Sandreas.hansson@arm.com * access functions for functional, atomic and timing snoops. 1648856Sandreas.hansson@arm.com */ 1658856Sandreas.hansson@arm.com class MemSidePort : public CacheMasterPort 1663738Sstever@eecs.umich.edu { 1678856Sandreas.hansson@arm.com private: 1683738Sstever@eecs.umich.edu 1698914Sandreas.hansson@arm.com /** The cache-specific queue. 
*/ 17010713Sandreas.hansson@arm.com CacheReqPacketQueue _reqQueue; 17110713Sandreas.hansson@arm.com 17210713Sandreas.hansson@arm.com SnoopRespPacketQueue _snoopRespQueue; 1738914Sandreas.hansson@arm.com 1748856Sandreas.hansson@arm.com // a pointer to our specific cache implementation 17510815Sdavid.guillen@arm.com Cache *cache; 1763738Sstever@eecs.umich.edu 1778856Sandreas.hansson@arm.com protected: 1784478Sstever@eecs.umich.edu 1798975Sandreas.hansson@arm.com virtual void recvTimingSnoopReq(PacketPtr pkt); 1808948Sandreas.hansson@arm.com 1818975Sandreas.hansson@arm.com virtual bool recvTimingResp(PacketPtr pkt); 1823738Sstever@eecs.umich.edu 1838948Sandreas.hansson@arm.com virtual Tick recvAtomicSnoop(PacketPtr pkt); 1843738Sstever@eecs.umich.edu 1858948Sandreas.hansson@arm.com virtual void recvFunctionalSnoop(PacketPtr pkt); 1864458Sstever@eecs.umich.edu 1878856Sandreas.hansson@arm.com public: 1888856Sandreas.hansson@arm.com 18910815Sdavid.guillen@arm.com MemSidePort(const std::string &_name, Cache *_cache, 1908856Sandreas.hansson@arm.com const std::string &_label); 1913738Sstever@eecs.umich.edu }; 1923738Sstever@eecs.umich.edu 1932810Srdreslin@umich.edu /** Tag and data Storage */ 19410815Sdavid.guillen@arm.com BaseTags *tags; 1954626Sstever@eecs.umich.edu 1962810Srdreslin@umich.edu /** Prefetcher */ 1973861Sstever@eecs.umich.edu BasePrefetcher *prefetcher; 1982810Srdreslin@umich.edu 1994671Sstever@eecs.umich.edu /** Temporary cache block for occasional transitory use */ 20010815Sdavid.guillen@arm.com CacheBlk *tempBlock; 2014671Sstever@eecs.umich.edu 2022810Srdreslin@umich.edu /** 2035707Shsul@eecs.umich.edu * This cache should allocate a block on a line-sized write miss. 2043860Sstever@eecs.umich.edu */ 2053860Sstever@eecs.umich.edu const bool doFastWrites; 2063860Sstever@eecs.umich.edu 2075875Ssteve.reinhardt@amd.com /** 20810345SCurtis.Dunham@arm.com * Turn line-sized writes into WriteInvalidate transactions. 
20910345SCurtis.Dunham@arm.com */ 21010345SCurtis.Dunham@arm.com void promoteWholeLineWrites(PacketPtr pkt); 21110345SCurtis.Dunham@arm.com 21210345SCurtis.Dunham@arm.com /** 2135875Ssteve.reinhardt@amd.com * Notify the prefetcher on every access, not just misses. 2145875Ssteve.reinhardt@amd.com */ 2155875Ssteve.reinhardt@amd.com const bool prefetchOnAccess; 2163860Sstever@eecs.umich.edu 21711197Sandreas.hansson@arm.com /** 21811197Sandreas.hansson@arm.com * Clusivity with respect to the upstream cache, determining if we 21911197Sandreas.hansson@arm.com * fill into both this cache and the cache above on a miss. Note 22011197Sandreas.hansson@arm.com * that we currently do not support strict clusivity policies. 22111197Sandreas.hansson@arm.com */ 22211197Sandreas.hansson@arm.com const Enums::Clusivity clusivity; 22311197Sandreas.hansson@arm.com 22411199Sandreas.hansson@arm.com /** 22511199Sandreas.hansson@arm.com * Determine if clean lines should be written back or not. In 22611199Sandreas.hansson@arm.com * cases where a downstream cache is mostly inclusive we likely 22711199Sandreas.hansson@arm.com * want it to act as a victim cache also for lines that have not 22811199Sandreas.hansson@arm.com * been modified. Hence, we cannot simply drop the line (or send a 22911199Sandreas.hansson@arm.com * clean evict), but rather need to send the actual data. 
23011199Sandreas.hansson@arm.com */ 23111199Sandreas.hansson@arm.com const bool writebackClean; 23211199Sandreas.hansson@arm.com 2333860Sstever@eecs.umich.edu /** 23411190Sandreas.hansson@arm.com * Upstream caches need this packet until true is returned, so 23511190Sandreas.hansson@arm.com * hold it for deletion until a subsequent call 2369063SAli.Saidi@ARM.com */ 23711190Sandreas.hansson@arm.com std::unique_ptr<Packet> pendingDelete; 2389063SAli.Saidi@ARM.com 2399063SAli.Saidi@ARM.com /** 24011197Sandreas.hansson@arm.com * Writebacks from the tempBlock, resulting on the response path 24111197Sandreas.hansson@arm.com * in atomic mode, must happen after the call to recvAtomic has 24211197Sandreas.hansson@arm.com * finished (for the right ordering of the packets). We therefore 24311197Sandreas.hansson@arm.com * need to hold on to the packets, and have a method and an event 24411197Sandreas.hansson@arm.com * to send them. 24511197Sandreas.hansson@arm.com */ 24611197Sandreas.hansson@arm.com PacketPtr tempBlockWriteback; 24711197Sandreas.hansson@arm.com 24811197Sandreas.hansson@arm.com /** 24911197Sandreas.hansson@arm.com * Send the outstanding tempBlock writeback. To be called after 25011197Sandreas.hansson@arm.com * recvAtomic finishes in cases where the block we filled is in 25111197Sandreas.hansson@arm.com * fact the tempBlock, and now needs to be written back. 25211197Sandreas.hansson@arm.com */ 25311197Sandreas.hansson@arm.com void writebackTempBlockAtomic() { 25411197Sandreas.hansson@arm.com assert(tempBlockWriteback != nullptr); 25511197Sandreas.hansson@arm.com PacketList writebacks{tempBlockWriteback}; 25611197Sandreas.hansson@arm.com doWritebacksAtomic(writebacks); 25711197Sandreas.hansson@arm.com tempBlockWriteback = nullptr; 25811197Sandreas.hansson@arm.com } 25911197Sandreas.hansson@arm.com 26011197Sandreas.hansson@arm.com /** 26111197Sandreas.hansson@arm.com * An event to writeback the tempBlock after recvAtomic 26211197Sandreas.hansson@arm.com * finishes. 
To avoid other calls to recvAtomic getting in 26311197Sandreas.hansson@arm.com * between, we create this event with a higher priority. 26411197Sandreas.hansson@arm.com */ 26511197Sandreas.hansson@arm.com EventWrapper<Cache, &Cache::writebackTempBlockAtomic> \ 26611197Sandreas.hansson@arm.com writebackTempBlockAtomicEvent; 26711197Sandreas.hansson@arm.com 26811197Sandreas.hansson@arm.com /** 26911276Sandreas.hansson@arm.com * Store the outstanding requests that we are expecting snoop 27011276Sandreas.hansson@arm.com * responses from so we can determine which snoop responses we 27111276Sandreas.hansson@arm.com * generated and which ones were merely forwarded. 27211276Sandreas.hansson@arm.com */ 27311276Sandreas.hansson@arm.com std::unordered_set<RequestPtr> outstandingSnoop; 27411276Sandreas.hansson@arm.com 27511276Sandreas.hansson@arm.com /** 2763860Sstever@eecs.umich.edu * Does all the processing necessary to perform the provided request. 2773860Sstever@eecs.umich.edu * @param pkt The memory request to perform. 27810048Saminfar@gmail.com * @param blk The cache block to be updated. 2793860Sstever@eecs.umich.edu * @param lat The latency of the access. 2803860Sstever@eecs.umich.edu * @param writebacks List for any writebacks that need to be performed. 2815707Shsul@eecs.umich.edu * @return Boolean indicating whether the request was satisfied. 2823860Sstever@eecs.umich.edu */ 28310815Sdavid.guillen@arm.com bool access(PacketPtr pkt, CacheBlk *&blk, 2849288Sandreas.hansson@arm.com Cycles &lat, PacketList &writebacks); 2854219Srdreslin@umich.edu 2864219Srdreslin@umich.edu /** 2874219Srdreslin@umich.edu *Handle doing the Compare and Swap function for SPARC. 
2884219Srdreslin@umich.edu */ 28910815Sdavid.guillen@arm.com void cmpAndSwap(CacheBlk *blk, PacketPtr pkt); 2903860Sstever@eecs.umich.edu 2913860Sstever@eecs.umich.edu /** 29210028SGiacomo.Gabrielli@arm.com * Find a block frame for new block at address addr targeting the 29310028SGiacomo.Gabrielli@arm.com * given security space, assuming that the block is not currently 29410028SGiacomo.Gabrielli@arm.com * in the cache. Append writebacks if any to provided packet 29511484Snikos.nikoleris@arm.com * list. Return free block frame. May return nullptr if there are 29610028SGiacomo.Gabrielli@arm.com * no replaceable blocks at the moment. 2975350Sstever@gmail.com */ 29810815Sdavid.guillen@arm.com CacheBlk *allocateBlock(Addr addr, bool is_secure, PacketList &writebacks); 2995350Sstever@gmail.com 3005350Sstever@gmail.com /** 30111197Sandreas.hansson@arm.com * Invalidate a cache block. 30211197Sandreas.hansson@arm.com * 30311197Sandreas.hansson@arm.com * @param blk Block to invalidate 30411197Sandreas.hansson@arm.com */ 30511197Sandreas.hansson@arm.com void invalidateBlock(CacheBlk *blk); 30611197Sandreas.hansson@arm.com 30711197Sandreas.hansson@arm.com /** 30811601Sandreas.hansson@arm.com * Maintain the clusivity of this cache by potentially 30911601Sandreas.hansson@arm.com * invalidating a block. This method works in conjunction with 31011601Sandreas.hansson@arm.com * satisfyRequest, but is separate to allow us to handle all MSHR 31111601Sandreas.hansson@arm.com * targets before potentially dropping a block. 
31211601Sandreas.hansson@arm.com * 31311601Sandreas.hansson@arm.com * @param from_cache Whether we have dealt with a packet from a cache 31411601Sandreas.hansson@arm.com * @param blk The block that should potentially be dropped 31511601Sandreas.hansson@arm.com */ 31611601Sandreas.hansson@arm.com void maintainClusivity(bool from_cache, CacheBlk *blk); 31711601Sandreas.hansson@arm.com 31811601Sandreas.hansson@arm.com /** 3193860Sstever@eecs.umich.edu * Populates a cache block and handles all outstanding requests for the 3203860Sstever@eecs.umich.edu * satisfied fill request. This version takes two memory requests. One 3213860Sstever@eecs.umich.edu * contains the fill data, the other is an optional target to satisfy. 3224626Sstever@eecs.umich.edu * @param pkt The memory request with the fill data. 3233860Sstever@eecs.umich.edu * @param blk The cache block if it already exists. 3243860Sstever@eecs.umich.edu * @param writebacks List for any writebacks that need to be performed. 32511197Sandreas.hansson@arm.com * @param allocate Whether to allocate a block or use the temp block 3263860Sstever@eecs.umich.edu * @return Pointer to the new cache block. 3273860Sstever@eecs.umich.edu */ 32810815Sdavid.guillen@arm.com CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk, 32911197Sandreas.hansson@arm.com PacketList &writebacks, bool allocate); 3303860Sstever@eecs.umich.edu 33111197Sandreas.hansson@arm.com /** 33211197Sandreas.hansson@arm.com * Determine whether we should allocate on a fill or not. If this 33311197Sandreas.hansson@arm.com * cache is mostly inclusive with regards to the upstream cache(s) 33411197Sandreas.hansson@arm.com * we always allocate (for any non-forwarded and cacheable 33511197Sandreas.hansson@arm.com * requests). 
In the case of a mostly exclusive cache, we allocate 33611197Sandreas.hansson@arm.com * on fill if the packet did not come from a cache, thus if we: 33711197Sandreas.hansson@arm.com * are dealing with a whole-line write (the latter behaves much 33811197Sandreas.hansson@arm.com * like a writeback), the original target packet came from a 33911197Sandreas.hansson@arm.com * non-caching source, or if we are performing a prefetch or LLSC. 34011197Sandreas.hansson@arm.com * 34111197Sandreas.hansson@arm.com * @param cmd Command of the incoming requesting packet 34211197Sandreas.hansson@arm.com * @return Whether we should allocate on the fill 34311197Sandreas.hansson@arm.com */ 34411211Sandreas.sandberg@arm.com inline bool allocOnFill(MemCmd cmd) const override 34511197Sandreas.hansson@arm.com { 34611197Sandreas.hansson@arm.com return clusivity == Enums::mostly_incl || 34711197Sandreas.hansson@arm.com cmd == MemCmd::WriteLineReq || 34811197Sandreas.hansson@arm.com cmd == MemCmd::ReadReq || 34911197Sandreas.hansson@arm.com cmd == MemCmd::WriteReq || 35011197Sandreas.hansson@arm.com cmd.isPrefetch() || 35111197Sandreas.hansson@arm.com cmd.isLLSC(); 35211197Sandreas.hansson@arm.com } 3539548Sandreas.hansson@arm.com 3549548Sandreas.hansson@arm.com /** 3559548Sandreas.hansson@arm.com * Performs the access specified by the request. 3569548Sandreas.hansson@arm.com * @param pkt The request to perform. 3579548Sandreas.hansson@arm.com * @return The result of the access. 
3589548Sandreas.hansson@arm.com */ 3599548Sandreas.hansson@arm.com bool recvTimingReq(PacketPtr pkt); 3609548Sandreas.hansson@arm.com 3619548Sandreas.hansson@arm.com /** 36210883Sali.jafri@arm.com * Insert writebacks into the write buffer 36310883Sali.jafri@arm.com */ 36410883Sali.jafri@arm.com void doWritebacks(PacketList& writebacks, Tick forward_time); 36510883Sali.jafri@arm.com 36610883Sali.jafri@arm.com /** 36711130Sali.jafri@arm.com * Send writebacks down the memory hierarchy in atomic mode 36811130Sali.jafri@arm.com */ 36911130Sali.jafri@arm.com void doWritebacksAtomic(PacketList& writebacks); 37011130Sali.jafri@arm.com 37111130Sali.jafri@arm.com /** 37211375Sandreas.hansson@arm.com * Handling the special case of uncacheable write responses to 37311375Sandreas.hansson@arm.com * make recvTimingResp less cluttered. 37411375Sandreas.hansson@arm.com */ 37511375Sandreas.hansson@arm.com void handleUncacheableWriteResp(PacketPtr pkt); 37611375Sandreas.hansson@arm.com 37711375Sandreas.hansson@arm.com /** 3789548Sandreas.hansson@arm.com * Handles a response (cache line fill/write ack) from the bus. 3799548Sandreas.hansson@arm.com * @param pkt The response packet 3809548Sandreas.hansson@arm.com */ 3819548Sandreas.hansson@arm.com void recvTimingResp(PacketPtr pkt); 3829548Sandreas.hansson@arm.com 3839548Sandreas.hansson@arm.com /** 3849548Sandreas.hansson@arm.com * Snoops bus transactions to maintain coherence. 3859548Sandreas.hansson@arm.com * @param pkt The current bus transaction. 3869548Sandreas.hansson@arm.com */ 3879548Sandreas.hansson@arm.com void recvTimingSnoopReq(PacketPtr pkt); 3889548Sandreas.hansson@arm.com 3899548Sandreas.hansson@arm.com /** 3909548Sandreas.hansson@arm.com * Handle a snoop response. 
3919548Sandreas.hansson@arm.com * @param pkt Snoop response packet 3929548Sandreas.hansson@arm.com */ 3939548Sandreas.hansson@arm.com void recvTimingSnoopResp(PacketPtr pkt); 3949548Sandreas.hansson@arm.com 3959548Sandreas.hansson@arm.com /** 3969548Sandreas.hansson@arm.com * Performs the access specified by the request. 3979548Sandreas.hansson@arm.com * @param pkt The request to perform. 3989782Sandreas.hansson@arm.com * @return The number of ticks required for the access. 3999548Sandreas.hansson@arm.com */ 4009782Sandreas.hansson@arm.com Tick recvAtomic(PacketPtr pkt); 4019548Sandreas.hansson@arm.com 4029548Sandreas.hansson@arm.com /** 4039548Sandreas.hansson@arm.com * Snoop for the provided request in the cache and return the estimated 4049782Sandreas.hansson@arm.com * time taken. 4059548Sandreas.hansson@arm.com * @param pkt The memory request to snoop 4069782Sandreas.hansson@arm.com * @return The number of ticks required for the snoop. 4079548Sandreas.hansson@arm.com */ 4089782Sandreas.hansson@arm.com Tick recvAtomicSnoop(PacketPtr pkt); 4099548Sandreas.hansson@arm.com 4109548Sandreas.hansson@arm.com /** 4119548Sandreas.hansson@arm.com * Performs the access specified by the request. 4129548Sandreas.hansson@arm.com * @param pkt The request to perform. 4139548Sandreas.hansson@arm.com * @param fromCpuSide from the CPU side port or the memory side port 4149548Sandreas.hansson@arm.com */ 4159548Sandreas.hansson@arm.com void functionalAccess(PacketPtr pkt, bool fromCpuSide); 4169548Sandreas.hansson@arm.com 41711601Sandreas.hansson@arm.com /** 41811601Sandreas.hansson@arm.com * Perform any necessary updates to the block and perform any data 41911601Sandreas.hansson@arm.com * exchange between the packet and the block. The flags of the 42011601Sandreas.hansson@arm.com * packet are also set accordingly. 
42111601Sandreas.hansson@arm.com * 42211601Sandreas.hansson@arm.com * @param pkt Request packet from upstream that hit a block 42311601Sandreas.hansson@arm.com * @param blk Cache block that the packet hit 42411601Sandreas.hansson@arm.com * @param deferred_response Whether this hit is to block that 42511601Sandreas.hansson@arm.com * originally missed 42611601Sandreas.hansson@arm.com * @param pending_downgrade Whether the writable flag is to be removed 42711601Sandreas.hansson@arm.com * 42811601Sandreas.hansson@arm.com * @return True if the block is to be invalidated 42911601Sandreas.hansson@arm.com */ 43011601Sandreas.hansson@arm.com void satisfyRequest(PacketPtr pkt, CacheBlk *blk, 43111601Sandreas.hansson@arm.com bool deferred_response = false, 43211601Sandreas.hansson@arm.com bool pending_downgrade = false); 4334626Sstever@eecs.umich.edu 43410563Sandreas.hansson@arm.com void doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data, 4355319Sstever@gmail.com bool already_copied, bool pending_inval); 4363860Sstever@eecs.umich.edu 4373860Sstever@eecs.umich.edu /** 43811127Sandreas.hansson@arm.com * Perform an upward snoop if needed, and update the block state 43911127Sandreas.hansson@arm.com * (possibly invalidating the block). Also create a response if required. 44011127Sandreas.hansson@arm.com * 44111127Sandreas.hansson@arm.com * @param pkt Snoop packet 44211127Sandreas.hansson@arm.com * @param blk Cache block being snooped 44311127Sandreas.hansson@arm.com * @param is_timing Timing or atomic for the response 44411127Sandreas.hansson@arm.com * @param is_deferred Is this a deferred snoop or not? 44511127Sandreas.hansson@arm.com * @param pending_inval Do we have a pending invalidation? 
44611127Sandreas.hansson@arm.com * 44711127Sandreas.hansson@arm.com * @return The snoop delay incurred by the upwards snoop 4483860Sstever@eecs.umich.edu */ 44911127Sandreas.hansson@arm.com uint32_t handleSnoop(PacketPtr pkt, CacheBlk *blk, 45011127Sandreas.hansson@arm.com bool is_timing, bool is_deferred, bool pending_inval); 4513860Sstever@eecs.umich.edu 4523860Sstever@eecs.umich.edu /** 4533860Sstever@eecs.umich.edu * Create a writeback request for the given block. 4543860Sstever@eecs.umich.edu * @param blk The block to writeback. 4553860Sstever@eecs.umich.edu * @return The writeback request for the block. 4563860Sstever@eecs.umich.edu */ 45710815Sdavid.guillen@arm.com PacketPtr writebackBlk(CacheBlk *blk); 4583860Sstever@eecs.umich.edu 45910883Sali.jafri@arm.com /** 46010883Sali.jafri@arm.com * Create a CleanEvict request for the given block. 46110883Sali.jafri@arm.com * @param blk The block to evict. 46210883Sali.jafri@arm.com * @return The CleanEvict request for the block. 46310883Sali.jafri@arm.com */ 46410883Sali.jafri@arm.com PacketPtr cleanEvictBlk(CacheBlk *blk); 46510883Sali.jafri@arm.com 4669347SAndreas.Sandberg@arm.com 46711169Sandreas.hansson@arm.com void memWriteback() override; 46811169Sandreas.hansson@arm.com void memInvalidate() override; 46911169Sandreas.hansson@arm.com bool isDirty() const override; 4709347SAndreas.Sandberg@arm.com 4719347SAndreas.Sandberg@arm.com /** 4729347SAndreas.Sandberg@arm.com * Cache block visitor that writes back dirty cache blocks using 4739347SAndreas.Sandberg@arm.com * functional writes. 4749347SAndreas.Sandberg@arm.com * 4759347SAndreas.Sandberg@arm.com * \return Always returns true. 4769347SAndreas.Sandberg@arm.com */ 47710815Sdavid.guillen@arm.com bool writebackVisitor(CacheBlk &blk); 4789347SAndreas.Sandberg@arm.com /** 4799347SAndreas.Sandberg@arm.com * Cache block visitor that invalidates all blocks in the cache. 
4809347SAndreas.Sandberg@arm.com * 4819347SAndreas.Sandberg@arm.com * @warn Dirty cache lines will not be written back to memory. 4829347SAndreas.Sandberg@arm.com * 4839347SAndreas.Sandberg@arm.com * \return Always returns true. 4849347SAndreas.Sandberg@arm.com */ 48510815Sdavid.guillen@arm.com bool invalidateVisitor(CacheBlk &blk); 4869347SAndreas.Sandberg@arm.com 4879445SAndreas.Sandberg@ARM.com /** 48811452Sandreas.hansson@arm.com * Create an appropriate downstream bus request packet for the 4895365Sstever@gmail.com * given parameters. 49011452Sandreas.hansson@arm.com * @param cpu_pkt The miss that needs to be satisfied. 4915365Sstever@gmail.com * @param blk The block currently in the cache corresponding to 49211484Snikos.nikoleris@arm.com * cpu_pkt (nullptr if none). 49311452Sandreas.hansson@arm.com * @param needsWritable Indicates that the block must be writable 4945365Sstever@gmail.com * even if the request in cpu_pkt doesn't indicate that. 49511484Snikos.nikoleris@arm.com * @return A new Packet containing the request, or nullptr if the 4965365Sstever@gmail.com * current request in cpu_pkt should just be forwarded on. 4974626Sstever@eecs.umich.edu */ 49811452Sandreas.hansson@arm.com PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk, 49911452Sandreas.hansson@arm.com bool needsWritable) const; 5005365Sstever@gmail.com 5015365Sstever@gmail.com /** 50211375Sandreas.hansson@arm.com * Return the next queue entry to service, either a pending miss 50311375Sandreas.hansson@arm.com * from the MSHR queue, a buffered write from the write buffer, or 50411375Sandreas.hansson@arm.com * something from the prefetcher. This function is responsible 50511375Sandreas.hansson@arm.com * for prioritizing among those sources on the fly. 5065365Sstever@gmail.com */ 50711375Sandreas.hansson@arm.com QueueEntry* getNextQueueEntry(); 5085365Sstever@gmail.com 5095365Sstever@gmail.com /** 51010883Sali.jafri@arm.com * Send up a snoop request and find cached copies. 
If cached copies are 51110883Sali.jafri@arm.com * found, set the BLOCK_CACHED flag in pkt. 51210883Sali.jafri@arm.com */ 51311130Sali.jafri@arm.com bool isCachedAbove(PacketPtr pkt, bool is_timing = true) const; 51410883Sali.jafri@arm.com 51510883Sali.jafri@arm.com /** 5164626Sstever@eecs.umich.edu * Return whether there are any outstanding misses. 5174626Sstever@eecs.umich.edu */ 5184626Sstever@eecs.umich.edu bool outstandingMisses() const 5192810Srdreslin@umich.edu { 52011375Sandreas.hansson@arm.com return !mshrQueue.isEmpty(); 5212810Srdreslin@umich.edu } 5222810Srdreslin@umich.edu 52310028SGiacomo.Gabrielli@arm.com CacheBlk *findBlock(Addr addr, bool is_secure) const { 52410028SGiacomo.Gabrielli@arm.com return tags->findBlock(addr, is_secure); 5252810Srdreslin@umich.edu } 5262810Srdreslin@umich.edu 52711169Sandreas.hansson@arm.com bool inCache(Addr addr, bool is_secure) const override { 52810028SGiacomo.Gabrielli@arm.com return (tags->findBlock(addr, is_secure) != 0); 5293861Sstever@eecs.umich.edu } 5303861Sstever@eecs.umich.edu 53111169Sandreas.hansson@arm.com bool inMissQueue(Addr addr, bool is_secure) const override { 53210028SGiacomo.Gabrielli@arm.com return (mshrQueue.findMatch(addr, is_secure) != 0); 5333861Sstever@eecs.umich.edu } 5345875Ssteve.reinhardt@amd.com 5355875Ssteve.reinhardt@amd.com /** 5365875Ssteve.reinhardt@amd.com * Find next request ready time from among possible sources. 5375875Ssteve.reinhardt@amd.com */ 53811375Sandreas.hansson@arm.com Tick nextQueueReadyTime() const; 5399529Sandreas.hansson@arm.com 5409529Sandreas.hansson@arm.com public: 5419529Sandreas.hansson@arm.com /** Instantiates a basic cache object. */ 54211053Sandreas.hansson@arm.com Cache(const CacheParams *p); 5439529Sandreas.hansson@arm.com 5449813Srioshering@gmail.com /** Non-default destructor is needed to deallocate memory. 
*/ 5459813Srioshering@gmail.com virtual ~Cache(); 5469813Srioshering@gmail.com 54711169Sandreas.hansson@arm.com void regStats() override; 5488985SAli.Saidi@ARM.com 54911375Sandreas.hansson@arm.com /** 55011375Sandreas.hansson@arm.com * Take an MSHR, turn it into a suitable downstream packet, and 55111375Sandreas.hansson@arm.com * send it out. This construct allows a queue entry to choose a suitable 55211375Sandreas.hansson@arm.com * approach based on its type. 55311375Sandreas.hansson@arm.com * 55411375Sandreas.hansson@arm.com * @param mshr The MSHR to turn into a packet and send 55511375Sandreas.hansson@arm.com * @return True if the port is waiting for a retry 55611375Sandreas.hansson@arm.com */ 55711375Sandreas.hansson@arm.com bool sendMSHRQueuePacket(MSHR* mshr); 55811375Sandreas.hansson@arm.com 55911375Sandreas.hansson@arm.com /** 56011375Sandreas.hansson@arm.com * Similar to sendMSHR, but for a write-queue entry 56111375Sandreas.hansson@arm.com * instead. Create the packet, and send it, and if successful also 56211375Sandreas.hansson@arm.com * mark the entry in service. 56311375Sandreas.hansson@arm.com * 56411375Sandreas.hansson@arm.com * @param wq_entry The write-queue entry to turn into a packet and send 56511375Sandreas.hansson@arm.com * @return True if the port is waiting for a retry 56611375Sandreas.hansson@arm.com */ 56711375Sandreas.hansson@arm.com bool sendWriteQueuePacket(WriteQueueEntry* wq_entry); 56811375Sandreas.hansson@arm.com 5698985SAli.Saidi@ARM.com /** serialize the state of the caches 5708985SAli.Saidi@ARM.com * We currently don't support checkpointing cache state, so this panics. 5718985SAli.Saidi@ARM.com */ 57211168Sandreas.hansson@arm.com void serialize(CheckpointOut &cp) const override; 57311168Sandreas.hansson@arm.com void unserialize(CheckpointIn &cp) override; 5742810Srdreslin@umich.edu}; 5752810Srdreslin@umich.edu 57610815Sdavid.guillen@arm.com/** 57710815Sdavid.guillen@arm.com * Wrap a method and present it as a cache block visitor. 
57810815Sdavid.guillen@arm.com * 57910815Sdavid.guillen@arm.com * For example the forEachBlk method in the tag arrays expects a 58010815Sdavid.guillen@arm.com * callable object/function as their parameter. This class wraps a 58110815Sdavid.guillen@arm.com * method in an object and presents callable object that adheres to 58210815Sdavid.guillen@arm.com * the cache block visitor protocol. 58310815Sdavid.guillen@arm.com */ 58410815Sdavid.guillen@arm.comclass CacheBlkVisitorWrapper : public CacheBlkVisitor 58510815Sdavid.guillen@arm.com{ 58610815Sdavid.guillen@arm.com public: 58710815Sdavid.guillen@arm.com typedef bool (Cache::*VisitorPtr)(CacheBlk &blk); 58810815Sdavid.guillen@arm.com 58910815Sdavid.guillen@arm.com CacheBlkVisitorWrapper(Cache &_cache, VisitorPtr _visitor) 59010815Sdavid.guillen@arm.com : cache(_cache), visitor(_visitor) {} 59110815Sdavid.guillen@arm.com 59211168Sandreas.hansson@arm.com bool operator()(CacheBlk &blk) override { 59310815Sdavid.guillen@arm.com return (cache.*visitor)(blk); 59410815Sdavid.guillen@arm.com } 59510815Sdavid.guillen@arm.com 59610815Sdavid.guillen@arm.com private: 59710815Sdavid.guillen@arm.com Cache &cache; 59810815Sdavid.guillen@arm.com VisitorPtr visitor; 59910815Sdavid.guillen@arm.com}; 60010815Sdavid.guillen@arm.com 60110815Sdavid.guillen@arm.com/** 60210815Sdavid.guillen@arm.com * Cache block visitor that determines if there are dirty blocks in a 60310815Sdavid.guillen@arm.com * cache. 60410815Sdavid.guillen@arm.com * 60510815Sdavid.guillen@arm.com * Use with the forEachBlk method in the tag array to determine if the 60610815Sdavid.guillen@arm.com * array contains dirty blocks. 
60710815Sdavid.guillen@arm.com */ 60810815Sdavid.guillen@arm.comclass CacheBlkIsDirtyVisitor : public CacheBlkVisitor 60910815Sdavid.guillen@arm.com{ 61010815Sdavid.guillen@arm.com public: 61110815Sdavid.guillen@arm.com CacheBlkIsDirtyVisitor() 61210815Sdavid.guillen@arm.com : _isDirty(false) {} 61310815Sdavid.guillen@arm.com 61411168Sandreas.hansson@arm.com bool operator()(CacheBlk &blk) override { 61510815Sdavid.guillen@arm.com if (blk.isDirty()) { 61610815Sdavid.guillen@arm.com _isDirty = true; 61710815Sdavid.guillen@arm.com return false; 61810815Sdavid.guillen@arm.com } else { 61910815Sdavid.guillen@arm.com return true; 62010815Sdavid.guillen@arm.com } 62110815Sdavid.guillen@arm.com } 62210815Sdavid.guillen@arm.com 62310815Sdavid.guillen@arm.com /** 62410815Sdavid.guillen@arm.com * Does the array contain a dirty line? 62510815Sdavid.guillen@arm.com * 62610815Sdavid.guillen@arm.com * \return true if yes, false otherwise. 62710815Sdavid.guillen@arm.com */ 62810815Sdavid.guillen@arm.com bool isDirty() const { return _isDirty; }; 62910815Sdavid.guillen@arm.com 63010815Sdavid.guillen@arm.com private: 63110815Sdavid.guillen@arm.com bool _isDirty; 63210815Sdavid.guillen@arm.com}; 63310815Sdavid.guillen@arm.com 63411051Sandreas.hansson@arm.com#endif // __MEM_CACHE_CACHE_HH__ 635