base.hh revision 13223:081299f403fe
1955SN/A/* 2955SN/A * Copyright (c) 2012-2013, 2015-2016, 2018 ARM Limited 37816Ssteve.reinhardt@amd.com * All rights reserved. 45871Snate@binkert.org * 51762SN/A * The license below extends only to copyright in the software and shall 6955SN/A * not be construed as granting a license to any other intellectual 7955SN/A * property including but not limited to intellectual property relating 8955SN/A * to a hardware implementation of the functionality of the software 9955SN/A * licensed hereunder. You may use the software subject to the license 10955SN/A * terms below provided that you ensure that this notice is replicated 11955SN/A * unmodified and in its entirety in all distributions of the software, 12955SN/A * modified or unmodified, in source code or in binary form. 13955SN/A * 14955SN/A * Copyright (c) 2003-2005 The Regents of The University of Michigan 15955SN/A * All rights reserved. 16955SN/A * 17955SN/A * Redistribution and use in source and binary forms, with or without 18955SN/A * modification, are permitted provided that the following conditions are 19955SN/A * met: redistributions of source code must retain the above copyright 20955SN/A * notice, this list of conditions and the following disclaimer; 21955SN/A * redistributions in binary form must reproduce the above copyright 22955SN/A * notice, this list of conditions and the following disclaimer in the 23955SN/A * documentation and/or other materials provided with the distribution; 24955SN/A * neither the name of the copyright holders nor the names of its 25955SN/A * contributors may be used to endorse or promote products derived from 26955SN/A * this software without specific prior written permission. 
27955SN/A * 28955SN/A * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29955SN/A * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 302665Ssaidi@eecs.umich.edu * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 312665Ssaidi@eecs.umich.edu * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 325863Snate@binkert.org * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33955SN/A * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34955SN/A * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35955SN/A * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36955SN/A * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37955SN/A * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 388878Ssteve.reinhardt@amd.com * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 392632Sstever@eecs.umich.edu * 408878Ssteve.reinhardt@amd.com * Authors: Erik Hallnor 412632Sstever@eecs.umich.edu * Steve Reinhardt 42955SN/A * Ron Dreslinski 438878Ssteve.reinhardt@amd.com * Andreas Hansson 442632Sstever@eecs.umich.edu * Nikos Nikoleris 452761Sstever@eecs.umich.edu */ 462632Sstever@eecs.umich.edu 472632Sstever@eecs.umich.edu/** 482632Sstever@eecs.umich.edu * @file 492761Sstever@eecs.umich.edu * Declares a basic cache interface BaseCache. 
502761Sstever@eecs.umich.edu */ 512761Sstever@eecs.umich.edu 528878Ssteve.reinhardt@amd.com#ifndef __MEM_CACHE_BASE_HH__ 538878Ssteve.reinhardt@amd.com#define __MEM_CACHE_BASE_HH__ 542761Sstever@eecs.umich.edu 552761Sstever@eecs.umich.edu#include <cassert> 562761Sstever@eecs.umich.edu#include <cstdint> 572761Sstever@eecs.umich.edu#include <string> 582761Sstever@eecs.umich.edu 598878Ssteve.reinhardt@amd.com#include "base/addr_range.hh" 608878Ssteve.reinhardt@amd.com#include "base/statistics.hh" 612632Sstever@eecs.umich.edu#include "base/trace.hh" 622632Sstever@eecs.umich.edu#include "base/types.hh" 638878Ssteve.reinhardt@amd.com#include "debug/Cache.hh" 648878Ssteve.reinhardt@amd.com#include "debug/CachePort.hh" 652632Sstever@eecs.umich.edu#include "enums/Clusivity.hh" 66955SN/A#include "mem/cache/cache_blk.hh" 67955SN/A#include "mem/cache/mshr_queue.hh" 68955SN/A#include "mem/cache/tags/base.hh" 695863Snate@binkert.org#include "mem/cache/write_queue.hh" 705863Snate@binkert.org#include "mem/cache/write_queue_entry.hh" 715863Snate@binkert.org#include "mem/mem_object.hh" 725863Snate@binkert.org#include "mem/packet.hh" 735863Snate@binkert.org#include "mem/packet_queue.hh" 745863Snate@binkert.org#include "mem/qport.hh" 755863Snate@binkert.org#include "mem/request.hh" 765863Snate@binkert.org#include "sim/eventq.hh" 775863Snate@binkert.org#include "sim/serialize.hh" 785863Snate@binkert.org#include "sim/sim_exit.hh" 795863Snate@binkert.org#include "sim/system.hh" 808878Ssteve.reinhardt@amd.com 815863Snate@binkert.orgclass BaseMasterPort; 825863Snate@binkert.orgclass BasePrefetcher; 835863Snate@binkert.orgclass BaseSlavePort; 845863Snate@binkert.orgclass MSHR; 855863Snate@binkert.orgclass MasterPort; 865863Snate@binkert.orgclass QueueEntry; 875863Snate@binkert.orgstruct BaseCacheParams; 885863Snate@binkert.org 895863Snate@binkert.org/** 905863Snate@binkert.org * A basic cache interface. Implements some common functions for speed. 
915863Snate@binkert.org */ 925863Snate@binkert.orgclass BaseCache : public MemObject 935863Snate@binkert.org{ 945863Snate@binkert.org protected: 955863Snate@binkert.org /** 968878Ssteve.reinhardt@amd.com * Indexes to enumerate the MSHR queues. 975863Snate@binkert.org */ 985863Snate@binkert.org enum MSHRQueueIndex { 995863Snate@binkert.org MSHRQueue_MSHRs, 1006654Snate@binkert.org MSHRQueue_WriteBuffer 101955SN/A }; 1025396Ssaidi@eecs.umich.edu 1035863Snate@binkert.org public: 1045863Snate@binkert.org /** 1054202Sbinkertn@umich.edu * Reasons for caches to be blocked. 1065863Snate@binkert.org */ 1075863Snate@binkert.org enum BlockedCause { 1085863Snate@binkert.org Blocked_NoMSHRs = MSHRQueue_MSHRs, 1095863Snate@binkert.org Blocked_NoWBBuffers = MSHRQueue_WriteBuffer, 110955SN/A Blocked_NoTargets, 1116654Snate@binkert.org NUM_BLOCKED_CAUSES 1125273Sstever@gmail.com }; 1135871Snate@binkert.org 1145273Sstever@gmail.com protected: 1156655Snate@binkert.org 1168878Ssteve.reinhardt@amd.com /** 1176655Snate@binkert.org * A cache master port is used for the memory-side port of the 1186655Snate@binkert.org * cache, and in addition to the basic timing port that only sends 1196655Snate@binkert.org * response packets through a transmit list, it also offers the 1206655Snate@binkert.org * ability to schedule and send request packets (requests & 1215871Snate@binkert.org * writebacks). The send event is scheduled through schedSendEvent, 1226654Snate@binkert.org * and the sendDeferredPacket of the timing port is modified to 1235396Ssaidi@eecs.umich.edu * consider both the transmit list and the requests from the MSHR. 1248120Sgblack@eecs.umich.edu */ 1258120Sgblack@eecs.umich.edu class CacheMasterPort : public QueuedMasterPort 1268120Sgblack@eecs.umich.edu { 1278120Sgblack@eecs.umich.edu 1288120Sgblack@eecs.umich.edu public: 1298120Sgblack@eecs.umich.edu 1308120Sgblack@eecs.umich.edu /** 1318120Sgblack@eecs.umich.edu * Schedule a send of a request packet (from the MSHR). 
Note 1328879Ssteve.reinhardt@amd.com * that we could already have a retry outstanding. 1338879Ssteve.reinhardt@amd.com */ 1348879Ssteve.reinhardt@amd.com void schedSendEvent(Tick time) 1358879Ssteve.reinhardt@amd.com { 1368879Ssteve.reinhardt@amd.com DPRINTF(CachePort, "Scheduling send event at %llu\n", time); 1378879Ssteve.reinhardt@amd.com reqQueue.schedSendEvent(time); 1388879Ssteve.reinhardt@amd.com } 1398879Ssteve.reinhardt@amd.com 1408879Ssteve.reinhardt@amd.com protected: 1418879Ssteve.reinhardt@amd.com 1428879Ssteve.reinhardt@amd.com CacheMasterPort(const std::string &_name, BaseCache *_cache, 1438879Ssteve.reinhardt@amd.com ReqPacketQueue &_reqQueue, 1448879Ssteve.reinhardt@amd.com SnoopRespPacketQueue &_snoopRespQueue) : 1458120Sgblack@eecs.umich.edu QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue) 1468120Sgblack@eecs.umich.edu { } 1478120Sgblack@eecs.umich.edu 1488120Sgblack@eecs.umich.edu /** 1498120Sgblack@eecs.umich.edu * Memory-side port always snoops. 1508120Sgblack@eecs.umich.edu * 1518120Sgblack@eecs.umich.edu * @return always true 1528120Sgblack@eecs.umich.edu */ 1538120Sgblack@eecs.umich.edu virtual bool isSnooping() const { return true; } 1548120Sgblack@eecs.umich.edu }; 1558120Sgblack@eecs.umich.edu 1568120Sgblack@eecs.umich.edu /** 1578120Sgblack@eecs.umich.edu * Override the default behaviour of sendDeferredPacket to enable 1588120Sgblack@eecs.umich.edu * the memory-side cache port to also send requests based on the 1598879Ssteve.reinhardt@amd.com * current MSHR status. This queue has a pointer to our specific 1608879Ssteve.reinhardt@amd.com * cache implementation and is used by the MemSidePort. 
1618879Ssteve.reinhardt@amd.com */ 1628879Ssteve.reinhardt@amd.com class CacheReqPacketQueue : public ReqPacketQueue 1638879Ssteve.reinhardt@amd.com { 1648879Ssteve.reinhardt@amd.com 1658879Ssteve.reinhardt@amd.com protected: 1668879Ssteve.reinhardt@amd.com 1678879Ssteve.reinhardt@amd.com BaseCache &cache; 1688879Ssteve.reinhardt@amd.com SnoopRespPacketQueue &snoopRespQueue; 1698879Ssteve.reinhardt@amd.com 1708879Ssteve.reinhardt@amd.com public: 1718120Sgblack@eecs.umich.edu 1727816Ssteve.reinhardt@amd.com CacheReqPacketQueue(BaseCache &cache, MasterPort &port, 1737816Ssteve.reinhardt@amd.com SnoopRespPacketQueue &snoop_resp_queue, 1747816Ssteve.reinhardt@amd.com const std::string &label) : 1757816Ssteve.reinhardt@amd.com ReqPacketQueue(cache, port, label), cache(cache), 1767816Ssteve.reinhardt@amd.com snoopRespQueue(snoop_resp_queue) { } 1777816Ssteve.reinhardt@amd.com 1787816Ssteve.reinhardt@amd.com /** 1797816Ssteve.reinhardt@amd.com * Override the normal sendDeferredPacket and do not only 1807816Ssteve.reinhardt@amd.com * consider the transmit list (used for responses), but also 1815871Snate@binkert.org * requests. 1825871Snate@binkert.org */ 1836121Snate@binkert.org virtual void sendDeferredPacket(); 1845871Snate@binkert.org 1855871Snate@binkert.org /** 1866003Snate@binkert.org * Check if there is a conflicting snoop response about to be 1876655Snate@binkert.org * send out, and if so simply stall any requests, and schedule 188955SN/A * a send event at the same time as the next snoop response is 1895871Snate@binkert.org * being sent out. 
1905871Snate@binkert.org */ 1915871Snate@binkert.org bool checkConflictingSnoop(Addr addr) 1925871Snate@binkert.org { 193955SN/A if (snoopRespQueue.hasAddr(addr)) { 1946121Snate@binkert.org DPRINTF(CachePort, "Waiting for snoop response to be " 1958881Smarc.orr@gmail.com "sent\n"); 1966121Snate@binkert.org Tick when = snoopRespQueue.deferredPacketReadyTime(); 1976121Snate@binkert.org schedSendEvent(when); 1981533SN/A return true; 1996655Snate@binkert.org } 2006655Snate@binkert.org return false; 2016655Snate@binkert.org } 2026655Snate@binkert.org }; 2035871Snate@binkert.org 2045871Snate@binkert.org 2055863Snate@binkert.org /** 2065871Snate@binkert.org * The memory-side port extends the base cache master port with 2078878Ssteve.reinhardt@amd.com * access functions for functional, atomic and timing snoops. 2085871Snate@binkert.org */ 2095871Snate@binkert.org class MemSidePort : public CacheMasterPort 2105871Snate@binkert.org { 2115863Snate@binkert.org private: 2126121Snate@binkert.org 2135863Snate@binkert.org /** The cache-specific queue. 
*/ 2145871Snate@binkert.org CacheReqPacketQueue _reqQueue; 2158336Ssteve.reinhardt@amd.com 2168336Ssteve.reinhardt@amd.com SnoopRespPacketQueue _snoopRespQueue; 2178336Ssteve.reinhardt@amd.com 2188336Ssteve.reinhardt@amd.com // a pointer to our specific cache implementation 2194678Snate@binkert.org BaseCache *cache; 2208336Ssteve.reinhardt@amd.com 2218336Ssteve.reinhardt@amd.com protected: 2228336Ssteve.reinhardt@amd.com 2234678Snate@binkert.org virtual void recvTimingSnoopReq(PacketPtr pkt); 2244678Snate@binkert.org 2254678Snate@binkert.org virtual bool recvTimingResp(PacketPtr pkt); 2264678Snate@binkert.org 2277827Snate@binkert.org virtual Tick recvAtomicSnoop(PacketPtr pkt); 2287827Snate@binkert.org 2298336Ssteve.reinhardt@amd.com virtual void recvFunctionalSnoop(PacketPtr pkt); 2304678Snate@binkert.org 2318336Ssteve.reinhardt@amd.com public: 2328336Ssteve.reinhardt@amd.com 2338336Ssteve.reinhardt@amd.com MemSidePort(const std::string &_name, BaseCache *_cache, 2348336Ssteve.reinhardt@amd.com const std::string &_label); 2358336Ssteve.reinhardt@amd.com }; 2368336Ssteve.reinhardt@amd.com 2375871Snate@binkert.org /** 2385871Snate@binkert.org * A cache slave port is used for the CPU-side port of the cache, 2398336Ssteve.reinhardt@amd.com * and it is basically a simple timing port that uses a transmit 2408336Ssteve.reinhardt@amd.com * list for responses to the CPU (or connected master). In 2418336Ssteve.reinhardt@amd.com * addition, it has the functionality to block the port for 2428336Ssteve.reinhardt@amd.com * incoming requests. If blocked, the port will issue a retry once 2438336Ssteve.reinhardt@amd.com * unblocked. 2445871Snate@binkert.org */ 2458336Ssteve.reinhardt@amd.com class CacheSlavePort : public QueuedSlavePort 2468336Ssteve.reinhardt@amd.com { 2478336Ssteve.reinhardt@amd.com 2488336Ssteve.reinhardt@amd.com public: 2498336Ssteve.reinhardt@amd.com 2504678Snate@binkert.org /** Do not accept any new requests. 
*/ 2515871Snate@binkert.org void setBlocked(); 2524678Snate@binkert.org 2538336Ssteve.reinhardt@amd.com /** Return to normal operation and accept new requests. */ 2548336Ssteve.reinhardt@amd.com void clearBlocked(); 2558336Ssteve.reinhardt@amd.com 2568336Ssteve.reinhardt@amd.com bool isBlocked() const { return blocked; } 2578336Ssteve.reinhardt@amd.com 2588336Ssteve.reinhardt@amd.com protected: 2598336Ssteve.reinhardt@amd.com 2608336Ssteve.reinhardt@amd.com CacheSlavePort(const std::string &_name, BaseCache *_cache, 2618336Ssteve.reinhardt@amd.com const std::string &_label); 2628336Ssteve.reinhardt@amd.com 2638336Ssteve.reinhardt@amd.com /** A normal packet queue used to store responses. */ 2648336Ssteve.reinhardt@amd.com RespPacketQueue queue; 2658336Ssteve.reinhardt@amd.com 2668336Ssteve.reinhardt@amd.com bool blocked; 2678336Ssteve.reinhardt@amd.com 2688336Ssteve.reinhardt@amd.com bool mustSendRetry; 2698336Ssteve.reinhardt@amd.com 2705871Snate@binkert.org private: 2716121Snate@binkert.org 272955SN/A void processSendRetry(); 273955SN/A 2742632Sstever@eecs.umich.edu EventFunctionWrapper sendRetryEvent; 2752632Sstever@eecs.umich.edu 276955SN/A }; 277955SN/A 278955SN/A /** 279955SN/A * The CPU-side port extends the base cache slave port with access 2808878Ssteve.reinhardt@amd.com * functions for functional, atomic and timing requests. 
281955SN/A */ 2822632Sstever@eecs.umich.edu class CpuSidePort : public CacheSlavePort 2832632Sstever@eecs.umich.edu { 2842632Sstever@eecs.umich.edu private: 2852632Sstever@eecs.umich.edu 2862632Sstever@eecs.umich.edu // a pointer to our specific cache implementation 2872632Sstever@eecs.umich.edu BaseCache *cache; 2882632Sstever@eecs.umich.edu 2898268Ssteve.reinhardt@amd.com protected: 2908268Ssteve.reinhardt@amd.com virtual bool recvTimingSnoopResp(PacketPtr pkt) override; 2918268Ssteve.reinhardt@amd.com 2928268Ssteve.reinhardt@amd.com virtual bool tryTiming(PacketPtr pkt) override; 2938268Ssteve.reinhardt@amd.com 2948268Ssteve.reinhardt@amd.com virtual bool recvTimingReq(PacketPtr pkt) override; 2958268Ssteve.reinhardt@amd.com 2962632Sstever@eecs.umich.edu virtual Tick recvAtomic(PacketPtr pkt) override; 2972632Sstever@eecs.umich.edu 2982632Sstever@eecs.umich.edu virtual void recvFunctional(PacketPtr pkt) override; 2992632Sstever@eecs.umich.edu 3008268Ssteve.reinhardt@amd.com virtual AddrRangeList getAddrRanges() const override; 3012632Sstever@eecs.umich.edu 3028268Ssteve.reinhardt@amd.com public: 3038268Ssteve.reinhardt@amd.com 3048268Ssteve.reinhardt@amd.com CpuSidePort(const std::string &_name, BaseCache *_cache, 3058268Ssteve.reinhardt@amd.com const std::string &_label); 3063718Sstever@eecs.umich.edu 3072634Sstever@eecs.umich.edu }; 3082634Sstever@eecs.umich.edu 3095863Snate@binkert.org CpuSidePort cpuSidePort; 3102638Sstever@eecs.umich.edu MemSidePort memSidePort; 3118268Ssteve.reinhardt@amd.com 3122632Sstever@eecs.umich.edu protected: 3132632Sstever@eecs.umich.edu 3142632Sstever@eecs.umich.edu /** Miss status registers */ 3152632Sstever@eecs.umich.edu MSHRQueue mshrQueue; 3162632Sstever@eecs.umich.edu 3171858SN/A /** Write/writeback buffer */ 3183716Sstever@eecs.umich.edu WriteQueue writeBuffer; 3192638Sstever@eecs.umich.edu 3202638Sstever@eecs.umich.edu /** Tag and data Storage */ 3212638Sstever@eecs.umich.edu BaseTags *tags; 3222638Sstever@eecs.umich.edu 
3232638Sstever@eecs.umich.edu /** Prefetcher */ 3242638Sstever@eecs.umich.edu BasePrefetcher *prefetcher; 3252638Sstever@eecs.umich.edu 3265863Snate@binkert.org /** 3275863Snate@binkert.org * Notify the prefetcher on every access, not just misses. 3285863Snate@binkert.org */ 329955SN/A const bool prefetchOnAccess; 3305341Sstever@gmail.com 3315341Sstever@gmail.com /** 3325863Snate@binkert.org * Temporary cache block for occasional transitory use. We use 3337756SAli.Saidi@ARM.com * the tempBlock to fill when allocation fails (e.g., when there 3345341Sstever@gmail.com * is an outstanding request that accesses the victim block) or 3356121Snate@binkert.org * when we want to avoid allocation (e.g., exclusive caches) 3364494Ssaidi@eecs.umich.edu */ 3376121Snate@binkert.org TempCacheBlk *tempBlock; 3381105SN/A 3392667Sstever@eecs.umich.edu /** 3402667Sstever@eecs.umich.edu * Upstream caches need this packet until true is returned, so 3412667Sstever@eecs.umich.edu * hold it for deletion until a subsequent call 3422667Sstever@eecs.umich.edu */ 3436121Snate@binkert.org std::unique_ptr<Packet> pendingDelete; 3442667Sstever@eecs.umich.edu 3455341Sstever@gmail.com /** 3465863Snate@binkert.org * Mark a request as in service (sent downstream in the memory 3475341Sstever@gmail.com * system), effectively making this MSHR the ordering point. 
3485341Sstever@gmail.com */ 3495341Sstever@gmail.com void markInService(MSHR *mshr, bool pending_modified_resp) 3508120Sgblack@eecs.umich.edu { 3515341Sstever@gmail.com bool wasFull = mshrQueue.isFull(); 3528120Sgblack@eecs.umich.edu mshrQueue.markInService(mshr, pending_modified_resp); 3535341Sstever@gmail.com 3548120Sgblack@eecs.umich.edu if (wasFull && !mshrQueue.isFull()) { 3556121Snate@binkert.org clearBlocked(Blocked_NoMSHRs); 3566121Snate@binkert.org } 3575397Ssaidi@eecs.umich.edu } 3585397Ssaidi@eecs.umich.edu 3597727SAli.Saidi@ARM.com void markInService(WriteQueueEntry *entry) 3608268Ssteve.reinhardt@amd.com { 3616168Snate@binkert.org bool wasFull = writeBuffer.isFull(); 3625341Sstever@gmail.com writeBuffer.markInService(entry); 3638120Sgblack@eecs.umich.edu 3648120Sgblack@eecs.umich.edu if (wasFull && !writeBuffer.isFull()) { 3658120Sgblack@eecs.umich.edu clearBlocked(Blocked_NoWBBuffers); 3666814Sgblack@eecs.umich.edu } 3675863Snate@binkert.org } 3688120Sgblack@eecs.umich.edu 3695341Sstever@gmail.com /** 3705863Snate@binkert.org * Determine whether we should allocate on a fill or not. If this 3718268Ssteve.reinhardt@amd.com * cache is mostly inclusive with regards to the upstream cache(s) 3726121Snate@binkert.org * we always allocate (for any non-forwarded and cacheable 3736121Snate@binkert.org * requests). In the case of a mostly exclusive cache, we allocate 3748268Ssteve.reinhardt@amd.com * on fill if the packet did not come from a cache, thus if we: 3755742Snate@binkert.org * are dealing with a whole-line write (the latter behaves much 3765742Snate@binkert.org * like a writeback), the original target packet came from a 3775341Sstever@gmail.com * non-caching source, or if we are performing a prefetch or LLSC. 
3785742Snate@binkert.org * 3795742Snate@binkert.org * @param cmd Command of the incoming requesting packet 3805341Sstever@gmail.com * @return Whether we should allocate on the fill 3816017Snate@binkert.org */ 3826121Snate@binkert.org inline bool allocOnFill(MemCmd cmd) const 3836017Snate@binkert.org { 3847816Ssteve.reinhardt@amd.com return clusivity == Enums::mostly_incl || 3857756SAli.Saidi@ARM.com cmd == MemCmd::WriteLineReq || 3867756SAli.Saidi@ARM.com cmd == MemCmd::ReadReq || 3877756SAli.Saidi@ARM.com cmd == MemCmd::WriteReq || 3887756SAli.Saidi@ARM.com cmd.isPrefetch() || 3897756SAli.Saidi@ARM.com cmd.isLLSC(); 3907756SAli.Saidi@ARM.com } 3917756SAli.Saidi@ARM.com 3927756SAli.Saidi@ARM.com /** 3937816Ssteve.reinhardt@amd.com * Regenerate block address using tags. 3947816Ssteve.reinhardt@amd.com * Block address regeneration depends on whether we're using a temporary 3957816Ssteve.reinhardt@amd.com * block or not. 3967816Ssteve.reinhardt@amd.com * 3977816Ssteve.reinhardt@amd.com * @param blk The block to regenerate address. 3987816Ssteve.reinhardt@amd.com * @return The block's address. 3997816Ssteve.reinhardt@amd.com */ 4007816Ssteve.reinhardt@amd.com Addr regenerateBlkAddr(CacheBlk* blk); 4017816Ssteve.reinhardt@amd.com 4027816Ssteve.reinhardt@amd.com /** 4037756SAli.Saidi@ARM.com * Does all the processing necessary to perform the provided request. 4047816Ssteve.reinhardt@amd.com * @param pkt The memory request to perform. 4057816Ssteve.reinhardt@amd.com * @param blk The cache block to be updated. 4067816Ssteve.reinhardt@amd.com * @param lat The latency of the access. 4077816Ssteve.reinhardt@amd.com * @param writebacks List for any writebacks that need to be performed. 4087816Ssteve.reinhardt@amd.com * @return Boolean indicating whether the request was satisfied. 
4097816Ssteve.reinhardt@amd.com */ 4107816Ssteve.reinhardt@amd.com virtual bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, 4117816Ssteve.reinhardt@amd.com PacketList &writebacks); 4127816Ssteve.reinhardt@amd.com 4137816Ssteve.reinhardt@amd.com /* 4147816Ssteve.reinhardt@amd.com * Handle a timing request that hit in the cache 4157816Ssteve.reinhardt@amd.com * 4167816Ssteve.reinhardt@amd.com * @param ptk The request packet 4177816Ssteve.reinhardt@amd.com * @param blk The referenced block 4187816Ssteve.reinhardt@amd.com * @param request_time The tick at which the block lookup is compete 4197816Ssteve.reinhardt@amd.com */ 4207816Ssteve.reinhardt@amd.com virtual void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, 4217816Ssteve.reinhardt@amd.com Tick request_time); 4227816Ssteve.reinhardt@amd.com 4237816Ssteve.reinhardt@amd.com /* 4247816Ssteve.reinhardt@amd.com * Handle a timing request that missed in the cache 4257816Ssteve.reinhardt@amd.com * 4267816Ssteve.reinhardt@amd.com * Implementation specific handling for different cache 4277816Ssteve.reinhardt@amd.com * implementations 4287816Ssteve.reinhardt@amd.com * 4297816Ssteve.reinhardt@amd.com * @param ptk The request packet 4307816Ssteve.reinhardt@amd.com * @param blk The referenced block 4317816Ssteve.reinhardt@amd.com * @param forward_time The tick at which we can process dependent requests 4327816Ssteve.reinhardt@amd.com * @param request_time The tick at which the block lookup is compete 4337816Ssteve.reinhardt@amd.com */ 4347816Ssteve.reinhardt@amd.com virtual void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, 4357816Ssteve.reinhardt@amd.com Tick forward_time, 4367816Ssteve.reinhardt@amd.com Tick request_time) = 0; 4377816Ssteve.reinhardt@amd.com 4387816Ssteve.reinhardt@amd.com /* 4397816Ssteve.reinhardt@amd.com * Handle a timing request that missed in the cache 4407816Ssteve.reinhardt@amd.com * 4417816Ssteve.reinhardt@amd.com * Common functionality across different cache implementations 
4427816Ssteve.reinhardt@amd.com * 4437816Ssteve.reinhardt@amd.com * @param ptk The request packet 4447816Ssteve.reinhardt@amd.com * @param blk The referenced block 4457816Ssteve.reinhardt@amd.com * @param mshr Any existing mshr for the referenced cache block 4467816Ssteve.reinhardt@amd.com * @param forward_time The tick at which we can process dependent requests 4477816Ssteve.reinhardt@amd.com * @param request_time The tick at which the block lookup is compete 4487816Ssteve.reinhardt@amd.com */ 4497816Ssteve.reinhardt@amd.com void handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk, 4507816Ssteve.reinhardt@amd.com Tick forward_time, Tick request_time); 4517816Ssteve.reinhardt@amd.com 4527816Ssteve.reinhardt@amd.com /** 4537816Ssteve.reinhardt@amd.com * Performs the access specified by the request. 4547816Ssteve.reinhardt@amd.com * @param pkt The request to perform. 4557816Ssteve.reinhardt@amd.com */ 4567816Ssteve.reinhardt@amd.com virtual void recvTimingReq(PacketPtr pkt); 4577816Ssteve.reinhardt@amd.com 4587816Ssteve.reinhardt@amd.com /** 4597816Ssteve.reinhardt@amd.com * Handling the special case of uncacheable write responses to 4607816Ssteve.reinhardt@amd.com * make recvTimingResp less cluttered. 4617816Ssteve.reinhardt@amd.com */ 4627816Ssteve.reinhardt@amd.com void handleUncacheableWriteResp(PacketPtr pkt); 4637816Ssteve.reinhardt@amd.com 4647816Ssteve.reinhardt@amd.com /** 4657756SAli.Saidi@ARM.com * Service non-deferred MSHR targets using the received response 4668120Sgblack@eecs.umich.edu * 4677756SAli.Saidi@ARM.com * Iterates through the list of targets that can be serviced with 4687756SAli.Saidi@ARM.com * the current response. Any writebacks that need to performed 4697756SAli.Saidi@ARM.com * must be appended to the writebacks parameter. 
4707756SAli.Saidi@ARM.com * 4717816Ssteve.reinhardt@amd.com * @param mshr The MSHR that corresponds to the reponse 4727816Ssteve.reinhardt@amd.com * @param pkt The response packet 4737816Ssteve.reinhardt@amd.com * @param blk The reference block 4747816Ssteve.reinhardt@amd.com * @param writebacks List of writebacks that need to be performed 4757816Ssteve.reinhardt@amd.com */ 4767816Ssteve.reinhardt@amd.com virtual void serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, 4777816Ssteve.reinhardt@amd.com CacheBlk *blk, PacketList& writebacks) = 0; 4787816Ssteve.reinhardt@amd.com 4797816Ssteve.reinhardt@amd.com /** 4807816Ssteve.reinhardt@amd.com * Handles a response (cache line fill/write ack) from the bus. 4817756SAli.Saidi@ARM.com * @param pkt The response packet 4827756SAli.Saidi@ARM.com */ 4836654Snate@binkert.org virtual void recvTimingResp(PacketPtr pkt); 4846654Snate@binkert.org 4855871Snate@binkert.org /** 4866121Snate@binkert.org * Snoops bus transactions to maintain coherence. 4876121Snate@binkert.org * @param pkt The current bus transaction. 4886121Snate@binkert.org */ 4898946Sandreas.hansson@arm.com virtual void recvTimingSnoopReq(PacketPtr pkt) = 0; 4908737Skoansin.tan@gmail.com 4913940Ssaidi@eecs.umich.edu /** 4923918Ssaidi@eecs.umich.edu * Handle a snoop response. 4933918Ssaidi@eecs.umich.edu * @param pkt Snoop response packet 4941858SN/A */ 4956121Snate@binkert.org virtual void recvTimingSnoopResp(PacketPtr pkt) = 0; 4967739Sgblack@eecs.umich.edu 4977739Sgblack@eecs.umich.edu /** 4986143Snate@binkert.org * Handle a request in atomic mode that missed in this cache 4997618SAli.Saidi@arm.com * 5007618SAli.Saidi@arm.com * Creates a downstream request, sends it to the memory below and 5017618SAli.Saidi@arm.com * handles the response. As we are in atomic mode all operations 5027618SAli.Saidi@arm.com * are performed immediately. 
5038614Sgblack@eecs.umich.edu * 5047618SAli.Saidi@arm.com * @param pkt The packet with the requests 5057618SAli.Saidi@arm.com * @param blk The referenced block 5067618SAli.Saidi@arm.com * @param writebacks A list with packets for any performed writebacks 5077739Sgblack@eecs.umich.edu * @return Cycles for handling the request 5088946Sandreas.hansson@arm.com */ 5098946Sandreas.hansson@arm.com virtual Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk, 5106121Snate@binkert.org PacketList &writebacks) = 0; 5113940Ssaidi@eecs.umich.edu 5126121Snate@binkert.org /** 5137739Sgblack@eecs.umich.edu * Performs the access specified by the request. 5147739Sgblack@eecs.umich.edu * @param pkt The request to perform. 5157739Sgblack@eecs.umich.edu * @return The number of ticks required for the access. 5167739Sgblack@eecs.umich.edu */ 5177739Sgblack@eecs.umich.edu virtual Tick recvAtomic(PacketPtr pkt); 5187739Sgblack@eecs.umich.edu 5198737Skoansin.tan@gmail.com /** 5208737Skoansin.tan@gmail.com * Snoop for the provided request in the cache and return the estimated 5218737Skoansin.tan@gmail.com * time taken. 5228737Skoansin.tan@gmail.com * @param pkt The memory request to snoop 5238737Skoansin.tan@gmail.com * @return The number of ticks required for the snoop. 5248737Skoansin.tan@gmail.com */ 5258737Skoansin.tan@gmail.com virtual Tick recvAtomicSnoop(PacketPtr pkt) = 0; 5268737Skoansin.tan@gmail.com 5278737Skoansin.tan@gmail.com /** 5288737Skoansin.tan@gmail.com * Performs the access specified by the request. 5298737Skoansin.tan@gmail.com * 5308737Skoansin.tan@gmail.com * @param pkt The request to perform. 5318737Skoansin.tan@gmail.com * @param fromCpuSide from the CPU side port or the memory side port 5328737Skoansin.tan@gmail.com */ 5338737Skoansin.tan@gmail.com virtual void functionalAccess(PacketPtr pkt, bool from_cpu_side); 5348737Skoansin.tan@gmail.com 5358737Skoansin.tan@gmail.com /** 5368737Skoansin.tan@gmail.com * Handle doing the Compare and Swap function for SPARC. 
5378946Sandreas.hansson@arm.com */ 5388946Sandreas.hansson@arm.com void cmpAndSwap(CacheBlk *blk, PacketPtr pkt); 5398946Sandreas.hansson@arm.com 5408946Sandreas.hansson@arm.com /** 5418946Sandreas.hansson@arm.com * Return the next queue entry to service, either a pending miss 5428946Sandreas.hansson@arm.com * from the MSHR queue, a buffered write from the write buffer, or 5433918Ssaidi@eecs.umich.edu * something from the prefetcher. This function is responsible 5443918Ssaidi@eecs.umich.edu * for prioritizing among those sources on the fly. 5453940Ssaidi@eecs.umich.edu */ 5463918Ssaidi@eecs.umich.edu QueueEntry* getNextQueueEntry(); 5473918Ssaidi@eecs.umich.edu 5486157Snate@binkert.org /** 5496157Snate@binkert.org * Insert writebacks into the write buffer 5506157Snate@binkert.org */ 5516157Snate@binkert.org virtual void doWritebacks(PacketList& writebacks, Tick forward_time) = 0; 5525397Ssaidi@eecs.umich.edu 5535397Ssaidi@eecs.umich.edu /** 5546121Snate@binkert.org * Send writebacks down the memory hierarchy in atomic mode 5556121Snate@binkert.org */ 5566121Snate@binkert.org virtual void doWritebacksAtomic(PacketList& writebacks) = 0; 5576121Snate@binkert.org 5586121Snate@binkert.org /** 5596121Snate@binkert.org * Create an appropriate downstream bus request packet. 5605397Ssaidi@eecs.umich.edu * 5611851SN/A * Creates a new packet with the request to be send to the memory 5621851SN/A * below, or nullptr if the current request in cpu_pkt should just 5637739Sgblack@eecs.umich.edu * be forwarded on. 564955SN/A * 5653053Sstever@eecs.umich.edu * @param cpu_pkt The miss packet that needs to be satisfied. 5666121Snate@binkert.org * @param blk The referenced block, can be nullptr. 5673053Sstever@eecs.umich.edu * @param needs_writable Indicates that the block must be writable 5683053Sstever@eecs.umich.edu * even if the request in cpu_pkt doesn't indicate that. 
5693053Sstever@eecs.umich.edu * @return A packet send to the memory below 5703053Sstever@eecs.umich.edu */ 5713053Sstever@eecs.umich.edu virtual PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk, 5726654Snate@binkert.org bool needs_writable) const = 0; 5733053Sstever@eecs.umich.edu 5744742Sstever@eecs.umich.edu /** 5754742Sstever@eecs.umich.edu * Determine if clean lines should be written back or not. In 5763053Sstever@eecs.umich.edu * cases where a downstream cache is mostly inclusive we likely 5773053Sstever@eecs.umich.edu * want it to act as a victim cache also for lines that have not 5783053Sstever@eecs.umich.edu * been modified. Hence, we cannot simply drop the line (or send a 5793053Sstever@eecs.umich.edu * clean evict), but rather need to send the actual data. 5806654Snate@binkert.org */ 5813053Sstever@eecs.umich.edu const bool writebackClean; 5823053Sstever@eecs.umich.edu 5833053Sstever@eecs.umich.edu /** 5843053Sstever@eecs.umich.edu * Writebacks from the tempBlock, resulting on the response path 5852667Sstever@eecs.umich.edu * in atomic mode, must happen after the call to recvAtomic has 5864554Sbinkertn@umich.edu * finished (for the right ordering of the packets). We therefore 5876121Snate@binkert.org * need to hold on to the packets, and have a method and an event 5882667Sstever@eecs.umich.edu * to send them. 5894554Sbinkertn@umich.edu */ 5904554Sbinkertn@umich.edu PacketPtr tempBlockWriteback; 5914554Sbinkertn@umich.edu 5926121Snate@binkert.org /** 5934554Sbinkertn@umich.edu * Send the outstanding tempBlock writeback. To be called after 5944554Sbinkertn@umich.edu * recvAtomic finishes in cases where the block we filled is in 5954554Sbinkertn@umich.edu * fact the tempBlock, and now needs to be written back. 
     */
    void writebackTempBlockAtomic() {
        assert(tempBlockWriteback != nullptr);
        PacketList writebacks{tempBlockWriteback};
        doWritebacksAtomic(writebacks);
        // writeback handed off; clear the pending pointer so the next
        // recvAtomic can stash a new one
        tempBlockWriteback = nullptr;
    }

    /**
     * An event to writeback the tempBlock after recvAtomic
     * finishes. To avoid other calls to recvAtomic getting in
     * between, we create this event with a higher priority.
     */
    EventFunctionWrapper writebackTempBlockAtomicEvent;

    /**
     * Perform any necessary updates to the block and perform any data
     * exchange between the packet and the block. The flags of the
     * packet are also set accordingly.
     *
     * @param pkt Request packet from upstream that hit a block
     * @param blk Cache block that the packet hit
     * @param deferred_response Whether this request originally missed
     * @param pending_downgrade Whether the writable flag is to be removed
     */
    virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                                bool deferred_response = false,
                                bool pending_downgrade = false);

    /**
     * Maintain the clusivity of this cache by potentially
     * invalidating a block.
     * This method works in conjunction with
     * satisfyRequest, but is separate to allow us to handle all MSHR
     * targets before potentially dropping a block.
     *
     * @param from_cache Whether we have dealt with a packet from a cache
     * @param blk The block that should potentially be dropped
     */
    void maintainClusivity(bool from_cache, CacheBlk *blk);

    /**
     * Handle a fill operation caused by a received packet.
     *
     * Populates a cache block and handles all outstanding requests for the
     * satisfied fill request. This version takes two memory requests. One
     * contains the fill data, the other is an optional target to satisfy.
     * Note that the reason we return a list of writebacks rather than
     * inserting them directly in the write buffer is that this function
     * is called by both atomic and timing-mode accesses, and in atomic
     * mode we don't mess with the write buffer (we just perform the
     * writebacks atomically once the original request is complete).
     *
     * @param pkt The memory request with the fill data.
     * @param blk The cache block if it already exists.
     * @param writebacks List for any writebacks that need to be performed.
     * @param allocate Whether to allocate a block or use the temp block
     * @return Pointer to the new cache block.
     */
    CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
                         PacketList &writebacks, bool allocate);

    /**
     * Allocate a new block and perform any necessary writebacks
     *
     * Find a victim block and if necessary prepare writebacks for any
     * existing data. May return nullptr if there are no replaceable
     * blocks. If a replaceable block is found, it inserts the new block in
     * its place. The new block, however, is not set as valid yet.
     *
     * @param pkt Packet holding the address to update
     * @param writebacks A list of writeback packets for the evicted blocks
     * @return the allocated block
     */
    CacheBlk *allocateBlock(const PacketPtr pkt, PacketList &writebacks);

    /**
     * Evict a cache block.
     *
     * Performs a writeback if necessary and invalidates the block
     *
     * @param blk Block to invalidate
     * @return A packet with the writeback, can be nullptr
     */
    M5_NODISCARD virtual PacketPtr evictBlock(CacheBlk *blk) = 0;

    /**
     * Evict a cache block.
     *
     * Performs a writeback if necessary and invalidates the block
     *
     * @param blk Block to invalidate
     * @param writebacks Return a list of packets with writebacks
     */
    virtual void evictBlock(CacheBlk *blk, PacketList &writebacks) = 0;

    /**
     * Invalidate a cache block.
     *
     * @param blk Block to invalidate
     */
    void invalidateBlock(CacheBlk *blk);

    /**
     * Create a writeback request for the given block.
     *
     * @param blk The block to writeback.
     * @return The writeback request for the block.
     */
    PacketPtr writebackBlk(CacheBlk *blk);

    /**
     * Create a writeclean request for the given block.
     *
     * Creates a request that writes the block to the cache below
     * without evicting the block from the current cache.
     *
     * @param blk The block to write clean.
     * @param dest The destination of the write clean operation.
     * @param id Use the given packet id for the write clean operation.
     * @return The generated write clean packet.
     */
    PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id);

    /**
     * Write back dirty blocks in the cache using functional accesses.
     */
    virtual void memWriteback() override;

    /**
     * Invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to
     * memory. Make sure to call memWriteback() first if you
     * want to write them to memory.
     */
    virtual void memInvalidate() override;

    /**
     * Determine if there are any dirty blocks in the cache.
     *
     * @return true if at least one block is dirty, false otherwise.
     */
    bool isDirty() const;

    /**
     * Determine if an address is in the ranges covered by this
     * cache. This is useful to filter snoops.
     *
     * @param addr Address to check against
     *
     * @return If the address in question is in range
     */
    bool inRange(Addr addr) const;

    /**
     * Find next request ready time from among possible sources.
     */
    Tick nextQueueReadyTime() const;

    /** Block size of this cache */
    const unsigned blkSize;

    /**
     * The latency of tag lookup of a cache. It occurs when there is
     * an access to the cache.
     */
    const Cycles lookupLatency;

    /**
     * The latency of data access of a cache. It occurs when there is
     * an access to the cache.
     */
    const Cycles dataLatency;

    /**
     * This is the forward latency of the cache. It occurs when there
     * is a cache miss and a request is forwarded downstream, in
     * particular an outbound miss.
     */
    const Cycles forwardLatency;

    /** The latency to fill a cache block */
    const Cycles fillLatency;

    /**
     * The latency of sending response to its upper level cache/core on
     * a linefill. The responseLatency parameter captures this
     * latency.
     */
    const Cycles responseLatency;

    /** The number of targets for each MSHR. */
    const int numTarget;

    /** Do we forward snoops from mem side port through to cpu side port?
     */
    bool forwardSnoops;

    /**
     * Clusivity with respect to the upstream cache, determining if we
     * fill into both this cache and the cache above on a miss. Note
     * that we currently do not support strict clusivity policies.
     */
    const Enums::Clusivity clusivity;

    /**
     * Is this cache read only, for example the instruction cache, or
     * table-walker cache. A cache that is read only should never see
     * any writes, and should never get any dirty data (and hence
     * never have to do any writebacks).
     */
    const bool isReadOnly;

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /** Increasing order number assigned to each incoming request. */
    uint64_t order;

    /** Stores time the cache blocked for statistics. */
    Cycles blockedCycle;

    /** Pointer to the MSHR that has no targets. */
    MSHR *noTargetMSHR;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /**
     * The address range to which the cache responds on the CPU side.
     * Normally this is all possible memory addresses.
     */
    const AddrRangeList addrRanges;

  public:
    /** System we are currently operating in. */
    System *system;

    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command.
        @sa Packet::Command */
    Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command.
        @sa Packet::Command */
    Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses.
     */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of times a HW-prefetched block is evicted w/o reference. */
    Stats::Scalar unusedPrefetches;

    /** Number of blocks written back per thread.
     */
    Stats::Vector writebacks;

    /** Number of misses that hit in the MSHRs per command and thread. */
    Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that hit in the MSHRs. */
    Stats::Formula demandMshrHits;
    /** Total number of misses that hit in the MSHRs. */
    Stats::Formula overallMshrHits;

    /** Number of misses that miss in the MSHRs, per command and thread. */
    Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that miss in the MSHRs. */
    Stats::Formula demandMshrMisses;
    /** Total number of misses that miss in the MSHRs. */
    Stats::Formula overallMshrMisses;

    /** Number of uncacheable accesses handled via the MSHRs, per
        command and thread. (Original comment duplicated the
        mshr_misses text above; reworded to match the member's use.) */
    Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
    /** Total number of uncacheable accesses handled via the MSHRs. */
    Stats::Formula overallMshrUncacheable;

    /** Total cycle latency of each MSHR miss, per command and thread. */
    Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of demand MSHR misses. */
    Stats::Formula demandMshrMissLatency;
    /** Total cycle latency of overall MSHR misses. */
    Stats::Formula overallMshrMissLatency;

    /** Total cycle latency of each MSHR miss, per command and thread.
     */
    Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of overall MSHR misses. */
    Stats::Formula overallMshrUncacheableLatency;

#if 0
    /** The total number of MSHR accesses per command and thread. */
    Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
    /** The total number of demand MSHR accesses. */
    Stats::Formula demandMshrAccesses;
    /** The total number of MSHR accesses. */
    Stats::Formula overallMshrAccesses;
#endif

    /** The miss rate in the MSHRs per command and thread. */
    Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
    /** The demand miss rate in the MSHRs. */
    Stats::Formula demandMshrMissRate;
    /** The overall miss rate in the MSHRs. */
    Stats::Formula overallMshrMissRate;

    /** The average latency of an MSHR miss, per command and thread. */
    Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average latency of a demand MSHR miss. */
    Stats::Formula demandAvgMshrMissLatency;
    /** The average overall latency of an MSHR miss. */
    Stats::Formula overallAvgMshrMissLatency;

    /** The average latency of an MSHR miss, per command and thread. */
    Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
    /** The average overall latency of an MSHR miss.
     */
    Stats::Formula overallAvgMshrUncacheableLatency;

    /** Number of replacements of valid blocks. */
    Stats::Scalar replacements;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    void regStats() override;

  public:
    BaseCache(const BaseCacheParams *p, unsigned blk_size);
    ~BaseCache();

    void init() override;

    BaseMasterPort &getMasterPort(const std::string &if_name,
                                  PortID idx = InvalidPortID) override;
    BaseSlavePort &getSlavePort(const std::string &if_name,
                                PortID idx = InvalidPortID) override;

    /**
     * Query block size of a cache.
     * @return The block size
     */
    unsigned
    getBlockSize() const
    {
        return blkSize;
    }

    /** The address ranges this cache responds to on the CPU side. */
    const AddrRangeList &getAddrRanges() const { return addrRanges; }

    /**
     * Allocate an MSHR for the given miss packet.
     *
     * @param pkt The packet that missed and needs an MSHR
     * @param time Tick associated with the new entry; also used to
     *             schedule the memory-side send event
     * @param sched_send Whether to schedule the memory-side send event
     * @return The newly allocated MSHR
     */
    MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send = true)
    {
        MSHR *mshr = mshrQueue.allocate(pkt->getBlockAddr(blkSize), blkSize,
                                        pkt, time, order++,
                                        allocOnFill(pkt->cmd));

        // if this allocation used the last free MSHR, stall further
        // accesses until an MSHR is freed
        if (mshrQueue.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_MSHRs);
        }

        if (sched_send) {
            // schedule the send
            schedMemSideSendEvent(time);
        }

        return mshr;
    }

    /**
     * Allocate a write-buffer entry for the given write or clean-evict
     * packet and schedule the memory-side send event.
     *
     * @param pkt The writeback/clean-evict packet to buffer
     * @param time Tick associated with the new entry
     */
    void allocateWriteBuffer(PacketPtr pkt, Tick time)
    {
        // should only see writes or clean evicts here
        assert(pkt->isWrite() || pkt->cmd == MemCmd::CleanEvict);

        Addr blk_addr = pkt->getBlockAddr(blkSize);

        WriteQueueEntry *wq_entry =
            writeBuffer.findMatch(blk_addr, pkt->isSecure());
        if (wq_entry && !wq_entry->inService) {
            DPRINTF(Cache, "Potential to merge writeback %s", pkt->print());
        }

        writeBuffer.allocate(blk_addr, blkSize, pkt, time, order++);

        // stall further accesses while the write buffer has no free entries
        if (writeBuffer.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_WriteBuffer);
        }

        // schedule the send
        schedMemSideSendEvent(time);
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked() const
    {
        return blocked != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            // first cause to block: start the cycle accounting and
            // stall the CPU-side port
            blocked_causes[cause]++;
            blockedCycle = curCycle();
            cpuSidePort.setBlocked();
        }
        blocked |= flag;
        DPRINTF(Cache,"Blocking for cause %d, mask=%d\n", cause, blocked);
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        blocked &= ~flag;
        DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked);
        if (blocked == 0) {
            // last cause cleared: account the blocked cycles and
            // unstall the CPU-side port
            blocked_cycles[cause] += curCycle() - blockedCycle;
            cpuSidePort.clearBlocked();
        }
    }

    /**
     * Schedule a send event for the memory-side port. If already
     * scheduled, this may reschedule the event at an earlier
     * time.
     * When the specified time is reached, the port is free to
     * send either a response, a request, or a prefetch request.
     *
     * @param time The time when to attempt sending a packet.
     */
    void schedMemSideSendEvent(Tick time)
    {
        memSidePort.schedSendEvent(time);
    }

    /** Return whether a block for addr (in the given security space)
        is present in the tags. */
    bool inCache(Addr addr, bool is_secure) const {
        return tags->findBlock(addr, is_secure);
    }

    /** Return whether there is an outstanding MSHR for addr (in the
        given security space). */
    bool inMissQueue(Addr addr, bool is_secure) const {
        return mshrQueue.findMatch(addr, is_secure);
    }

    /** Update the miss statistics for pkt's command/master, bump the
        request's access depth, and, if a maximum miss count is
        configured, count down towards the simulation exit event. */
    void incMissCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
        pkt->req->incAccessDepth();
        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }

    /** Update the hit statistics for pkt's command/master. */
    void incHitCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
    }

    /**
     * Cache block visitor that writes back dirty cache blocks using
     * functional writes.
     */
    void writebackVisitor(CacheBlk &blk);

    /**
     * Cache block visitor that invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to memory.
     */
    void invalidateVisitor(CacheBlk &blk);

    /**
     * Take an MSHR, turn it into a suitable downstream packet, and
     * send it out. This construct allows a queue entry to choose a suitable
     * approach based on its type.
     *
     * @param mshr The MSHR to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    virtual bool sendMSHRQueuePacket(MSHR* mshr);

    /**
     * Similar to sendMSHR, but for a write-queue entry
     * instead. Create the packet, and send it, and if successful also
     * mark the entry in service.
     *
     * @param wq_entry The write-queue entry to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    bool sendWriteQueuePacket(WriteQueueEntry* wq_entry);

    /**
     * Serialize the state of the caches
     *
     * We currently don't support checkpointing cache state, so this panics.
     */
    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;

};

#endif //__MEM_CACHE_BASE_HH__