// base.hh revision 12727
/*
 * Copyright (c) 2012-2013, 2015-2016, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Hansson
 *          Nikos Nikoleris
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __MEM_CACHE_BASE_HH__
#define __MEM_CACHE_BASE_HH__

#include <cassert>
#include <cstdint>
#include <memory>
#include <string>

#include "base/addr_range.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "enums/Clusivity.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr_queue.hh"
#include "mem/cache/tags/base.hh"
#include "mem/cache/write_queue.hh"
#include "mem/cache/write_queue_entry.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/packet_queue.hh"
#include "mem/qport.hh"
#include "mem/request.hh"
#include "sim/eventq.hh"
#include "sim/serialize.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

class BaseMasterPort;
class BasePrefetcher;
class BaseSlavePort;
class MSHR;
class MasterPort;
class QueueEntry;
struct BaseCacheParams;

/**
905245Sgblack@eecs.umich.edu * A basic cache interface. Implements some common functions for speed. 915236Sgblack@eecs.umich.edu */ 925236Sgblack@eecs.umich.educlass BaseCache : public MemObject 935236Sgblack@eecs.umich.edu{ 945124Sgblack@eecs.umich.edu protected: 955124Sgblack@eecs.umich.edu /** 965124Sgblack@eecs.umich.edu * Indexes to enumerate the MSHR queues. 975124Sgblack@eecs.umich.edu */ 985124Sgblack@eecs.umich.edu enum MSHRQueueIndex { 995124Sgblack@eecs.umich.edu MSHRQueue_MSHRs, 1005124Sgblack@eecs.umich.edu MSHRQueue_WriteBuffer 1015124Sgblack@eecs.umich.edu }; 1025124Sgblack@eecs.umich.edu 1035124Sgblack@eecs.umich.edu public: 1045124Sgblack@eecs.umich.edu /** 1055124Sgblack@eecs.umich.edu * Reasons for caches to be blocked. 1065124Sgblack@eecs.umich.edu */ 1075124Sgblack@eecs.umich.edu enum BlockedCause { 1085124Sgblack@eecs.umich.edu Blocked_NoMSHRs = MSHRQueue_MSHRs, 1095124Sgblack@eecs.umich.edu Blocked_NoWBBuffers = MSHRQueue_WriteBuffer, 1105124Sgblack@eecs.umich.edu Blocked_NoTargets, 1115360Sgblack@eecs.umich.edu NUM_BLOCKED_CAUSES 1125360Sgblack@eecs.umich.edu }; 1135124Sgblack@eecs.umich.edu 1145124Sgblack@eecs.umich.edu protected: 1155124Sgblack@eecs.umich.edu 1165124Sgblack@eecs.umich.edu /** 1175124Sgblack@eecs.umich.edu * A cache master port is used for the memory-side port of the 1185124Sgblack@eecs.umich.edu * cache, and in addition to the basic timing port that only sends 1195124Sgblack@eecs.umich.edu * response packets through a transmit list, it also offers the 1205124Sgblack@eecs.umich.edu * ability to schedule and send request packets (requests & 1215360Sgblack@eecs.umich.edu * writebacks). The send event is scheduled through schedSendEvent, 1225124Sgblack@eecs.umich.edu * and the sendDeferredPacket of the timing port is modified to 1235360Sgblack@eecs.umich.edu * consider both the transmit list and the requests from the MSHR. 
1245124Sgblack@eecs.umich.edu */ 1255360Sgblack@eecs.umich.edu class CacheMasterPort : public QueuedMasterPort 1265124Sgblack@eecs.umich.edu { 1275124Sgblack@eecs.umich.edu 1285360Sgblack@eecs.umich.edu public: 1295360Sgblack@eecs.umich.edu 1305360Sgblack@eecs.umich.edu /** 1315360Sgblack@eecs.umich.edu * Schedule a send of a request packet (from the MSHR). Note 1325360Sgblack@eecs.umich.edu * that we could already have a retry outstanding. 1335360Sgblack@eecs.umich.edu */ 1345360Sgblack@eecs.umich.edu void schedSendEvent(Tick time) 1355360Sgblack@eecs.umich.edu { 1365360Sgblack@eecs.umich.edu DPRINTF(CachePort, "Scheduling send event at %llu\n", time); 1375360Sgblack@eecs.umich.edu reqQueue.schedSendEvent(time); 1385360Sgblack@eecs.umich.edu } 1395124Sgblack@eecs.umich.edu 1405124Sgblack@eecs.umich.edu protected: 1415245Sgblack@eecs.umich.edu 1425245Sgblack@eecs.umich.edu CacheMasterPort(const std::string &_name, BaseCache *_cache, 1435245Sgblack@eecs.umich.edu ReqPacketQueue &_reqQueue, 1445245Sgblack@eecs.umich.edu SnoopRespPacketQueue &_snoopRespQueue) : 1455245Sgblack@eecs.umich.edu QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue) 1465245Sgblack@eecs.umich.edu { } 1475245Sgblack@eecs.umich.edu 1485245Sgblack@eecs.umich.edu /** 1495124Sgblack@eecs.umich.edu * Memory-side port always snoops. 1505124Sgblack@eecs.umich.edu * 1515124Sgblack@eecs.umich.edu * @return always true 1525242Sgblack@eecs.umich.edu */ 1535242Sgblack@eecs.umich.edu virtual bool isSnooping() const { return true; } 1545242Sgblack@eecs.umich.edu }; 1555242Sgblack@eecs.umich.edu 1565242Sgblack@eecs.umich.edu /** 1575242Sgblack@eecs.umich.edu * Override the default behaviour of sendDeferredPacket to enable 1585124Sgblack@eecs.umich.edu * the memory-side cache port to also send requests based on the 1595124Sgblack@eecs.umich.edu * current MSHR status. This queue has a pointer to our specific 1605124Sgblack@eecs.umich.edu * cache implementation and is used by the MemSidePort. 
1615357Sgblack@eecs.umich.edu */ 1625357Sgblack@eecs.umich.edu class CacheReqPacketQueue : public ReqPacketQueue 1635357Sgblack@eecs.umich.edu { 1645357Sgblack@eecs.umich.edu 1655357Sgblack@eecs.umich.edu protected: 1665357Sgblack@eecs.umich.edu 1675124Sgblack@eecs.umich.edu BaseCache &cache; 1685124Sgblack@eecs.umich.edu SnoopRespPacketQueue &snoopRespQueue; 1695242Sgblack@eecs.umich.edu 1705242Sgblack@eecs.umich.edu public: 1715242Sgblack@eecs.umich.edu 1725242Sgblack@eecs.umich.edu CacheReqPacketQueue(BaseCache &cache, MasterPort &port, 1735242Sgblack@eecs.umich.edu SnoopRespPacketQueue &snoop_resp_queue, 1745242Sgblack@eecs.umich.edu const std::string &label) : 1755242Sgblack@eecs.umich.edu ReqPacketQueue(cache, port, label), cache(cache), 1765242Sgblack@eecs.umich.edu snoopRespQueue(snoop_resp_queue) { } 1775242Sgblack@eecs.umich.edu 1785242Sgblack@eecs.umich.edu /** 1795124Sgblack@eecs.umich.edu * Override the normal sendDeferredPacket and do not only 1805124Sgblack@eecs.umich.edu * consider the transmit list (used for responses), but also 1815124Sgblack@eecs.umich.edu * requests. 1825358Sgblack@eecs.umich.edu */ 1835086Sgblack@eecs.umich.edu virtual void sendDeferredPacket(); 1845359Sgblack@eecs.umich.edu 1855359Sgblack@eecs.umich.edu /** 1865359Sgblack@eecs.umich.edu * Check if there is a conflicting snoop response about to be 1875359Sgblack@eecs.umich.edu * send out, and if so simply stall any requests, and schedule 1885359Sgblack@eecs.umich.edu * a send event at the same time as the next snoop response is 1895086Sgblack@eecs.umich.edu * being sent out. 
1905086Sgblack@eecs.umich.edu */ 1915140Sgblack@eecs.umich.edu bool checkConflictingSnoop(Addr addr) 1925086Sgblack@eecs.umich.edu { 1935140Sgblack@eecs.umich.edu if (snoopRespQueue.hasAddr(addr)) { 1945086Sgblack@eecs.umich.edu DPRINTF(CachePort, "Waiting for snoop response to be " 1955124Sgblack@eecs.umich.edu "sent\n"); 1965140Sgblack@eecs.umich.edu Tick when = snoopRespQueue.deferredPacketReadyTime(); 1975124Sgblack@eecs.umich.edu schedSendEvent(when); 1985124Sgblack@eecs.umich.edu return true; 1995140Sgblack@eecs.umich.edu } 2005294Sgblack@eecs.umich.edu return false; 2015124Sgblack@eecs.umich.edu } 2025124Sgblack@eecs.umich.edu }; 2035149Sgblack@eecs.umich.edu 2045149Sgblack@eecs.umich.edu 2055149Sgblack@eecs.umich.edu /** 2065149Sgblack@eecs.umich.edu * The memory-side port extends the base cache master port with 2075294Sgblack@eecs.umich.edu * access functions for functional, atomic and timing snoops. 2085243Sgblack@eecs.umich.edu */ 2095418Sgblack@eecs.umich.edu class MemSidePort : public CacheMasterPort 2105149Sgblack@eecs.umich.edu { 2115149Sgblack@eecs.umich.edu private: 2125149Sgblack@eecs.umich.edu 2135418Sgblack@eecs.umich.edu /** The cache-specific queue. 
*/ 2145149Sgblack@eecs.umich.edu CacheReqPacketQueue _reqQueue; 2155149Sgblack@eecs.umich.edu 2165149Sgblack@eecs.umich.edu SnoopRespPacketQueue _snoopRespQueue; 2175149Sgblack@eecs.umich.edu 2185149Sgblack@eecs.umich.edu // a pointer to our specific cache implementation 2195149Sgblack@eecs.umich.edu BaseCache *cache; 2205360Sgblack@eecs.umich.edu 2215360Sgblack@eecs.umich.edu protected: 2225360Sgblack@eecs.umich.edu 2235149Sgblack@eecs.umich.edu virtual void recvTimingSnoopReq(PacketPtr pkt); 2245149Sgblack@eecs.umich.edu 2255149Sgblack@eecs.umich.edu virtual bool recvTimingResp(PacketPtr pkt); 2265149Sgblack@eecs.umich.edu 2275149Sgblack@eecs.umich.edu virtual Tick recvAtomicSnoop(PacketPtr pkt); 2285149Sgblack@eecs.umich.edu 2295149Sgblack@eecs.umich.edu virtual void recvFunctionalSnoop(PacketPtr pkt); 2305149Sgblack@eecs.umich.edu 2315149Sgblack@eecs.umich.edu public: 2325149Sgblack@eecs.umich.edu 2335149Sgblack@eecs.umich.edu MemSidePort(const std::string &_name, BaseCache *_cache, 2345149Sgblack@eecs.umich.edu const std::string &_label); 2355149Sgblack@eecs.umich.edu }; 2365149Sgblack@eecs.umich.edu 2375149Sgblack@eecs.umich.edu /** 2385149Sgblack@eecs.umich.edu * A cache slave port is used for the CPU-side port of the cache, 2395149Sgblack@eecs.umich.edu * and it is basically a simple timing port that uses a transmit 2405149Sgblack@eecs.umich.edu * list for responses to the CPU (or connected master). In 2415149Sgblack@eecs.umich.edu * addition, it has the functionality to block the port for 2425149Sgblack@eecs.umich.edu * incoming requests. If blocked, the port will issue a retry once 2435149Sgblack@eecs.umich.edu * unblocked. 2445149Sgblack@eecs.umich.edu */ 2455149Sgblack@eecs.umich.edu class CacheSlavePort : public QueuedSlavePort 2465149Sgblack@eecs.umich.edu { 2475149Sgblack@eecs.umich.edu 2485149Sgblack@eecs.umich.edu public: 2495149Sgblack@eecs.umich.edu 2505149Sgblack@eecs.umich.edu /** Do not accept any new requests. 
*/ 2515149Sgblack@eecs.umich.edu void setBlocked(); 2525149Sgblack@eecs.umich.edu 2535149Sgblack@eecs.umich.edu /** Return to normal operation and accept new requests. */ 2545149Sgblack@eecs.umich.edu void clearBlocked(); 2555149Sgblack@eecs.umich.edu 2565149Sgblack@eecs.umich.edu bool isBlocked() const { return blocked; } 2575149Sgblack@eecs.umich.edu 2585149Sgblack@eecs.umich.edu protected: 2595149Sgblack@eecs.umich.edu 2605149Sgblack@eecs.umich.edu CacheSlavePort(const std::string &_name, BaseCache *_cache, 2615149Sgblack@eecs.umich.edu const std::string &_label); 2625149Sgblack@eecs.umich.edu 2635149Sgblack@eecs.umich.edu /** A normal packet queue used to store responses. */ 2645149Sgblack@eecs.umich.edu RespPacketQueue queue; 2655149Sgblack@eecs.umich.edu 2665149Sgblack@eecs.umich.edu bool blocked; 2675149Sgblack@eecs.umich.edu 2685149Sgblack@eecs.umich.edu bool mustSendRetry; 2695149Sgblack@eecs.umich.edu 2705149Sgblack@eecs.umich.edu private: 2715149Sgblack@eecs.umich.edu 2725149Sgblack@eecs.umich.edu void processSendRetry(); 2735149Sgblack@eecs.umich.edu 2745149Sgblack@eecs.umich.edu EventFunctionWrapper sendRetryEvent; 2755149Sgblack@eecs.umich.edu 2765149Sgblack@eecs.umich.edu }; 2775149Sgblack@eecs.umich.edu 2785149Sgblack@eecs.umich.edu /** 2795149Sgblack@eecs.umich.edu * The CPU-side port extends the base cache slave port with access 2805149Sgblack@eecs.umich.edu * functions for functional, atomic and timing requests. 
2815149Sgblack@eecs.umich.edu */ 2825149Sgblack@eecs.umich.edu class CpuSidePort : public CacheSlavePort 2835149Sgblack@eecs.umich.edu { 2845149Sgblack@eecs.umich.edu private: 2855149Sgblack@eecs.umich.edu 2865149Sgblack@eecs.umich.edu // a pointer to our specific cache implementation 2875149Sgblack@eecs.umich.edu BaseCache *cache; 2885149Sgblack@eecs.umich.edu 2895149Sgblack@eecs.umich.edu protected: 2905149Sgblack@eecs.umich.edu virtual bool recvTimingSnoopResp(PacketPtr pkt) override; 2915149Sgblack@eecs.umich.edu 2925149Sgblack@eecs.umich.edu virtual bool tryTiming(PacketPtr pkt) override; 2935149Sgblack@eecs.umich.edu 2945149Sgblack@eecs.umich.edu virtual bool recvTimingReq(PacketPtr pkt) override; 2955149Sgblack@eecs.umich.edu 2965149Sgblack@eecs.umich.edu virtual Tick recvAtomic(PacketPtr pkt) override; 2975149Sgblack@eecs.umich.edu 2985149Sgblack@eecs.umich.edu virtual void recvFunctional(PacketPtr pkt) override; 2995149Sgblack@eecs.umich.edu 3005149Sgblack@eecs.umich.edu virtual AddrRangeList getAddrRanges() const override; 3015149Sgblack@eecs.umich.edu 3025149Sgblack@eecs.umich.edu public: 3035149Sgblack@eecs.umich.edu 3045149Sgblack@eecs.umich.edu CpuSidePort(const std::string &_name, BaseCache *_cache, 3055149Sgblack@eecs.umich.edu const std::string &_label); 3065149Sgblack@eecs.umich.edu 3075149Sgblack@eecs.umich.edu }; 3085149Sgblack@eecs.umich.edu 3095149Sgblack@eecs.umich.edu CpuSidePort cpuSidePort; 3105149Sgblack@eecs.umich.edu MemSidePort memSidePort; 3115149Sgblack@eecs.umich.edu 3125149Sgblack@eecs.umich.edu protected: 3135149Sgblack@eecs.umich.edu 3145149Sgblack@eecs.umich.edu /** Miss status registers */ 3155149Sgblack@eecs.umich.edu MSHRQueue mshrQueue; 3165149Sgblack@eecs.umich.edu 3175149Sgblack@eecs.umich.edu /** Write/writeback buffer */ 3185149Sgblack@eecs.umich.edu WriteQueue writeBuffer; 3195149Sgblack@eecs.umich.edu 3205149Sgblack@eecs.umich.edu /** Tag and data Storage */ 3215149Sgblack@eecs.umich.edu BaseTags *tags; 
3225149Sgblack@eecs.umich.edu 3235149Sgblack@eecs.umich.edu /** Prefetcher */ 3245149Sgblack@eecs.umich.edu BasePrefetcher *prefetcher; 3255149Sgblack@eecs.umich.edu 3265149Sgblack@eecs.umich.edu /** 3275149Sgblack@eecs.umich.edu * Notify the prefetcher on every access, not just misses. 3285149Sgblack@eecs.umich.edu */ 3295149Sgblack@eecs.umich.edu const bool prefetchOnAccess; 3305149Sgblack@eecs.umich.edu 3315149Sgblack@eecs.umich.edu /** 3325149Sgblack@eecs.umich.edu * Temporary cache block for occasional transitory use. We use 3335149Sgblack@eecs.umich.edu * the tempBlock to fill when allocation fails (e.g., when there 3345149Sgblack@eecs.umich.edu * is an outstanding request that accesses the victim block) or 3355149Sgblack@eecs.umich.edu * when we want to avoid allocation (e.g., exclusive caches) 3365149Sgblack@eecs.umich.edu */ 3375149Sgblack@eecs.umich.edu CacheBlk *tempBlock; 3385149Sgblack@eecs.umich.edu 3395149Sgblack@eecs.umich.edu /** 3405149Sgblack@eecs.umich.edu * Upstream caches need this packet until true is returned, so 3415149Sgblack@eecs.umich.edu * hold it for deletion until a subsequent call 3425149Sgblack@eecs.umich.edu */ 3435149Sgblack@eecs.umich.edu std::unique_ptr<Packet> pendingDelete; 3445149Sgblack@eecs.umich.edu 3455149Sgblack@eecs.umich.edu /** 3465149Sgblack@eecs.umich.edu * Mark a request as in service (sent downstream in the memory 3475149Sgblack@eecs.umich.edu * system), effectively making this MSHR the ordering point. 
3485149Sgblack@eecs.umich.edu */ 3495149Sgblack@eecs.umich.edu void markInService(MSHR *mshr, bool pending_modified_resp) 3505149Sgblack@eecs.umich.edu { 3515149Sgblack@eecs.umich.edu bool wasFull = mshrQueue.isFull(); 3525149Sgblack@eecs.umich.edu mshrQueue.markInService(mshr, pending_modified_resp); 3535149Sgblack@eecs.umich.edu 3545149Sgblack@eecs.umich.edu if (wasFull && !mshrQueue.isFull()) { 3555149Sgblack@eecs.umich.edu clearBlocked(Blocked_NoMSHRs); 3565149Sgblack@eecs.umich.edu } 3575149Sgblack@eecs.umich.edu } 3585149Sgblack@eecs.umich.edu 3595149Sgblack@eecs.umich.edu void markInService(WriteQueueEntry *entry) 3605149Sgblack@eecs.umich.edu { 3615419Sgblack@eecs.umich.edu bool wasFull = writeBuffer.isFull(); 3625419Sgblack@eecs.umich.edu writeBuffer.markInService(entry); 3635419Sgblack@eecs.umich.edu 3645419Sgblack@eecs.umich.edu if (wasFull && !writeBuffer.isFull()) { 3655419Sgblack@eecs.umich.edu clearBlocked(Blocked_NoWBBuffers); 3665419Sgblack@eecs.umich.edu } 3675419Sgblack@eecs.umich.edu } 3685419Sgblack@eecs.umich.edu 3695419Sgblack@eecs.umich.edu /** 3705149Sgblack@eecs.umich.edu * Determine whether we should allocate on a fill or not. If this 3715149Sgblack@eecs.umich.edu * cache is mostly inclusive with regards to the upstream cache(s) 3725149Sgblack@eecs.umich.edu * we always allocate (for any non-forwarded and cacheable 3735149Sgblack@eecs.umich.edu * requests). In the case of a mostly exclusive cache, we allocate 3745149Sgblack@eecs.umich.edu * on fill if the packet did not come from a cache, thus if we: 3755149Sgblack@eecs.umich.edu * are dealing with a whole-line write (the latter behaves much 3765149Sgblack@eecs.umich.edu * like a writeback), the original target packet came from a 3775149Sgblack@eecs.umich.edu * non-caching source, or if we are performing a prefetch or LLSC. 
3785149Sgblack@eecs.umich.edu * 3795149Sgblack@eecs.umich.edu * @param cmd Command of the incoming requesting packet 3805149Sgblack@eecs.umich.edu * @return Whether we should allocate on the fill 3815149Sgblack@eecs.umich.edu */ 3825149Sgblack@eecs.umich.edu inline bool allocOnFill(MemCmd cmd) const 3835149Sgblack@eecs.umich.edu { 3845149Sgblack@eecs.umich.edu return clusivity == Enums::mostly_incl || 3855419Sgblack@eecs.umich.edu cmd == MemCmd::WriteLineReq || 3865419Sgblack@eecs.umich.edu cmd == MemCmd::ReadReq || 3875419Sgblack@eecs.umich.edu cmd == MemCmd::WriteReq || 3885419Sgblack@eecs.umich.edu cmd.isPrefetch() || 3895419Sgblack@eecs.umich.edu cmd.isLLSC(); 3905419Sgblack@eecs.umich.edu } 3915419Sgblack@eecs.umich.edu 3925419Sgblack@eecs.umich.edu /** 3935419Sgblack@eecs.umich.edu * Does all the processing necessary to perform the provided request. 3945149Sgblack@eecs.umich.edu * @param pkt The memory request to perform. 3955149Sgblack@eecs.umich.edu * @param blk The cache block to be updated. 3965149Sgblack@eecs.umich.edu * @param lat The latency of the access. 3975149Sgblack@eecs.umich.edu * @param writebacks List for any writebacks that need to be performed. 3985149Sgblack@eecs.umich.edu * @return Boolean indicating whether the request was satisfied. 
3995149Sgblack@eecs.umich.edu */ 4005149Sgblack@eecs.umich.edu virtual bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, 4015149Sgblack@eecs.umich.edu PacketList &writebacks); 4025149Sgblack@eecs.umich.edu 4035149Sgblack@eecs.umich.edu /* 4045149Sgblack@eecs.umich.edu * Handle a timing request that hit in the cache 4055149Sgblack@eecs.umich.edu * 4065149Sgblack@eecs.umich.edu * @param ptk The request packet 4075149Sgblack@eecs.umich.edu * @param blk The referenced block 4085149Sgblack@eecs.umich.edu * @param request_time The tick at which the block lookup is compete 4095419Sgblack@eecs.umich.edu */ 4105419Sgblack@eecs.umich.edu virtual void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, 4115419Sgblack@eecs.umich.edu Tick request_time); 4125419Sgblack@eecs.umich.edu 4135419Sgblack@eecs.umich.edu /* 4145419Sgblack@eecs.umich.edu * Handle a timing request that missed in the cache 4155419Sgblack@eecs.umich.edu * 4165419Sgblack@eecs.umich.edu * Implementation specific handling for different cache 4175419Sgblack@eecs.umich.edu * implementations 4185149Sgblack@eecs.umich.edu * 4195149Sgblack@eecs.umich.edu * @param ptk The request packet 4205149Sgblack@eecs.umich.edu * @param blk The referenced block 4215149Sgblack@eecs.umich.edu * @param forward_time The tick at which we can process dependent requests 4225149Sgblack@eecs.umich.edu * @param request_time The tick at which the block lookup is compete 4235149Sgblack@eecs.umich.edu */ 4245149Sgblack@eecs.umich.edu virtual void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, 4255149Sgblack@eecs.umich.edu Tick forward_time, 4265149Sgblack@eecs.umich.edu Tick request_time) = 0; 4275149Sgblack@eecs.umich.edu 4285149Sgblack@eecs.umich.edu /* 4295149Sgblack@eecs.umich.edu * Handle a timing request that missed in the cache 4305149Sgblack@eecs.umich.edu * 4315149Sgblack@eecs.umich.edu * Common functionality across different cache implementations 4325149Sgblack@eecs.umich.edu * 4335419Sgblack@eecs.umich.edu * @param ptk The 
request packet 4345419Sgblack@eecs.umich.edu * @param blk The referenced block 4355419Sgblack@eecs.umich.edu * @param mshr Any existing mshr for the referenced cache block 4365419Sgblack@eecs.umich.edu * @param forward_time The tick at which we can process dependent requests 4375419Sgblack@eecs.umich.edu * @param request_time The tick at which the block lookup is compete 4385419Sgblack@eecs.umich.edu */ 4395419Sgblack@eecs.umich.edu void handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk, 4405419Sgblack@eecs.umich.edu Tick forward_time, Tick request_time); 4415419Sgblack@eecs.umich.edu 4425149Sgblack@eecs.umich.edu /** 4435149Sgblack@eecs.umich.edu * Performs the access specified by the request. 4445149Sgblack@eecs.umich.edu * @param pkt The request to perform. 4455149Sgblack@eecs.umich.edu */ 4465149Sgblack@eecs.umich.edu virtual void recvTimingReq(PacketPtr pkt); 4475149Sgblack@eecs.umich.edu 4485149Sgblack@eecs.umich.edu /** 4495149Sgblack@eecs.umich.edu * Handling the special case of uncacheable write responses to 4505149Sgblack@eecs.umich.edu * make recvTimingResp less cluttered. 4515149Sgblack@eecs.umich.edu */ 4525149Sgblack@eecs.umich.edu void handleUncacheableWriteResp(PacketPtr pkt); 4535149Sgblack@eecs.umich.edu 4545149Sgblack@eecs.umich.edu /** 4555149Sgblack@eecs.umich.edu * Service non-deferred MSHR targets using the received response 4565149Sgblack@eecs.umich.edu * 4575149Sgblack@eecs.umich.edu * Iterates through the list of targets that can be serviced with 4585149Sgblack@eecs.umich.edu * the current response. Any writebacks that need to performed 4595149Sgblack@eecs.umich.edu * must be appended to the writebacks parameter. 
4605149Sgblack@eecs.umich.edu * 4615149Sgblack@eecs.umich.edu * @param mshr The MSHR that corresponds to the reponse 4625149Sgblack@eecs.umich.edu * @param pkt The response packet 4635149Sgblack@eecs.umich.edu * @param blk The reference block 4645149Sgblack@eecs.umich.edu * @param writebacks List of writebacks that need to be performed 4655149Sgblack@eecs.umich.edu */ 4665149Sgblack@eecs.umich.edu virtual void serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, 4675149Sgblack@eecs.umich.edu CacheBlk *blk, PacketList& writebacks) = 0; 4685149Sgblack@eecs.umich.edu 4695149Sgblack@eecs.umich.edu /** 4705149Sgblack@eecs.umich.edu * Handles a response (cache line fill/write ack) from the bus. 4715149Sgblack@eecs.umich.edu * @param pkt The response packet 4725149Sgblack@eecs.umich.edu */ 4735149Sgblack@eecs.umich.edu virtual void recvTimingResp(PacketPtr pkt); 4745149Sgblack@eecs.umich.edu 4755149Sgblack@eecs.umich.edu /** 4765149Sgblack@eecs.umich.edu * Snoops bus transactions to maintain coherence. 4775149Sgblack@eecs.umich.edu * @param pkt The current bus transaction. 4785149Sgblack@eecs.umich.edu */ 4795149Sgblack@eecs.umich.edu virtual void recvTimingSnoopReq(PacketPtr pkt) = 0; 4805149Sgblack@eecs.umich.edu 4815149Sgblack@eecs.umich.edu /** 4825149Sgblack@eecs.umich.edu * Handle a snoop response. 4835149Sgblack@eecs.umich.edu * @param pkt Snoop response packet 4845149Sgblack@eecs.umich.edu */ 4855149Sgblack@eecs.umich.edu virtual void recvTimingSnoopResp(PacketPtr pkt) = 0; 4865149Sgblack@eecs.umich.edu 4875149Sgblack@eecs.umich.edu /** 4885149Sgblack@eecs.umich.edu * Handle a request in atomic mode that missed in this cache 4895149Sgblack@eecs.umich.edu * 4905149Sgblack@eecs.umich.edu * Creates a downstream request, sends it to the memory below and 4915149Sgblack@eecs.umich.edu * handles the response. As we are in atomic mode all operations 4925149Sgblack@eecs.umich.edu * are performed immediately. 
4935149Sgblack@eecs.umich.edu * 4945149Sgblack@eecs.umich.edu * @param pkt The packet with the requests 4955149Sgblack@eecs.umich.edu * @param blk The referenced block 4965149Sgblack@eecs.umich.edu * @param writebacks A list with packets for any performed writebacks 4975149Sgblack@eecs.umich.edu * @return Cycles for handling the request 4985149Sgblack@eecs.umich.edu */ 4995149Sgblack@eecs.umich.edu virtual Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *blk, 5005149Sgblack@eecs.umich.edu PacketList &writebacks) = 0; 5015149Sgblack@eecs.umich.edu 5025149Sgblack@eecs.umich.edu /** 5035149Sgblack@eecs.umich.edu * Performs the access specified by the request. 5045149Sgblack@eecs.umich.edu * @param pkt The request to perform. 5055149Sgblack@eecs.umich.edu * @return The number of ticks required for the access. 5065149Sgblack@eecs.umich.edu */ 5075149Sgblack@eecs.umich.edu virtual Tick recvAtomic(PacketPtr pkt); 5085149Sgblack@eecs.umich.edu 5095149Sgblack@eecs.umich.edu /** 5105149Sgblack@eecs.umich.edu * Snoop for the provided request in the cache and return the estimated 5115149Sgblack@eecs.umich.edu * time taken. 5125149Sgblack@eecs.umich.edu * @param pkt The memory request to snoop 5135149Sgblack@eecs.umich.edu * @return The number of ticks required for the snoop. 5145149Sgblack@eecs.umich.edu */ 5155149Sgblack@eecs.umich.edu virtual Tick recvAtomicSnoop(PacketPtr pkt) = 0; 5165149Sgblack@eecs.umich.edu 5175149Sgblack@eecs.umich.edu /** 5185149Sgblack@eecs.umich.edu * Performs the access specified by the request. 5195149Sgblack@eecs.umich.edu * 5205149Sgblack@eecs.umich.edu * @param pkt The request to perform. 
5215149Sgblack@eecs.umich.edu * @param fromCpuSide from the CPU side port or the memory side port 5225149Sgblack@eecs.umich.edu */ 5235149Sgblack@eecs.umich.edu virtual void functionalAccess(PacketPtr pkt, bool from_cpu_side); 5245149Sgblack@eecs.umich.edu 5255149Sgblack@eecs.umich.edu /** 5265149Sgblack@eecs.umich.edu * Handle doing the Compare and Swap function for SPARC. 5275149Sgblack@eecs.umich.edu */ 5285149Sgblack@eecs.umich.edu void cmpAndSwap(CacheBlk *blk, PacketPtr pkt); 5295149Sgblack@eecs.umich.edu 5305149Sgblack@eecs.umich.edu /** 5315149Sgblack@eecs.umich.edu * Return the next queue entry to service, either a pending miss 5325149Sgblack@eecs.umich.edu * from the MSHR queue, a buffered write from the write buffer, or 5335149Sgblack@eecs.umich.edu * something from the prefetcher. This function is responsible 5345323Sgblack@eecs.umich.edu * for prioritizing among those sources on the fly. 5355323Sgblack@eecs.umich.edu */ 5365323Sgblack@eecs.umich.edu QueueEntry* getNextQueueEntry(); 5375323Sgblack@eecs.umich.edu 5385323Sgblack@eecs.umich.edu /** 5395323Sgblack@eecs.umich.edu * Insert writebacks into the write buffer 5405323Sgblack@eecs.umich.edu */ 5415323Sgblack@eecs.umich.edu virtual void doWritebacks(PacketList& writebacks, Tick forward_time) = 0; 5425357Sgblack@eecs.umich.edu 5435357Sgblack@eecs.umich.edu /** 5445357Sgblack@eecs.umich.edu * Send writebacks down the memory hierarchy in atomic mode 5455357Sgblack@eecs.umich.edu */ 5465357Sgblack@eecs.umich.edu virtual void doWritebacksAtomic(PacketList& writebacks) = 0; 5475357Sgblack@eecs.umich.edu 5485357Sgblack@eecs.umich.edu /** 5495357Sgblack@eecs.umich.edu * Create an appropriate downstream bus request packet. 5505837Sgblack@eecs.umich.edu * 5515837Sgblack@eecs.umich.edu * Creates a new packet with the request to be send to the memory 5525357Sgblack@eecs.umich.edu * below, or nullptr if the current request in cpu_pkt should just 5535357Sgblack@eecs.umich.edu * be forwarded on. 
5545357Sgblack@eecs.umich.edu * 5555357Sgblack@eecs.umich.edu * @param cpu_pkt The miss packet that needs to be satisfied. 5565323Sgblack@eecs.umich.edu * @param blk The referenced block, can be nullptr. 5575149Sgblack@eecs.umich.edu * @param needs_writable Indicates that the block must be writable 5585149Sgblack@eecs.umich.edu * even if the request in cpu_pkt doesn't indicate that. 5595149Sgblack@eecs.umich.edu * @return A packet send to the memory below 5605149Sgblack@eecs.umich.edu */ 5615149Sgblack@eecs.umich.edu virtual PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk, 5625124Sgblack@eecs.umich.edu bool needs_writable) const = 0; 5635140Sgblack@eecs.umich.edu 5645140Sgblack@eecs.umich.edu /** 5655140Sgblack@eecs.umich.edu * Determine if clean lines should be written back or not. In 5665140Sgblack@eecs.umich.edu * cases where a downstream cache is mostly inclusive we likely 5675140Sgblack@eecs.umich.edu * want it to act as a victim cache also for lines that have not 5685140Sgblack@eecs.umich.edu * been modified. Hence, we cannot simply drop the line (or send a 5695237Sgblack@eecs.umich.edu * clean evict), but rather need to send the actual data. 5705140Sgblack@eecs.umich.edu */ 5715140Sgblack@eecs.umich.edu const bool writebackClean; 5725140Sgblack@eecs.umich.edu 5735140Sgblack@eecs.umich.edu /** 5745237Sgblack@eecs.umich.edu * Writebacks from the tempBlock, resulting on the response path 5755431Sgblack@eecs.umich.edu * in atomic mode, must happen after the call to recvAtomic has 5765431Sgblack@eecs.umich.edu * finished (for the right ordering of the packets). We therefore 5775431Sgblack@eecs.umich.edu * need to hold on to the packets, and have a method and an event 5785433Sgblack@eecs.umich.edu * to send them. 5795433Sgblack@eecs.umich.edu */ 5805433Sgblack@eecs.umich.edu PacketPtr tempBlockWriteback; 5815433Sgblack@eecs.umich.edu 5825433Sgblack@eecs.umich.edu /** 5835433Sgblack@eecs.umich.edu * Send the outstanding tempBlock writeback. 
To be called after 5845433Sgblack@eecs.umich.edu * recvAtomic finishes in cases where the block we filled is in 5855433Sgblack@eecs.umich.edu * fact the tempBlock, and now needs to be written back. 5865433Sgblack@eecs.umich.edu */ 5875140Sgblack@eecs.umich.edu void writebackTempBlockAtomic() { 5885140Sgblack@eecs.umich.edu assert(tempBlockWriteback != nullptr); 5895433Sgblack@eecs.umich.edu PacketList writebacks{tempBlockWriteback}; 5905237Sgblack@eecs.umich.edu doWritebacksAtomic(writebacks); 5915140Sgblack@eecs.umich.edu tempBlockWriteback = nullptr; 5925140Sgblack@eecs.umich.edu } 5935140Sgblack@eecs.umich.edu 5945140Sgblack@eecs.umich.edu /** 5955140Sgblack@eecs.umich.edu * An event to writeback the tempBlock after recvAtomic 5965140Sgblack@eecs.umich.edu * finishes. To avoid other calls to recvAtomic getting in 5975140Sgblack@eecs.umich.edu * between, we create this event with a higher priority. 5985140Sgblack@eecs.umich.edu */ 5995140Sgblack@eecs.umich.edu EventFunctionWrapper writebackTempBlockAtomicEvent; 6005140Sgblack@eecs.umich.edu 6015140Sgblack@eecs.umich.edu /** 6025140Sgblack@eecs.umich.edu * Perform any necessary updates to the block and perform any data 6035140Sgblack@eecs.umich.edu * exchange between the packet and the block. The flags of the 6045140Sgblack@eecs.umich.edu * packet are also set accordingly. 
6055140Sgblack@eecs.umich.edu * 6065140Sgblack@eecs.umich.edu * @param pkt Request packet from upstream that hit a block 6075140Sgblack@eecs.umich.edu * @param blk Cache block that the packet hit 6085140Sgblack@eecs.umich.edu * @param deferred_response Whether this request originally missed 6095140Sgblack@eecs.umich.edu * @param pending_downgrade Whether the writable flag is to be removed 6105140Sgblack@eecs.umich.edu */ 6115140Sgblack@eecs.umich.edu virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk, 6125140Sgblack@eecs.umich.edu bool deferred_response = false, 6135140Sgblack@eecs.umich.edu bool pending_downgrade = false); 6145140Sgblack@eecs.umich.edu 6155237Sgblack@eecs.umich.edu /** 6165140Sgblack@eecs.umich.edu * Maintain the clusivity of this cache by potentially 6175140Sgblack@eecs.umich.edu * invalidating a block. This method works in conjunction with 6185140Sgblack@eecs.umich.edu * satisfyRequest, but is separate to allow us to handle all MSHR 6195140Sgblack@eecs.umich.edu * targets before potentially dropping a block. 6205140Sgblack@eecs.umich.edu * 6215140Sgblack@eecs.umich.edu * @param from_cache Whether we have dealt with a packet from a cache 6225237Sgblack@eecs.umich.edu * @param blk The block that should potentially be dropped 6235237Sgblack@eecs.umich.edu */ 6245237Sgblack@eecs.umich.edu void maintainClusivity(bool from_cache, CacheBlk *blk); 6255140Sgblack@eecs.umich.edu 6265140Sgblack@eecs.umich.edu /** 6275140Sgblack@eecs.umich.edu * Handle a fill operation caused by a received packet. 6285140Sgblack@eecs.umich.edu * 6295237Sgblack@eecs.umich.edu * Populates a cache block and handles all outstanding requests for the 6305237Sgblack@eecs.umich.edu * satisfied fill request. This version takes two memory requests. One 6315140Sgblack@eecs.umich.edu * contains the fill data, the other is an optional target to satisfy. 
6325140Sgblack@eecs.umich.edu * Note that the reason we return a list of writebacks rather than 6335124Sgblack@eecs.umich.edu * inserting them directly in the write buffer is that this function 6345140Sgblack@eecs.umich.edu * is called by both atomic and timing-mode accesses, and in atomic 6355237Sgblack@eecs.umich.edu * mode we don't mess with the write buffer (we just perform the 6365237Sgblack@eecs.umich.edu * writebacks atomically once the original request is complete). 6375140Sgblack@eecs.umich.edu * 6385124Sgblack@eecs.umich.edu * @param pkt The memory request with the fill data. 6395360Sgblack@eecs.umich.edu * @param blk The cache block if it already exists. 6405374Sgblack@eecs.umich.edu * @param writebacks List for any writebacks that need to be performed. 6415360Sgblack@eecs.umich.edu * @param allocate Whether to allocate a block or use the temp block 6425648Sgblack@eecs.umich.edu * @return Pointer to the new cache block. 6435360Sgblack@eecs.umich.edu */ 6445648Sgblack@eecs.umich.edu CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk, 6455417Sgblack@eecs.umich.edu PacketList &writebacks, bool allocate); 6465417Sgblack@eecs.umich.edu 6475417Sgblack@eecs.umich.edu /** 6485417Sgblack@eecs.umich.edu * Allocate a new block and perform any necessary writebacks 6495360Sgblack@eecs.umich.edu * 6505360Sgblack@eecs.umich.edu * Find a victim block and if necessary prepare writebacks for any 6515360Sgblack@eecs.umich.edu * existing data. May return nullptr if there are no replaceable 6525360Sgblack@eecs.umich.edu * blocks. 
6535360Sgblack@eecs.umich.edu * 6545360Sgblack@eecs.umich.edu * @param addr Physical address of the new block 6555417Sgblack@eecs.umich.edu * @param is_secure Set if the block should be secure 6565648Sgblack@eecs.umich.edu * @param writebacks A list of writeback packets for the evicted blocks 6575736Snate@binkert.org * @return the allocated block 6585714Shsul@eecs.umich.edu */ 6595360Sgblack@eecs.umich.edu CacheBlk *allocateBlock(Addr addr, bool is_secure, PacketList &writebacks); 6605374Sgblack@eecs.umich.edu /** 6615086Sgblack@eecs.umich.edu * Evict a cache block. 6625086Sgblack@eecs.umich.edu * 6635086Sgblack@eecs.umich.edu * Performs a writeback if necesssary and invalidates the block 6645140Sgblack@eecs.umich.edu * 6655140Sgblack@eecs.umich.edu * @param blk Block to invalidate 6665140Sgblack@eecs.umich.edu * @return A packet with the writeback, can be nullptr 6675140Sgblack@eecs.umich.edu */ 6685140Sgblack@eecs.umich.edu M5_NODISCARD virtual PacketPtr evictBlock(CacheBlk *blk) = 0; 6695140Sgblack@eecs.umich.edu 6705140Sgblack@eecs.umich.edu /** 6715140Sgblack@eecs.umich.edu * Evict a cache block. 6725140Sgblack@eecs.umich.edu * 6735140Sgblack@eecs.umich.edu * Performs a writeback if necesssary and invalidates the block 6745140Sgblack@eecs.umich.edu * 6755140Sgblack@eecs.umich.edu * @param blk Block to invalidate 6765086Sgblack@eecs.umich.edu * @param writebacks Return a list of packets with writebacks 6775086Sgblack@eecs.umich.edu */ 6785086Sgblack@eecs.umich.edu virtual void evictBlock(CacheBlk *blk, PacketList &writebacks) = 0; 6795086Sgblack@eecs.umich.edu 6805086Sgblack@eecs.umich.edu /** 6815100Ssaidi@eecs.umich.edu * Invalidate a cache block. 
6825086Sgblack@eecs.umich.edu * 6835086Sgblack@eecs.umich.edu * @param blk Block to invalidate 6845086Sgblack@eecs.umich.edu */ 6855086Sgblack@eecs.umich.edu void invalidateBlock(CacheBlk *blk); 6865086Sgblack@eecs.umich.edu 6875100Ssaidi@eecs.umich.edu /** 6885086Sgblack@eecs.umich.edu * Create a writeback request for the given block. 6895086Sgblack@eecs.umich.edu * 6905086Sgblack@eecs.umich.edu * @param blk The block to writeback. 6915086Sgblack@eecs.umich.edu * @return The writeback request for the block. 6925086Sgblack@eecs.umich.edu */ 6935086Sgblack@eecs.umich.edu PacketPtr writebackBlk(CacheBlk *blk); 6945086Sgblack@eecs.umich.edu 6955086Sgblack@eecs.umich.edu /** 6965086Sgblack@eecs.umich.edu * Create a writeclean request for the given block. 6975086Sgblack@eecs.umich.edu * 6985086Sgblack@eecs.umich.edu * Creates a request that writes the block to the cache below 6995086Sgblack@eecs.umich.edu * without evicting the block from the current cache. 7005086Sgblack@eecs.umich.edu * 7015086Sgblack@eecs.umich.edu * @param blk The block to write clean. 7025086Sgblack@eecs.umich.edu * @param dest The destination of the write clean operation. 7035086Sgblack@eecs.umich.edu * @param id Use the given packet id for the write clean operation. 7045086Sgblack@eecs.umich.edu * @return The generated write clean packet. 7055086Sgblack@eecs.umich.edu */ 7065086Sgblack@eecs.umich.edu PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id); 7075086Sgblack@eecs.umich.edu 7085086Sgblack@eecs.umich.edu /** 7095086Sgblack@eecs.umich.edu * Write back dirty blocks in the cache using functional accesses. 7105086Sgblack@eecs.umich.edu */ 7115086Sgblack@eecs.umich.edu virtual void memWriteback() override; 7125086Sgblack@eecs.umich.edu 7135086Sgblack@eecs.umich.edu /** 7145086Sgblack@eecs.umich.edu * Invalidates all blocks in the cache. 
7155086Sgblack@eecs.umich.edu * 7164997Sgblack@eecs.umich.edu * @warn Dirty cache lines will not be written back to 7174997Sgblack@eecs.umich.edu * memory. Make sure to call functionalWriteback() first if you 7184997Sgblack@eecs.umich.edu * want the to write them to memory. 7195038Sgblack@eecs.umich.edu */ 7204997Sgblack@eecs.umich.edu virtual void memInvalidate() override; 7214997Sgblack@eecs.umich.edu 7224997Sgblack@eecs.umich.edu /** 7234997Sgblack@eecs.umich.edu * Determine if there are any dirty blocks in the cache. 7244997Sgblack@eecs.umich.edu * 7255038Sgblack@eecs.umich.edu * @return true if at least one block is dirty, false otherwise. 7264997Sgblack@eecs.umich.edu */ 727 bool isDirty() const; 728 729 /** 730 * Determine if an address is in the ranges covered by this 731 * cache. This is useful to filter snoops. 732 * 733 * @param addr Address to check against 734 * 735 * @return If the address in question is in range 736 */ 737 bool inRange(Addr addr) const; 738 739 /** 740 * Find next request ready time from among possible sources. 741 */ 742 Tick nextQueueReadyTime() const; 743 744 /** Block size of this cache */ 745 const unsigned blkSize; 746 747 /** 748 * The latency of tag lookup of a cache. It occurs when there is 749 * an access to the cache. 750 */ 751 const Cycles lookupLatency; 752 753 /** 754 * The latency of data access of a cache. It occurs when there is 755 * an access to the cache. 756 */ 757 const Cycles dataLatency; 758 759 /** 760 * This is the forward latency of the cache. It occurs when there 761 * is a cache miss and a request is forwarded downstream, in 762 * particular an outbound miss. 763 */ 764 const Cycles forwardLatency; 765 766 /** The latency to fill a cache block */ 767 const Cycles fillLatency; 768 769 /** 770 * The latency of sending reponse to its upper level cache/core on 771 * a linefill. The responseLatency parameter captures this 772 * latency. 
773 */ 774 const Cycles responseLatency; 775 776 /** The number of targets for each MSHR. */ 777 const int numTarget; 778 779 /** Do we forward snoops from mem side port through to cpu side port? */ 780 bool forwardSnoops; 781 782 /** 783 * Clusivity with respect to the upstream cache, determining if we 784 * fill into both this cache and the cache above on a miss. Note 785 * that we currently do not support strict clusivity policies. 786 */ 787 const Enums::Clusivity clusivity; 788 789 /** 790 * Is this cache read only, for example the instruction cache, or 791 * table-walker cache. A cache that is read only should never see 792 * any writes, and should never get any dirty data (and hence 793 * never have to do any writebacks). 794 */ 795 const bool isReadOnly; 796 797 /** 798 * Bit vector of the blocking reasons for the access path. 799 * @sa #BlockedCause 800 */ 801 uint8_t blocked; 802 803 /** Increasing order number assigned to each incoming request. */ 804 uint64_t order; 805 806 /** Stores time the cache blocked for statistics. */ 807 Cycles blockedCycle; 808 809 /** Pointer to the MSHR that has no targets. */ 810 MSHR *noTargetMSHR; 811 812 /** The number of misses to trigger an exit event. */ 813 Counter missCount; 814 815 /** 816 * The address range to which the cache responds on the CPU side. 817 * Normally this is all possible memory addresses. */ 818 const AddrRangeList addrRanges; 819 820 public: 821 /** System we are currently operating in. */ 822 System *system; 823 824 // Statistics 825 /** 826 * @addtogroup CacheStatistics 827 * @{ 828 */ 829 830 /** Number of hits per thread for each type of command. 831 @sa Packet::Command */ 832 Stats::Vector hits[MemCmd::NUM_MEM_CMDS]; 833 /** Number of hits for demand accesses. */ 834 Stats::Formula demandHits; 835 /** Number of hit for all accesses. */ 836 Stats::Formula overallHits; 837 838 /** Number of misses per thread for each type of command. 
839 @sa Packet::Command */ 840 Stats::Vector misses[MemCmd::NUM_MEM_CMDS]; 841 /** Number of misses for demand accesses. */ 842 Stats::Formula demandMisses; 843 /** Number of misses for all accesses. */ 844 Stats::Formula overallMisses; 845 846 /** 847 * Total number of cycles per thread/command spent waiting for a miss. 848 * Used to calculate the average miss latency. 849 */ 850 Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS]; 851 /** Total number of cycles spent waiting for demand misses. */ 852 Stats::Formula demandMissLatency; 853 /** Total number of cycles spent waiting for all misses. */ 854 Stats::Formula overallMissLatency; 855 856 /** The number of accesses per command and thread. */ 857 Stats::Formula accesses[MemCmd::NUM_MEM_CMDS]; 858 /** The number of demand accesses. */ 859 Stats::Formula demandAccesses; 860 /** The number of overall accesses. */ 861 Stats::Formula overallAccesses; 862 863 /** The miss rate per command and thread. */ 864 Stats::Formula missRate[MemCmd::NUM_MEM_CMDS]; 865 /** The miss rate of all demand accesses. */ 866 Stats::Formula demandMissRate; 867 /** The miss rate for all accesses. */ 868 Stats::Formula overallMissRate; 869 870 /** The average miss latency per command and thread. */ 871 Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS]; 872 /** The average miss latency for demand misses. */ 873 Stats::Formula demandAvgMissLatency; 874 /** The average miss latency for all misses. */ 875 Stats::Formula overallAvgMissLatency; 876 877 /** The total number of cycles blocked for each blocked cause. */ 878 Stats::Vector blocked_cycles; 879 /** The number of times this cache blocked for each blocked cause. */ 880 Stats::Vector blocked_causes; 881 882 /** The average number of cycles blocked for each blocked cause. */ 883 Stats::Formula avg_blocked; 884 885 /** The number of times a HW-prefetched block is evicted w/o reference. */ 886 Stats::Scalar unusedPrefetches; 887 888 /** Number of blocks written back per thread. 
*/ 889 Stats::Vector writebacks; 890 891 /** Number of misses that hit in the MSHRs per command and thread. */ 892 Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS]; 893 /** Demand misses that hit in the MSHRs. */ 894 Stats::Formula demandMshrHits; 895 /** Total number of misses that hit in the MSHRs. */ 896 Stats::Formula overallMshrHits; 897 898 /** Number of misses that miss in the MSHRs, per command and thread. */ 899 Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS]; 900 /** Demand misses that miss in the MSHRs. */ 901 Stats::Formula demandMshrMisses; 902 /** Total number of misses that miss in the MSHRs. */ 903 Stats::Formula overallMshrMisses; 904 905 /** Number of misses that miss in the MSHRs, per command and thread. */ 906 Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS]; 907 /** Total number of misses that miss in the MSHRs. */ 908 Stats::Formula overallMshrUncacheable; 909 910 /** Total cycle latency of each MSHR miss, per command and thread. */ 911 Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS]; 912 /** Total cycle latency of demand MSHR misses. */ 913 Stats::Formula demandMshrMissLatency; 914 /** Total cycle latency of overall MSHR misses. */ 915 Stats::Formula overallMshrMissLatency; 916 917 /** Total cycle latency of each MSHR miss, per command and thread. */ 918 Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS]; 919 /** Total cycle latency of overall MSHR misses. */ 920 Stats::Formula overallMshrUncacheableLatency; 921 922#if 0 923 /** The total number of MSHR accesses per command and thread. */ 924 Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS]; 925 /** The total number of demand MSHR accesses. */ 926 Stats::Formula demandMshrAccesses; 927 /** The total number of MSHR accesses. */ 928 Stats::Formula overallMshrAccesses; 929#endif 930 931 /** The miss rate in the MSHRs pre command and thread. */ 932 Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS]; 933 /** The demand miss rate in the MSHRs. 
*/ 934 Stats::Formula demandMshrMissRate; 935 /** The overall miss rate in the MSHRs. */ 936 Stats::Formula overallMshrMissRate; 937 938 /** The average latency of an MSHR miss, per command and thread. */ 939 Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS]; 940 /** The average latency of a demand MSHR miss. */ 941 Stats::Formula demandAvgMshrMissLatency; 942 /** The average overall latency of an MSHR miss. */ 943 Stats::Formula overallAvgMshrMissLatency; 944 945 /** The average latency of an MSHR miss, per command and thread. */ 946 Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS]; 947 /** The average overall latency of an MSHR miss. */ 948 Stats::Formula overallAvgMshrUncacheableLatency; 949 950 /** Number of replacements of valid blocks. */ 951 Stats::Scalar replacements; 952 953 /** 954 * @} 955 */ 956 957 /** 958 * Register stats for this object. 959 */ 960 void regStats() override; 961 962 public: 963 BaseCache(const BaseCacheParams *p, unsigned blk_size); 964 ~BaseCache(); 965 966 void init() override; 967 968 BaseMasterPort &getMasterPort(const std::string &if_name, 969 PortID idx = InvalidPortID) override; 970 BaseSlavePort &getSlavePort(const std::string &if_name, 971 PortID idx = InvalidPortID) override; 972 973 /** 974 * Query block size of a cache. 
975 * @return The block size 976 */ 977 unsigned 978 getBlockSize() const 979 { 980 return blkSize; 981 } 982 983 const AddrRangeList &getAddrRanges() const { return addrRanges; } 984 985 MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send = true) 986 { 987 MSHR *mshr = mshrQueue.allocate(pkt->getBlockAddr(blkSize), blkSize, 988 pkt, time, order++, 989 allocOnFill(pkt->cmd)); 990 991 if (mshrQueue.isFull()) { 992 setBlocked((BlockedCause)MSHRQueue_MSHRs); 993 } 994 995 if (sched_send) { 996 // schedule the send 997 schedMemSideSendEvent(time); 998 } 999 1000 return mshr; 1001 } 1002 1003 void allocateWriteBuffer(PacketPtr pkt, Tick time) 1004 { 1005 // should only see writes or clean evicts here 1006 assert(pkt->isWrite() || pkt->cmd == MemCmd::CleanEvict); 1007 1008 Addr blk_addr = pkt->getBlockAddr(blkSize); 1009 1010 WriteQueueEntry *wq_entry = 1011 writeBuffer.findMatch(blk_addr, pkt->isSecure()); 1012 if (wq_entry && !wq_entry->inService) { 1013 DPRINTF(Cache, "Potential to merge writeback %s", pkt->print()); 1014 } 1015 1016 writeBuffer.allocate(blk_addr, blkSize, pkt, time, order++); 1017 1018 if (writeBuffer.isFull()) { 1019 setBlocked((BlockedCause)MSHRQueue_WriteBuffer); 1020 } 1021 1022 // schedule the send 1023 schedMemSideSendEvent(time); 1024 } 1025 1026 /** 1027 * Returns true if the cache is blocked for accesses. 1028 */ 1029 bool isBlocked() const 1030 { 1031 return blocked != 0; 1032 } 1033 1034 /** 1035 * Marks the access path of the cache as blocked for the given cause. This 1036 * also sets the blocked flag in the slave interface. 1037 * @param cause The reason for the cache blocking. 
1038 */ 1039 void setBlocked(BlockedCause cause) 1040 { 1041 uint8_t flag = 1 << cause; 1042 if (blocked == 0) { 1043 blocked_causes[cause]++; 1044 blockedCycle = curCycle(); 1045 cpuSidePort.setBlocked(); 1046 } 1047 blocked |= flag; 1048 DPRINTF(Cache,"Blocking for cause %d, mask=%d\n", cause, blocked); 1049 } 1050 1051 /** 1052 * Marks the cache as unblocked for the given cause. This also clears the 1053 * blocked flags in the appropriate interfaces. 1054 * @param cause The newly unblocked cause. 1055 * @warning Calling this function can cause a blocked request on the bus to 1056 * access the cache. The cache must be in a state to handle that request. 1057 */ 1058 void clearBlocked(BlockedCause cause) 1059 { 1060 uint8_t flag = 1 << cause; 1061 blocked &= ~flag; 1062 DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked); 1063 if (blocked == 0) { 1064 blocked_cycles[cause] += curCycle() - blockedCycle; 1065 cpuSidePort.clearBlocked(); 1066 } 1067 } 1068 1069 /** 1070 * Schedule a send event for the memory-side port. If already 1071 * scheduled, this may reschedule the event at an earlier 1072 * time. When the specified time is reached, the port is free to 1073 * send either a response, a request, or a prefetch request. 1074 * 1075 * @param time The time when to attempt sending a packet. 
1076 */ 1077 void schedMemSideSendEvent(Tick time) 1078 { 1079 memSidePort.schedSendEvent(time); 1080 } 1081 1082 bool inCache(Addr addr, bool is_secure) const { 1083 return tags->findBlock(addr, is_secure); 1084 } 1085 1086 bool inMissQueue(Addr addr, bool is_secure) const { 1087 return mshrQueue.findMatch(addr, is_secure); 1088 } 1089 1090 void incMissCount(PacketPtr pkt) 1091 { 1092 assert(pkt->req->masterId() < system->maxMasters()); 1093 misses[pkt->cmdToIndex()][pkt->req->masterId()]++; 1094 pkt->req->incAccessDepth(); 1095 if (missCount) { 1096 --missCount; 1097 if (missCount == 0) 1098 exitSimLoop("A cache reached the maximum miss count"); 1099 } 1100 } 1101 void incHitCount(PacketPtr pkt) 1102 { 1103 assert(pkt->req->masterId() < system->maxMasters()); 1104 hits[pkt->cmdToIndex()][pkt->req->masterId()]++; 1105 1106 } 1107 1108 /** 1109 * Cache block visitor that writes back dirty cache blocks using 1110 * functional writes. 1111 * 1112 * @return Always returns true. 1113 */ 1114 bool writebackVisitor(CacheBlk &blk); 1115 1116 /** 1117 * Cache block visitor that invalidates all blocks in the cache. 1118 * 1119 * @warn Dirty cache lines will not be written back to memory. 1120 * 1121 * @return Always returns true. 1122 */ 1123 bool invalidateVisitor(CacheBlk &blk); 1124 1125 /** 1126 * Take an MSHR, turn it into a suitable downstream packet, and 1127 * send it out. This construct allows a queue entry to choose a suitable 1128 * approach based on its type. 1129 * 1130 * @param mshr The MSHR to turn into a packet and send 1131 * @return True if the port is waiting for a retry 1132 */ 1133 virtual bool sendMSHRQueuePacket(MSHR* mshr); 1134 1135 /** 1136 * Similar to sendMSHR, but for a write-queue entry 1137 * instead. Create the packet, and send it, and if successful also 1138 * mark the entry in service. 
1139 * 1140 * @param wq_entry The write-queue entry to turn into a packet and send 1141 * @return True if the port is waiting for a retry 1142 */ 1143 bool sendWriteQueuePacket(WriteQueueEntry* wq_entry); 1144 1145 /** 1146 * Serialize the state of the caches 1147 * 1148 * We currently don't support checkpointing cache state, so this panics. 1149 */ 1150 void serialize(CheckpointOut &cp) const override; 1151 void unserialize(CheckpointIn &cp) override; 1152 1153}; 1154 1155/** 1156 * Wrap a method and present it as a cache block visitor. 1157 * 1158 * For example the forEachBlk method in the tag arrays expects a 1159 * callable object/function as their parameter. This class wraps a 1160 * method in an object and presents callable object that adheres to 1161 * the cache block visitor protocol. 1162 */ 1163class CacheBlkVisitorWrapper : public CacheBlkVisitor 1164{ 1165 public: 1166 typedef bool (BaseCache::*VisitorPtr)(CacheBlk &blk); 1167 1168 CacheBlkVisitorWrapper(BaseCache &_cache, VisitorPtr _visitor) 1169 : cache(_cache), visitor(_visitor) {} 1170 1171 bool operator()(CacheBlk &blk) override { 1172 return (cache.*visitor)(blk); 1173 } 1174 1175 private: 1176 BaseCache &cache; 1177 VisitorPtr visitor; 1178}; 1179 1180/** 1181 * Cache block visitor that determines if there are dirty blocks in a 1182 * cache. 1183 * 1184 * Use with the forEachBlk method in the tag array to determine if the 1185 * array contains dirty blocks. 1186 */ 1187class CacheBlkIsDirtyVisitor : public CacheBlkVisitor 1188{ 1189 public: 1190 CacheBlkIsDirtyVisitor() 1191 : _isDirty(false) {} 1192 1193 bool operator()(CacheBlk &blk) override { 1194 if (blk.isDirty()) { 1195 _isDirty = true; 1196 return false; 1197 } else { 1198 return true; 1199 } 1200 } 1201 1202 /** 1203 * Does the array contain a dirty line? 1204 * 1205 * @return true if yes, false otherwise. 
1206 */ 1207 bool isDirty() const { return _isDirty; }; 1208 1209 private: 1210 bool _isDirty; 1211}; 1212 1213#endif //__MEM_CACHE_BASE_HH__ 1214