// base.hh revision 8662
/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Ron Dreslinski
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __BASE_CACHE_HH__
#define __BASE_CACHE_HH__

#include <algorithm>
#include <list>
#include <string>
#include <vector>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "config/full_system.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "mem/cache/mshr_queue.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/request.hh"
#include "mem/tport.hh"
#include "params/BaseCache.hh"
#include "sim/eventq.hh"
#include "sim/sim_exit.hh"

class MSHR;
/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
    /**
     * Indexes to enumerate the MSHR queues.
     */
    enum MSHRQueueIndex {
        MSHRQueue_MSHRs,
        MSHRQueue_WriteBuffer
    };

    /**
     * Reasons for caches to be blocked.
     * The first two values deliberately alias MSHRQueueIndex so a full
     * queue can be cast directly to its blocking cause (see
     * allocateBufferInternal / markInServiceInternal).
     */
    enum BlockedCause {
        Blocked_NoMSHRs = MSHRQueue_MSHRs,
        Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
        Blocked_NoTargets,
        NUM_BLOCKED_CAUSES
    };

  public:
    /**
     * Reasons for cache to request a bus.
     * The first two values deliberately alias MSHRQueueIndex (same trick
     * as BlockedCause) so a queue index casts directly to a request cause.
     */
    enum RequestCause {
        Request_MSHR = MSHRQueue_MSHRs,
        Request_WB = MSHRQueue_WriteBuffer,
        Request_PF,
        NUM_REQUEST_CAUSES
    };

  private:

    /**
     * A cache-side port specialization of SimpleTimingPort; used for both
     * the CPU-side and memory-side ports.  Tracks blocked/retry state and
     * knows about its "other" (opposite-side) port.
     */
    class CachePort : public SimpleTimingPort
    {
      public:
        /** Back-pointer to the owning cache. */
        BaseCache *cache;

      protected:
        CachePort(const std::string &_name, BaseCache *_cache,
                  const std::string &_label);

        virtual void recvStatusChange(Status status);

        virtual unsigned deviceBlockSize() const;

        bool recvRetryCommon();

        typedef EventWrapper<Port, &Port::sendRetry>
            SendRetryEvent;

        /** Label used for trace output (passed in at construction). */
        const std::string label;

      public:
        /** Record the opposite-side port so the two can be linked up. */
        void setOtherPort(CachePort *_otherPort) { otherPort = _otherPort; }

        void setBlocked();

        void clearBlocked();

        bool checkFunctional(PacketPtr pkt);

        /** The port on the other side of the cache (cpu<->mem). */
        CachePort *otherPort;

        /** True while this port is blocked and must not accept packets. */
        bool blocked;

        /** True if a retry must be sent once the port unblocks. */
        bool mustSendRetry;

        /**
         * Ask for the bus at the given time by scheduling a send event,
         * unless we are already waiting on a retry (in which case the
         * pending retry will trigger the send).
         * @param cause The reason for the request (trace output only here).
         * @param time The time to make the request.
         */
        void requestBus(RequestCause cause, Tick time)
        {
            DPRINTF(CachePort, "Asserting bus request for cause %d\n", cause);
            if (!waitingOnRetry) {
                schedSendEvent(time);
            }
        }

        /** Schedule a timing response packet to be sent at @p time. */
        void respond(PacketPtr pkt, Tick time) {
            schedSendTiming(pkt, time);
        }
    };

  public: //Made public so coherence can get at it.
    CachePort *cpuSidePort;
    CachePort *memSidePort;

  protected:

    /** Miss status registers */
    MSHRQueue mshrQueue;

    /** Write/writeback buffer */
    MSHRQueue writeBuffer;

    /**
     * Allocate an entry in the given MSHR queue, tagged with the next
     * request order number.  If the allocation fills the queue, block the
     * cache for the corresponding cause; optionally request the
     * memory-side bus.
     * @param mq The queue (mshrQueue or writeBuffer) to allocate from.
     * @param addr The (block-aligned for cacheable misses) address.
     * @param size The request size in bytes.
     * @param pkt The packet to associate with the new entry.
     * @param time The time of the allocation.
     * @param requestBus Whether to also request the memory-side bus.
     * @return The newly allocated MSHR.
     */
    MSHR *allocateBufferInternal(MSHRQueue *mq, Addr addr, int size,
                                 PacketPtr pkt, Tick time, bool requestBus)
    {
        MSHR *mshr = mq->allocate(addr, size, pkt, time, order++);

        if (mq->isFull()) {
            setBlocked((BlockedCause)mq->index);
        }

        if (requestBus) {
            requestMemSideBus((RequestCause)mq->index, time);
        }

        return mshr;
    }

    /**
     * Mark an MSHR as in service in its owning queue, and unblock the
     * cache if that transition freed up space in a previously full queue.
     */
    void markInServiceInternal(MSHR *mshr, PacketPtr pkt)
    {
        MSHRQueue *mq = mshr->queue;
        bool wasFull = mq->isFull();
        mq->markInService(mshr, pkt);
        if (wasFull && !mq->isFull()) {
            clearBlocked((BlockedCause)mq->index);
        }
    }

    /** Block size of this cache */
    const unsigned blkSize;

    /**
     * The latency of a hit in this device.
     */
    int hitLatency;

    /** The number of targets for each MSHR. */
    const int numTarget;

    /** Do we forward snoops from mem side port through to cpu side port? */
    bool forwardSnoops;

    /** Is this cache a toplevel cache (e.g. L1, I/O cache). If so we should
     * never try to forward ownership and similar optimizations to the cpu
     * side */
    bool isTopLevel;

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /** Increasing order number assigned to each incoming request. */
    uint64_t order;

    /** Stores time the cache blocked for statistics. */
    Tick blockedCycle;

    /** Pointer to the MSHR that has no targets. */
    MSHR *noTargetMSHR;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /** The drain event. */
    Event *drainEvent;

    /**
     * The address range to which the cache responds on the CPU side.
     * Normally this is all possible memory addresses. */
    Range<Addr> addrRange;

    /** number of cpus sharing this cache - from config file */
    int _numCpus;

  public:
    /** Number of CPUs sharing this cache (from the config file). */
    int numCpus() { return _numCpus; }
    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command. @sa Packet::Command */
    Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hit for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command. @sa Packet::Command */
    Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of fast writes (WH64) performed. */
    Stats::Scalar fastWrites;

    /** The number of cache copies performed. */
    Stats::Scalar cacheCopies;

    /** Number of blocks written back per thread. */
    Stats::Vector writebacks;

    /** Number of misses that hit in the MSHRs per command and thread. */
    Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that hit in the MSHRs. */
    Stats::Formula demandMshrHits;
    /** Total number of misses that hit in the MSHRs. */
    Stats::Formula overallMshrHits;

    /** Number of misses that miss in the MSHRs, per command and thread. */
    Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that miss in the MSHRs. */
    Stats::Formula demandMshrMisses;
    /** Total number of misses that miss in the MSHRs. */
    Stats::Formula overallMshrMisses;

    /** Number of misses that miss in the MSHRs, per command and thread. */
    Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
    /** Total number of misses that miss in the MSHRs. */
    Stats::Formula overallMshrUncacheable;

    /** Total cycle latency of each MSHR miss, per command and thread. */
    Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of demand MSHR misses. */
    Stats::Formula demandMshrMissLatency;
    /** Total cycle latency of overall MSHR misses. */
    Stats::Formula overallMshrMissLatency;

    /** Total cycle latency of each MSHR miss, per command and thread. */
    Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of overall MSHR misses. */
    Stats::Formula overallMshrUncacheableLatency;

#if 0
    /** The total number of MSHR accesses per command and thread. */
    Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
    /** The total number of demand MSHR accesses. */
    Stats::Formula demandMshrAccesses;
    /** The total number of MSHR accesses. */
    Stats::Formula overallMshrAccesses;
#endif

    /** The miss rate in the MSHRs pre command and thread. */
    Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
    /** The demand miss rate in the MSHRs. */
    Stats::Formula demandMshrMissRate;
    /** The overall miss rate in the MSHRs. */
    Stats::Formula overallMshrMissRate;

    /** The average latency of an MSHR miss, per command and thread. */
    Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average latency of a demand MSHR miss. */
    Stats::Formula demandAvgMshrMissLatency;
    /** The average overall latency of an MSHR miss. */
    Stats::Formula overallAvgMshrMissLatency;

    /** The average latency of an MSHR miss, per command and thread. */
    Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
    /** The average overall latency of an MSHR miss. */
    Stats::Formula overallAvgMshrUncacheableLatency;

    /** The number of times a thread hit its MSHR cap. */
    Stats::Vector mshr_cap_events;
    /** The number of times software prefetches caused the MSHR to block. */
    Stats::Vector soft_prefetch_mshr_full;

    /** The number of misses for which no MSHR was allocated. */
    Stats::Scalar mshr_no_allocate_misses;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    virtual void regStats();

  public:
    typedef BaseCacheParams Params;
    BaseCache(const Params *p);
    ~BaseCache() {}

    virtual void init();

    /**
     * Query block size of a cache.
     * @return  The block size
     */
    unsigned
    getBlockSize() const
    {
        return blkSize;
    }


    /** Round an address down to the start of its cache block. */
    Addr blockAlign(Addr addr) const { return (addr & ~(Addr(blkSize - 1))); }


    /** Return the address range this cache responds to on the CPU side. */
    const Range<Addr> &getAddrRange() const { return addrRange; }

    /**
     * Allocate an MSHR for a cacheable miss; the address is block-aligned
     * and the size is the cache block size.
     */
    MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool requestBus)
    {
        assert(!pkt->req->isUncacheable());
        return allocateBufferInternal(&mshrQueue,
                                      blockAlign(pkt->getAddr()), blkSize,
                                      pkt, time, requestBus);
    }

    /** Allocate an entry in the write buffer for a (pure) write packet. */
    MSHR *allocateWriteBuffer(PacketPtr pkt, Tick time, bool requestBus)
    {
        assert(pkt->isWrite() && !pkt->isRead());
        return allocateBufferInternal(&writeBuffer,
                                      pkt->getAddr(), pkt->getSize(),
                                      pkt, time, requestBus);
    }

    /**
     * Allocate an MSHR for an uncacheable read; note the address is NOT
     * block-aligned and the size comes from the packet.
     */
    MSHR *allocateUncachedReadBuffer(PacketPtr pkt, Tick time, bool requestBus)
    {
        assert(pkt->req->isUncacheable());
        assert(pkt->isRead());
        return allocateBufferInternal(&mshrQueue,
                                      pkt->getAddr(), pkt->getSize(),
                                      pkt, time, requestBus);
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked()
    {
        return blocked != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            // First blocking cause: start the blocked-cycle timer and
            // block the CPU-side port.
            blocked_causes[cause]++;
            blockedCycle = curTick();
            cpuSidePort->setBlocked();
        }
        blocked |= flag;
        DPRINTF(Cache,"Blocking for cause %d, mask=%d\n", cause, blocked);
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        blocked &= ~flag;
        DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked);
        if (blocked == 0) {
            // Last blocking cause cleared: account blocked cycles and
            // unblock the CPU-side port.
            blocked_cycles[cause] += curTick() - blockedCycle;
            cpuSidePort->clearBlocked();
        }
    }

    /**
     * Request the master bus for the given cause and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void requestMemSideBus(RequestCause cause, Tick time)
    {
        memSidePort->requestBus(cause, time);
    }

    /**
     * Clear the master bus request for the given cause.
     * @param cause The request reason to clear.
     */
    void deassertMemSideBusRequest(RequestCause cause)
    {
        // Obsolete... we no longer signal bus requests explicitly so
        // we can't deassert them.  Leaving this in as a no-op since
        // the prefetcher calls it to indicate that it no longer wants
        // to request a prefetch, and someday that might be
        // interesting again.
    }

    virtual unsigned int drain(Event *de);

    virtual bool inCache(Addr addr) = 0;

    virtual bool inMissQueue(Addr addr) = 0;

    /**
     * Bump the per-CPU miss counter for this packet's command, and count
     * down toward the max-miss-count exit event if one is configured.
     * @param pkt The missing packet.
     * @param id The context/CPU id, or -1 for writebacks and (in full
     *           system) device accesses.
     */
    void incMissCount(PacketPtr pkt, int id)
    {

        if (pkt->cmd == MemCmd::Writeback) {
            assert(id == -1);
            misses[pkt->cmdToIndex()][0]++;
            /* same thing for writeback hits as misses - no context id
             * available, meanwhile writeback hit/miss stats are not used
             * in any aggregate hit/miss calculations, so just lump them all
             * in bucket 0 */
#if FULL_SYSTEM
        } else if (id == -1) {
            // Device accesses have id -1
            // lump device accesses into their own bucket
            misses[pkt->cmdToIndex()][_numCpus]++;
#endif
        } else {
            misses[pkt->cmdToIndex()][id % _numCpus]++;
        }

        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }
    /**
     * Bump the per-CPU hit counter for this packet's command.
     * @param pkt The hitting packet.
     * @param id The context/CPU id, or -1 for writebacks and (in full
     *           system) device accesses.
     */
    void incHitCount(PacketPtr pkt, int id)
    {

        /* Writeback requests don't have a context id associated with
         * them, so attributing a hit to a -1 context id is obviously a
         * problem.  I've noticed in the stats that hits are split into
         * demand and non-demand hits - neither of which include writeback
         * hits, so here, I'll just put the writeback hits into bucket 0
         * since it won't mess with any other stats -hsul */
        if (pkt->cmd == MemCmd::Writeback) {
            assert(id == -1);
            hits[pkt->cmdToIndex()][0]++;
#if FULL_SYSTEM
        } else if (id == -1) {
            // Device accesses have id -1
            // lump device accesses into their own bucket
            hits[pkt->cmdToIndex()][_numCpus]++;
#endif
        } else {
            /* the % is necessary in case there are switch cpus */
            hits[pkt->cmdToIndex()][id % _numCpus]++;
        }
    }

};

#endif //__BASE_CACHE_HH__