/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Ron Dreslinski
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __BASE_CACHE_HH__
#define __BASE_CACHE_HH__

#include <algorithm>
#include <list>
#include <string>
#include <vector>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "config/full_system.hh"
#include "mem/cache/mshr_queue.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/request.hh"
#include "mem/tport.hh"
#include "params/BaseCache.hh"
#include "sim/eventq.hh"
#include "sim/sim_exit.hh"

class MSHR;
/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
    /**
     * Indexes to enumerate the MSHR queues.
     */
    enum MSHRQueueIndex {
        MSHRQueue_MSHRs,
        MSHRQueue_WriteBuffer
    };

    /**
     * Reasons for caches to be blocked.
     */
    enum BlockedCause {
        Blocked_NoMSHRs = MSHRQueue_MSHRs,
        Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
        Blocked_NoTargets,
        NUM_BLOCKED_CAUSES
    };

  public:
    /**
     * Reasons for cache to request a bus.
     */
    enum RequestCause {
        Request_MSHR = MSHRQueue_MSHRs,
        Request_WB = MSHRQueue_WriteBuffer,
        Request_PF,
        NUM_REQUEST_CAUSES
    };

  private:

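    /**
     * A port on either the CPU side or the memory side of the cache (see
     * cpuSidePort and memSidePort below).  Extends SimpleTimingPort with
     * blocking/retry state and a pointer back to the owning cache.
     */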
    class CachePort : public SimpleTimingPort
    {
      public:
        BaseCache *cache;

      protected:
        CachePort(const std::string &_name, BaseCache *_cache,
                  const std::string &_label);

        virtual void recvStatusChange(Status status);

        virtual unsigned deviceBlockSize() const;

        bool recvRetryCommon();

        typedef EventWrapper<Port, &Port::sendRetry>
            SendRetryEvent;

        const std::string label;

      public:
        void setOtherPort(CachePort *_otherPort) { otherPort = _otherPort; }

        void setBlocked();

        void clearBlocked();

        bool checkFunctional(PacketPtr pkt);

        CachePort *otherPort;

        bool blocked;

        bool mustSendRetry;

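        /**
         * Schedule a send on this port at the given time for the given
         * cause, unless the port is already waiting on a retry.
         */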
        void requestBus(RequestCause cause, Tick time)
        {
            DPRINTF(CachePort, "Asserting bus request for cause %d\n", cause);
            if (!waitingOnRetry) {
                schedSendEvent(time);
            }
        }

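        /** Schedule the given packet to be sent back as a response at time. */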
        void respond(PacketPtr pkt, Tick time) {
            schedSendTiming(pkt, time);
        }
    };

  public: // Made public so coherence can get at it.
    CachePort *cpuSidePort;
    CachePort *memSidePort;

  protected:

    /** Miss status registers */
    MSHRQueue mshrQueue;

    /** Write/writeback buffer */
    MSHRQueue writeBuffer;

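    /**
     * Allocate an entry in the given MSHR queue.  If the queue becomes
     * full, the cache is blocked for the corresponding cause; if
     * requestBus is set, the memory-side bus is requested at the given
     * time.
     */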
    MSHR *allocateBufferInternal(MSHRQueue *mq, Addr addr, int size,
                                 PacketPtr pkt, Tick time, bool requestBus)
    {
        MSHR *mshr = mq->allocate(addr, size, pkt, time, order++);

        if (mq->isFull()) {
            setBlocked((BlockedCause)mq->index);
        }

        if (requestBus) {
            requestMemSideBus((RequestCause)mq->index, time);
        }

        return mshr;
    }

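    /**
     * Mark an MSHR as in service and unblock the cache if its queue is
     * no longer full as a result.
     */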
    void markInServiceInternal(MSHR *mshr)
    {
        MSHRQueue *mq = mshr->queue;
        bool wasFull = mq->isFull();
        mq->markInService(mshr);
        if (wasFull && !mq->isFull()) {
            clearBlocked((BlockedCause)mq->index);
        }
    }

    /** Block size of this cache */
    const unsigned blkSize;

    /**
     * The latency of a hit in this device.
     */
    int hitLatency;

    /** The number of targets for each MSHR. */
    const int numTarget;

    /** Do we forward snoops from mem side port through to cpu side port? */
    bool forwardSnoops;

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /** Increasing order number assigned to each incoming request. */
    uint64_t order;

    /** Stores time the cache blocked for statistics. */
    Tick blockedCycle;

    /** Pointer to the MSHR that has no targets. */
    MSHR *noTargetMSHR;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /** The drain event. */
    Event *drainEvent;

    /**
     * The address range to which the cache responds on the CPU side.
     * Normally this is all possible memory addresses.
     */
    Range<Addr> addrRange;

    /** Number of CPUs sharing this cache (from the config file). */
    int _numCpus;

  public:
    int numCpus() { return _numCpus; }
    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command. @sa Packet::Command */
    Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command. @sa Packet::Command */
    Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of fast writes (WH64) performed. */
    Stats::Scalar fastWrites;

    /** The number of cache copies performed. */
    Stats::Scalar cacheCopies;

    /** Number of blocks written back per thread. */
    Stats::Vector writebacks;

    /** Number of misses that hit in the MSHRs per command and thread. */
    Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that hit in the MSHRs. */
    Stats::Formula demandMshrHits;
    /** Total number of misses that hit in the MSHRs. */
    Stats::Formula overallMshrHits;

    /** Number of misses that miss in the MSHRs, per command and thread. */
    Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that miss in the MSHRs. */
    Stats::Formula demandMshrMisses;
    /** Total number of misses that miss in the MSHRs. */
    Stats::Formula overallMshrMisses;

    /** Number of uncacheable accesses, per command and thread. */
    Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
    /** Total number of uncacheable accesses. */
    Stats::Formula overallMshrUncacheable;

    /** Total cycle latency of each MSHR miss, per command and thread. */
    Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of demand MSHR misses. */
    Stats::Formula demandMshrMissLatency;
    /** Total cycle latency of overall MSHR misses. */
    Stats::Formula overallMshrMissLatency;

    /** Total cycle latency of uncacheable accesses, per command and thread. */
    Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of all uncacheable accesses. */
    Stats::Formula overallMshrUncacheableLatency;

#if 0
    /** The total number of MSHR accesses per command and thread. */
    Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
    /** The total number of demand MSHR accesses. */
    Stats::Formula demandMshrAccesses;
    /** The total number of MSHR accesses. */
    Stats::Formula overallMshrAccesses;
#endif

    /** The miss rate in the MSHRs, per command and thread. */
    Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
    /** The demand miss rate in the MSHRs. */
    Stats::Formula demandMshrMissRate;
    /** The overall miss rate in the MSHRs. */
    Stats::Formula overallMshrMissRate;

    /** The average latency of an MSHR miss, per command and thread. */
    Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average latency of a demand MSHR miss. */
    Stats::Formula demandAvgMshrMissLatency;
    /** The average overall latency of an MSHR miss. */
    Stats::Formula overallAvgMshrMissLatency;

    /** The average latency of an uncacheable access, per command and thread. */
    Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
    /** The average overall latency of an uncacheable access. */
    Stats::Formula overallAvgMshrUncacheableLatency;

    /** The number of times a thread hit its MSHR cap. */
    Stats::Vector mshr_cap_events;
    /** The number of times software prefetches caused the MSHR to block. */
    Stats::Vector soft_prefetch_mshr_full;

    Stats::Scalar mshr_no_allocate_misses;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    virtual void regStats();

  public:
    typedef BaseCacheParams Params;
    BaseCache(const Params *p);
    ~BaseCache() {}

    virtual void init();

    /**
     * Query block size of a cache.
     * @return  The block size
     */
    unsigned
    getBlockSize() const
    {
        return blkSize;
    }

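    /**
     * Align an address to the start of its cache block.  For example,
     * with a 64-byte block size, blockAlign(0x1005) returns 0x1000.
     */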
    Addr blockAlign(Addr addr) const { return (addr & ~(Addr(blkSize - 1))); }

    const Range<Addr> &getAddrRange() const { return addrRange; }

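    /**
     * Allocate an MSHR for a cacheable miss on the given packet.  The
     * address is block-aligned so that requests to the same block share
     * an MSHR.  A minimal usage sketch (hypothetical call site in a
     * derived cache, shown only for illustration):
     * @code
     * // On a demand miss, allocate an MSHR and request the bus right away.
     * MSHR *mshr = allocateMissBuffer(pkt, curTick, true);
     * @endcode
     */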
    MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool requestBus)
    {
        assert(!pkt->req->isUncacheable());
        return allocateBufferInternal(&mshrQueue,
                                      blockAlign(pkt->getAddr()), blkSize,
                                      pkt, time, requestBus);
    }

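    /**
     * Allocate a write buffer entry for the given write or writeback
     * packet (must be a write and not a read).
     */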
    MSHR *allocateWriteBuffer(PacketPtr pkt, Tick time, bool requestBus)
    {
        assert(pkt->isWrite() && !pkt->isRead());
        return allocateBufferInternal(&writeBuffer,
                                      pkt->getAddr(), pkt->getSize(),
                                      pkt, time, requestBus);
    }

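    /**
     * Allocate an MSHR for an uncacheable read.  Unlike a cacheable miss,
     * the entry uses the exact request address and size rather than a
     * block-aligned address.
     */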
    MSHR *allocateUncachedReadBuffer(PacketPtr pkt, Tick time, bool requestBus)
    {
        assert(pkt->req->isUncacheable());
        assert(pkt->isRead());
        return allocateBufferInternal(&mshrQueue,
                                      pkt->getAddr(), pkt->getSize(),
                                      pkt, time, requestBus);
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked()
    {
        return blocked != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curTick;
            cpuSidePort->setBlocked();
        }
        blocked |= flag;
        DPRINTF(Cache, "Blocking for cause %d, mask=%d\n", cause, blocked);
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        blocked &= ~flag;
        DPRINTF(Cache, "Unblocking for cause %d, mask=%d\n", cause, blocked);
        if (blocked == 0) {
            blocked_cycles[cause] += curTick - blockedCycle;
            cpuSidePort->clearBlocked();
        }
    }

    /**
     * Request the memory-side bus for the given cause and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void requestMemSideBus(RequestCause cause, Tick time)
    {
        memSidePort->requestBus(cause, time);
    }

    /**
     * Clear the memory-side bus request for the given cause.
     * @param cause The request reason to clear.
     */
    void deassertMemSideBusRequest(RequestCause cause)
    {
        // Obsolete... we no longer signal bus requests explicitly so
        // we can't deassert them.  Leaving this in as a no-op since
        // the prefetcher calls it to indicate that it no longer wants
        // to request a prefetch, and someday that might be
        // interesting again.
    }

    virtual unsigned int drain(Event *de);

    virtual bool inCache(Addr addr) = 0;

    virtual bool inMissQueue(Addr addr) = 0;

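    /**
     * Update the per-CPU miss statistics for the given packet.  Writebacks
     * are lumped into bucket 0 and, in full-system mode, device accesses
     * (id == -1) into a bucket of their own; see the comments in the body.
     * Also decrements missCount and may trigger the maximum-miss-count
     * exit event.
     */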
    void incMissCount(PacketPtr pkt, int id)
    {
        if (pkt->cmd == MemCmd::Writeback) {
            assert(id == -1);
            misses[pkt->cmdToIndex()][0]++;
            /* same thing for writeback hits as misses - no context id
             * available, meanwhile writeback hit/miss stats are not used
             * in any aggregate hit/miss calculations, so just lump them all
             * in bucket 0 */
#if FULL_SYSTEM
        } else if (id == -1) {
            // Device accesses have id -1
            // lump device accesses into their own bucket
            misses[pkt->cmdToIndex()][_numCpus]++;
#endif
        } else {
            misses[pkt->cmdToIndex()][id % _numCpus]++;
        }

        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }
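    /**
     * Update the per-CPU hit statistics for the given packet; the
     * bucketing mirrors incMissCount().
     */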
    void incHitCount(PacketPtr pkt, int id)
    {
        /* Writeback requests don't have a context id associated with
         * them, so attributing a hit to a -1 context id is obviously a
         * problem.  I've noticed in the stats that hits are split into
         * demand and non-demand hits - neither of which include writeback
         * hits, so here, I'll just put the writeback hits into bucket 0
         * since it won't mess with any other stats -hsul */
        if (pkt->cmd == MemCmd::Writeback) {
            assert(id == -1);
            hits[pkt->cmdToIndex()][0]++;
#if FULL_SYSTEM
        } else if (id == -1) {
            // Device accesses have id -1
            // lump device accesses into their own bucket
            hits[pkt->cmdToIndex()][_numCpus]++;
#endif
        } else {
            /* the % is necessary in case there are switch cpus */
            hits[pkt->cmdToIndex()][id % _numCpus]++;
        }
    }

};

#endif //__BASE_CACHE_HH__