/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Ron Dreslinski
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __BASE_CACHE_HH__
#define __BASE_CACHE_HH__

#include <algorithm>
#include <list>
#include <string>
#include <vector>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "config/full_system.hh"
#include "mem/cache/mshr_queue.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/request.hh"
#include "mem/tport.hh"
#include "params/BaseCache.hh"
#include "sim/eventq.hh"
#include "sim/sim_exit.hh"

class MSHR;
/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
    /**
     * Indexes to enumerate the MSHR queues.
     */
    enum MSHRQueueIndex {
        MSHRQueue_MSHRs,
        MSHRQueue_WriteBuffer
    };

    /**
     * Reasons for caches to be blocked.
     */
    enum BlockedCause {
        Blocked_NoMSHRs = MSHRQueue_MSHRs,
        Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
        Blocked_NoTargets,
        NUM_BLOCKED_CAUSES
    };

  public:
    /**
     * Reasons for cache to request a bus.
     */
    enum RequestCause {
        Request_MSHR = MSHRQueue_MSHRs,
        Request_WB = MSHRQueue_WriteBuffer,
        Request_PF,
        NUM_REQUEST_CAUSES
    };
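    // Note: Request_MSHR and Request_WB intentionally alias the MSHRQueue
    // indices above, so a queue's index can be cast directly to a
    // RequestCause (or BlockedCause), as allocateBufferInternal() and
    // markInServiceInternal() do below.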

  private:

    class CachePort : public SimpleTimingPort
    {
      public:
        BaseCache *cache;

      protected:
        CachePort(const std::string &_name, BaseCache *_cache,
                  const std::string &_label);

        virtual void recvStatusChange(Status status);

        virtual unsigned deviceBlockSize() const;

        bool recvRetryCommon();

        typedef EventWrapper<Port, &Port::sendRetry>
            SendRetryEvent;

        const std::string label;

      public:
        void setOtherPort(CachePort *_otherPort) { otherPort = _otherPort; }

        void setBlocked();

        void clearBlocked();

        bool checkFunctional(PacketPtr pkt);

        CachePort *otherPort;

        bool blocked;

        bool mustSendRetry;

        void requestBus(RequestCause cause, Tick time)
        {
            DPRINTF(CachePort, "Asserting bus request for cause %d\n", cause);
            if (!waitingOnRetry) {
                schedSendEvent(time);
            }
        }

        void respond(PacketPtr pkt, Tick time) {
            schedSendTiming(pkt, time);
        }
    };

  public: //Made public so coherence can get at it.
    CachePort *cpuSidePort;
    CachePort *memSidePort;

  protected:

    /** Miss status registers */
    MSHRQueue mshrQueue;

    /** Write/writeback buffer */
    MSHRQueue writeBuffer;

    MSHR *allocateBufferInternal(MSHRQueue *mq, Addr addr, int size,
                                 PacketPtr pkt, Tick time, bool requestBus)
    {
        MSHR *mshr = mq->allocate(addr, size, pkt, time, order++);

        if (mq->isFull()) {
            setBlocked((BlockedCause)mq->index);
        }

        if (requestBus) {
            requestMemSideBus((RequestCause)mq->index, time);
        }

        return mshr;
    }

    void markInServiceInternal(MSHR *mshr, PacketPtr pkt)
    {
        MSHRQueue *mq = mshr->queue;
        bool wasFull = mq->isFull();
        mq->markInService(mshr, pkt);
        if (wasFull && !mq->isFull()) {
            clearBlocked((BlockedCause)mq->index);
        }
    }

    /** Block size of this cache */
    const unsigned blkSize;

    /**
     * The latency of a hit in this device.
     */
    int hitLatency;

    /** The number of targets for each MSHR. */
    const int numTarget;

    /** Do we forward snoops from mem side port through to cpu side port? */
    bool forwardSnoops;

    /** Is this cache a toplevel cache (e.g. L1, I/O cache). If so we should
     * never try to forward ownership and similar optimizations to the cpu
     * side */
    bool isTopLevel;

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;
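    // One bit per BlockedCause: bit 0 is Blocked_NoMSHRs, bit 1 is
    // Blocked_NoWBBuffers, bit 2 is Blocked_NoTargets.  setBlocked() and
    // clearBlocked() below set and clear these bits via (1 << cause).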

    /** Increasing order number assigned to each incoming request. */
    uint64_t order;

    /** Stores time the cache blocked for statistics. */
    Tick blockedCycle;

    /** Pointer to the MSHR that has no targets. */
    MSHR *noTargetMSHR;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /** The drain event. */
    Event *drainEvent;

    /**
     * The address range to which the cache responds on the CPU side.
     * Normally this is all possible memory addresses. */
    Range<Addr> addrRange;

    /** number of cpus sharing this cache - from config file */
    int _numCpus;

  public:
    int numCpus() { return _numCpus; }
    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command. @sa Packet::Command */
    Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command. @sa Packet::Command */
    Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of fast writes (WH64) performed. */
    Stats::Scalar fastWrites;

    /** The number of cache copies performed. */
    Stats::Scalar cacheCopies;

    /** Number of blocks written back per thread. */
    Stats::Vector writebacks;

    /** Number of misses that hit in the MSHRs per command and thread. */
    Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that hit in the MSHRs. */
    Stats::Formula demandMshrHits;
    /** Total number of misses that hit in the MSHRs. */
    Stats::Formula overallMshrHits;

    /** Number of misses that miss in the MSHRs, per command and thread. */
    Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that miss in the MSHRs. */
    Stats::Formula demandMshrMisses;
    /** Total number of misses that miss in the MSHRs. */
    Stats::Formula overallMshrMisses;

    /** Uncacheable accesses handled by the MSHRs, per command and thread. */
    Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
    /** Total number of uncacheable accesses handled by the MSHRs. */
    Stats::Formula overallMshrUncacheable;

    /** Total cycle latency of each MSHR miss, per command and thread. */
    Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of demand MSHR misses. */
    Stats::Formula demandMshrMissLatency;
    /** Total cycle latency of overall MSHR misses. */
    Stats::Formula overallMshrMissLatency;

    /** Total cycle latency of uncacheable MSHR accesses, per command and thread. */
    Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of all uncacheable MSHR accesses. */
    Stats::Formula overallMshrUncacheableLatency;

#if 0
    /** The total number of MSHR accesses per command and thread. */
    Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
    /** The total number of demand MSHR accesses. */
    Stats::Formula demandMshrAccesses;
    /** The total number of MSHR accesses. */
    Stats::Formula overallMshrAccesses;
#endif

    /** The miss rate in the MSHRs, per command and thread. */
    Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
    /** The demand miss rate in the MSHRs. */
    Stats::Formula demandMshrMissRate;
    /** The overall miss rate in the MSHRs. */
    Stats::Formula overallMshrMissRate;

    /** The average latency of an MSHR miss, per command and thread. */
    Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average latency of a demand MSHR miss. */
    Stats::Formula demandAvgMshrMissLatency;
    /** The average overall latency of an MSHR miss. */
    Stats::Formula overallAvgMshrMissLatency;

    /** The average latency of an uncacheable MSHR access, per command and thread. */
    Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
    /** The average overall latency of an uncacheable MSHR access. */
    Stats::Formula overallAvgMshrUncacheableLatency;

    /** The number of times a thread hit its MSHR cap. */
    Stats::Vector mshr_cap_events;
    /** The number of times software prefetches caused the MSHR to block. */
    Stats::Vector soft_prefetch_mshr_full;

    Stats::Scalar mshr_no_allocate_misses;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    virtual void regStats();

  public:
    typedef BaseCacheParams Params;
    BaseCache(const Params *p);
    ~BaseCache() {}

    virtual void init();

    /**
     * Query block size of a cache.
     * @return  The block size
     */
    unsigned
    getBlockSize() const
    {
        return blkSize;
    }


    Addr blockAlign(Addr addr) const { return (addr & ~(Addr(blkSize - 1))); }
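    // For example (illustrative only, assuming a 64-byte blkSize): the mask
    // is ~Addr(0x3f), so blockAlign(0x12345) returns 0x12340.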


    const Range<Addr> &getAddrRange() const { return addrRange; }

    MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool requestBus)
    {
        assert(!pkt->req->isUncacheable());
        return allocateBufferInternal(&mshrQueue,
                                      blockAlign(pkt->getAddr()), blkSize,
                                      pkt, time, requestBus);
    }

    MSHR *allocateWriteBuffer(PacketPtr pkt, Tick time, bool requestBus)
    {
        assert(pkt->isWrite() && !pkt->isRead());
        return allocateBufferInternal(&writeBuffer,
                                      pkt->getAddr(), pkt->getSize(),
                                      pkt, time, requestBus);
    }

    MSHR *allocateUncachedReadBuffer(PacketPtr pkt, Tick time, bool requestBus)
    {
        assert(pkt->req->isUncacheable());
        assert(pkt->isRead());
        return allocateBufferInternal(&mshrQueue,
                                      pkt->getAddr(), pkt->getSize(),
                                      pkt, time, requestBus);
    }
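    // All three allocators above funnel into allocateBufferInternal(), which
    // blocks the cache via setBlocked() if the chosen queue fills up and,
    // when requestBus is set, schedules a memory-side bus request for the
    // queue's RequestCause.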

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked()
    {
        return blocked != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curTick();
            cpuSidePort->setBlocked();
        }
        blocked |= flag;
        DPRINTF(Cache,"Blocking for cause %d, mask=%d\n", cause, blocked);
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        blocked &= ~flag;
        DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked);
        if (blocked == 0) {
            blocked_cycles[cause] += curTick() - blockedCycle;
            cpuSidePort->clearBlocked();
        }
    }

    /**
     * Request the master bus for the given cause and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void requestMemSideBus(RequestCause cause, Tick time)
    {
        memSidePort->requestBus(cause, time);
    }

    /**
     * Clear the master bus request for the given cause.
     * @param cause The request reason to clear.
     */
    void deassertMemSideBusRequest(RequestCause cause)
    {
        // Obsolete... we no longer signal bus requests explicitly so
        // we can't deassert them.  Leaving this in as a no-op since
        // the prefetcher calls it to indicate that it no longer wants
        // to request a prefetch, and someday that might be
        // interesting again.
    }

    virtual unsigned int drain(Event *de);

    virtual bool inCache(Addr addr) = 0;

    virtual bool inMissQueue(Addr addr) = 0;

    void incMissCount(PacketPtr pkt, int id)
    {

        if (pkt->cmd == MemCmd::Writeback) {
            assert(id == -1);
            misses[pkt->cmdToIndex()][0]++;
            /* same thing for writeback hits as misses - no context id
             * available, meanwhile writeback hit/miss stats are not used
             * in any aggregate hit/miss calculations, so just lump them all
             * in bucket 0 */
#if FULL_SYSTEM
        } else if (id == -1) {
            // Device accesses have id -1
            // lump device accesses into their own bucket
            misses[pkt->cmdToIndex()][_numCpus]++;
#endif
        } else {
            misses[pkt->cmdToIndex()][id % _numCpus]++;
        }

        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }

    void incHitCount(PacketPtr pkt, int id)
    {

        /* Writeback requests don't have a context id associated with
         * them, so attributing a hit to a -1 context id is obviously a
         * problem.  I've noticed in the stats that hits are split into
         * demand and non-demand hits - neither of which include writeback
         * hits, so here, I'll just put the writeback hits into bucket 0
         * since it won't mess with any other stats -hsul */
        if (pkt->cmd == MemCmd::Writeback) {
            assert(id == -1);
            hits[pkt->cmdToIndex()][0]++;
#if FULL_SYSTEM
        } else if (id == -1) {
            // Device accesses have id -1
            // lump device accesses into their own bucket
            hits[pkt->cmdToIndex()][_numCpus]++;
#endif
        } else {
            /* the % is necessary in case there are switch cpus */
            hits[pkt->cmdToIndex()][id % _numCpus]++;
        }
    }

};

#endif //__BASE_CACHE_HH__
549