base.hh revision 10319
/*
 * Copyright (c) 2012-2013 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Ron Dreslinski
 */
442810SN/A
/**
 * @file
 * Declares a basic cache interface BaseCache.
 */
496215Snate@binkert.org
506978SLisa.Hsu@amd.com#ifndef __BASE_CACHE_HH__
518232Snate@binkert.org#define __BASE_CACHE_HH__
528232Snate@binkert.org
535338Sstever@gmail.com#include <algorithm>
542810SN/A#include <list>
552810SN/A#include <string>
568229Snate@binkert.org#include <vector>
574626SN/A
585034SN/A#include "base/misc.hh"
592811SN/A#include "base/statistics.hh"
604626SN/A#include "base/trace.hh"
612810SN/A#include "base/types.hh"
623194SN/A#include "debug/Cache.hh"
632810SN/A#include "debug/CachePort.hh"
642810SN/A#include "mem/cache/mshr_queue.hh"
652810SN/A#include "mem/mem_object.hh"
662810SN/A#include "mem/packet.hh"
672810SN/A#include "mem/qport.hh"
684628SN/A#include "mem/request.hh"
694628SN/A#include "params/BaseCache.hh"
704628SN/A#include "sim/eventq.hh"
714628SN/A#include "sim/full_system.hh"
724628SN/A#include "sim/sim_exit.hh"
734628SN/A#include "sim/system.hh"
744628SN/A
754628SN/Aclass MSHR;
764628SN/A/**
774628SN/A * A basic cache interface. Implements some common functions for speed.
784628SN/A */
794628SN/Aclass BaseCache : public MemObject
804628SN/A{
814628SN/A    /**
824628SN/A     * Indexes to enumerate the MSHR queues.
834628SN/A     */
844628SN/A    enum MSHRQueueIndex {
854628SN/A        MSHRQueue_MSHRs,
864628SN/A        MSHRQueue_WriteBuffer
874628SN/A    };
884628SN/A
894628SN/A  public:
904628SN/A    /**
914628SN/A     * Reasons for caches to be blocked.
924628SN/A     */
934628SN/A    enum BlockedCause {
944628SN/A        Blocked_NoMSHRs = MSHRQueue_MSHRs,
954628SN/A        Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
964628SN/A        Blocked_NoTargets,
974628SN/A        NUM_BLOCKED_CAUSES
984628SN/A    };
994626SN/A
1002810SN/A    /**
1012844SN/A     * Reasons for cache to request a bus.
1022810SN/A     */
1032810SN/A    enum RequestCause {
1043738SN/A        Request_MSHR = MSHRQueue_MSHRs,
1054965SN/A        Request_WB = MSHRQueue_WriteBuffer,
1066122SSteve.Reinhardt@amd.com        Request_PF,
1074458SN/A        NUM_REQUEST_CAUSES
1088711Sandreas.hansson@arm.com    };
1092810SN/A
1106227Snate@binkert.org  protected:
1112810SN/A
1124458SN/A    /**
1133013SN/A     * A cache master port is used for the memory-side port of the
1144666SN/A     * cache, and in addition to the basic timing port that only sends
1154666SN/A     * response packets through a transmit list, it also offers the
1164666SN/A     * ability to schedule and send request packets (requests &
1175314SN/A     * writebacks). The send event is scheduled through requestBus,
1185314SN/A     * and the sendDeferredPacket of the timing port is modified to
1192811SN/A     * consider both the transmit list and the requests from the MSHR.
1204458SN/A     */
1214458SN/A    class CacheMasterPort : public QueuedMasterPort
1222810SN/A    {
1232810SN/A
1242810SN/A      public:
1252810SN/A
1265314SN/A        /**
1273606SN/A         * Schedule a send of a request packet (from the MSHR). Note
1284458SN/A         * that we could already have a retry or a transmit list of
1294458SN/A         * responses outstanding.
1302810SN/A         */
1312810SN/A        void requestBus(RequestCause cause, Tick time)
1322897SN/A        {
1332897SN/A            DPRINTF(CachePort, "Asserting bus request for cause %d\n", cause);
1344458SN/A            queue.schedSendEvent(time);
1354458SN/A        }
1364888SN/A
1374666SN/A      protected:
1384666SN/A
1394458SN/A        CacheMasterPort(const std::string &_name, BaseCache *_cache,
1404458SN/A                        MasterPacketQueue &_queue) :
1414458SN/A            QueuedMasterPort(_name, _cache, _queue)
1424626SN/A        { }
1434626SN/A
1444626SN/A        /**
1452811SN/A         * Memory-side port always snoops.
1462810SN/A         *
1473338SN/A         * @return always true
1483338SN/A         */
1493738SN/A        virtual bool isSnooping() const { return true; }
1503338SN/A    };
1514626SN/A
1524626SN/A    /**
1534626SN/A     * A cache slave port is used for the CPU-side port of the cache,
1544626SN/A     * and it is basically a simple timing port that uses a transmit
1554626SN/A     * list for responses to the CPU (or connected master). In
1564626SN/A     * addition, it has the functionality to block the port for
1574626SN/A     * incoming requests. If blocked, the port will issue a retry once
1584626SN/A     * unblocked.
1594628SN/A     */
1604628SN/A    class CacheSlavePort : public QueuedSlavePort
1614628SN/A    {
1624666SN/A
1634628SN/A      public:
1644628SN/A
1654628SN/A        /** Do not accept any new requests. */
1664628SN/A        void setBlocked();
1674628SN/A
1684628SN/A        /** Return to normal operation and accept new requests. */
1694628SN/A        void clearBlocked();
1704628SN/A
1714628SN/A      protected:
1724628SN/A
1734628SN/A        CacheSlavePort(const std::string &_name, BaseCache *_cache,
1744628SN/A                       const std::string &_label);
1757667Ssteve.reinhardt@amd.com
1764628SN/A        /** A normal packet queue used to store responses. */
1774628SN/A        SlavePacketQueue queue;
1784628SN/A
1797667Ssteve.reinhardt@amd.com        bool blocked;
1804628SN/A
1814628SN/A        bool mustSendRetry;
1824628SN/A
1834628SN/A      private:
1844628SN/A
1854626SN/A        EventWrapper<SlavePort, &SlavePort::sendRetry> sendRetryEvent;
1866227Snate@binkert.org
1874626SN/A    };
1884630SN/A
1894630SN/A    CacheSlavePort *cpuSidePort;
1904630SN/A    CacheMasterPort *memSidePort;
1914630SN/A
1924630SN/A  protected:
1934626SN/A
1944626SN/A    /** Miss status registers */
1954626SN/A    MSHRQueue mshrQueue;
1966122SSteve.Reinhardt@amd.com
1976122SSteve.Reinhardt@amd.com    /** Write/writeback buffer */
1984626SN/A    MSHRQueue writeBuffer;
1998134SAli.Saidi@ARM.com
2008134SAli.Saidi@ARM.com    MSHR *allocateBufferInternal(MSHRQueue *mq, Addr addr, int size,
2018134SAli.Saidi@ARM.com                                 PacketPtr pkt, Tick time, bool requestBus)
2028134SAli.Saidi@ARM.com    {
2038134SAli.Saidi@ARM.com        MSHR *mshr = mq->allocate(addr, size, pkt, time, order++);
2042810SN/A
2052810SN/A        if (mq->isFull()) {
2062810SN/A            setBlocked((BlockedCause)mq->index);
2072810SN/A        }
2082810SN/A
2092810SN/A        if (requestBus) {
2106122SSteve.Reinhardt@amd.com            requestMemSideBus((RequestCause)mq->index, time);
2116122SSteve.Reinhardt@amd.com        }
2126122SSteve.Reinhardt@amd.com
2132810SN/A        return mshr;
2142810SN/A    }
2152810SN/A
2164626SN/A    void markInServiceInternal(MSHR *mshr, PacketPtr pkt)
2174626SN/A    {
2182810SN/A        MSHRQueue *mq = mshr->queue;
2192810SN/A        bool wasFull = mq->isFull();
2202810SN/A        mq->markInService(mshr, pkt);
2212810SN/A        if (wasFull && !mq->isFull()) {
2223503SN/A            clearBlocked((BlockedCause)mq->index);
2233503SN/A        }
2243503SN/A    }
2256122SSteve.Reinhardt@amd.com
2266122SSteve.Reinhardt@amd.com    /**
2276122SSteve.Reinhardt@amd.com     * Write back dirty blocks in the cache using functional accesses.
2286122SSteve.Reinhardt@amd.com     */
2296122SSteve.Reinhardt@amd.com    virtual void memWriteback() = 0;
2306978SLisa.Hsu@amd.com    /**
2316978SLisa.Hsu@amd.com     * Invalidates all blocks in the cache.
2326978SLisa.Hsu@amd.com     *
2332810SN/A     * @warn Dirty cache lines will not be written back to
2346978SLisa.Hsu@amd.com     * memory. Make sure to call functionalWriteback() first if you
2352810SN/A     * want the to write them to memory.
2362810SN/A     */
2372810SN/A    virtual void memInvalidate() = 0;
2382810SN/A    /**
2392810SN/A     * Determine if there are any dirty blocks in the cache.
2402810SN/A     *
2412810SN/A     * \return true if at least one block is dirty, false otherwise.
2425999Snate@binkert.org     */
2432810SN/A    virtual bool isDirty() const = 0;
2442810SN/A
2452810SN/A    /** Block size of this cache */
2462810SN/A    const unsigned blkSize;
2472810SN/A
2482810SN/A    /**
2495999Snate@binkert.org     * The latency of a hit in this device.
2502810SN/A     */
2512810SN/A    const Cycles hitLatency;
2522810SN/A
2532810SN/A    /**
2542810SN/A     * The latency of sending reponse to its upper level cache/core on a
2552810SN/A     * linefill. In most contemporary processors, the return path on a cache
2562810SN/A     * miss is much quicker that the hit latency. The responseLatency parameter
2572810SN/A     * tries to capture this latency.
2582810SN/A     */
2595999Snate@binkert.org    const Cycles responseLatency;
2602810SN/A
2612810SN/A    /** The number of targets for each MSHR. */
2622810SN/A    const int numTarget;
2632810SN/A
2642810SN/A    /** Do we forward snoops from mem side port through to cpu side port? */
2652810SN/A    const bool forwardSnoops;
2664022SN/A
2672810SN/A    /** Is this cache a toplevel cache (e.g. L1, I/O cache). If so we should
2682810SN/A     * never try to forward ownership and similar optimizations to the cpu
2692810SN/A     * side */
2702810SN/A    const bool isTopLevel;
2712810SN/A
2722810SN/A    /**
2734022SN/A     * Bit vector of the blocking reasons for the access path.
2742810SN/A     * @sa #BlockedCause
2752810SN/A     */
2762810SN/A    uint8_t blocked;
2772810SN/A
2782810SN/A    /** Increasing order number assigned to each incoming request. */
2792810SN/A    uint64_t order;
2804022SN/A
2812810SN/A    /** Stores time the cache blocked for statistics. */
2822810SN/A    Cycles blockedCycle;
2832810SN/A
2842810SN/A    /** Pointer to the MSHR that has no targets. */
2852810SN/A    MSHR *noTargetMSHR;
2862810SN/A
2875999Snate@binkert.org    /** The number of misses to trigger an exit event. */
2882810SN/A    Counter missCount;
2895999Snate@binkert.org
2902810SN/A    /**
2912810SN/A     * The address range to which the cache responds on the CPU side.
2922810SN/A     * Normally this is all possible memory addresses. */
2932810SN/A    const AddrRangeList addrRanges;
2942810SN/A
2955999Snate@binkert.org  public:
2962810SN/A    /** System we are currently operating in. */
2972810SN/A    System *system;
2985999Snate@binkert.org
2992810SN/A    // Statistics
3004626SN/A    /**
3015999Snate@binkert.org     * @addtogroup CacheStatistics
3024626SN/A     * @{
3034626SN/A     */
3045999Snate@binkert.org
3054626SN/A    /** Number of hits per thread for each type of command. @sa Packet::Command */
3064626SN/A    Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
3074626SN/A    /** Number of hits for demand accesses. */
3084626SN/A    Stats::Formula demandHits;
3094626SN/A    /** Number of hit for all accesses. */
3104626SN/A    Stats::Formula overallHits;
3115999Snate@binkert.org
3124626SN/A    /** Number of misses per thread for each type of command. @sa Packet::Command */
3134626SN/A    Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
3144626SN/A    /** Number of misses for demand accesses. */
3154626SN/A    Stats::Formula demandMisses;
3164626SN/A    /** Number of misses for all accesses. */
3174626SN/A    Stats::Formula overallMisses;
3185999Snate@binkert.org
3194626SN/A    /**
3204626SN/A     * Total number of cycles per thread/command spent waiting for a miss.
3214626SN/A     * Used to calculate the average miss latency.
3224626SN/A     */
3235999Snate@binkert.org    Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
3244626SN/A    /** Total number of cycles spent waiting for demand misses. */
3254626SN/A    Stats::Formula demandMissLatency;
3264626SN/A    /** Total number of cycles spent waiting for all misses. */
3274626SN/A    Stats::Formula overallMissLatency;
3284626SN/A
3294626SN/A    /** The number of accesses per command and thread. */
3305999Snate@binkert.org    Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
3314626SN/A    /** The number of demand accesses. */
3324626SN/A    Stats::Formula demandAccesses;
3334626SN/A    /** The number of overall accesses. */
3347461Snate@binkert.org    Stats::Formula overallAccesses;
3354626SN/A
3364626SN/A    /** The miss rate per command and thread. */
3374626SN/A    Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
3384626SN/A    /** The miss rate of all demand accesses. */
3394626SN/A    Stats::Formula demandMissRate;
3404626SN/A    /** The miss rate for all accesses. */
3417461Snate@binkert.org    Stats::Formula overallMissRate;
3424626SN/A
3434626SN/A    /** The average miss latency per command and thread. */
3444626SN/A    Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
3454626SN/A    /** The average miss latency for demand misses. */
3464626SN/A    Stats::Formula demandAvgMissLatency;
3474626SN/A    /** The average miss latency for all misses. */
3484626SN/A    Stats::Formula overallAvgMissLatency;
3494626SN/A
3504626SN/A    /** The total number of cycles blocked for each blocked cause. */
3514626SN/A    Stats::Vector blocked_cycles;
3524626SN/A    /** The number of times this cache blocked for each blocked cause. */
3534626SN/A    Stats::Vector blocked_causes;
3544626SN/A
3554626SN/A    /** The average number of cycles blocked for each blocked cause. */
3564626SN/A    Stats::Formula avg_blocked;
3574626SN/A
3584626SN/A    /** The number of fast writes (WH64) performed. */
3594626SN/A    Stats::Scalar fastWrites;
3604626SN/A
3614626SN/A    /** The number of cache copies performed. */
3624626SN/A    Stats::Scalar cacheCopies;
3635999Snate@binkert.org
3644626SN/A    /** Number of blocks written back per thread. */
3655999Snate@binkert.org    Stats::Vector writebacks;
3664626SN/A
3675999Snate@binkert.org    /** Number of misses that hit in the MSHRs per command and thread. */
3684626SN/A    Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
3692810SN/A    /** Demand misses that hit in the MSHRs. */
3702810SN/A    Stats::Formula demandMshrHits;
3712810SN/A    /** Total number of misses that hit in the MSHRs. */
3722810SN/A    Stats::Formula overallMshrHits;
3732810SN/A
3742810SN/A    /** Number of misses that miss in the MSHRs, per command and thread. */
3752810SN/A    Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
3762810SN/A    /** Demand misses that miss in the MSHRs. */
3772810SN/A    Stats::Formula demandMshrMisses;
3782810SN/A    /** Total number of misses that miss in the MSHRs. */
3795034SN/A    Stats::Formula overallMshrMisses;
3805034SN/A
3815034SN/A    /** Number of misses that miss in the MSHRs, per command and thread. */
3823606SN/A    Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
3832858SN/A    /** Total number of misses that miss in the MSHRs. */
3842858SN/A    Stats::Formula overallMshrUncacheable;
3852810SN/A
3862810SN/A    /** Total cycle latency of each MSHR miss, per command and thread. */
3872810SN/A    Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
3882810SN/A    /** Total cycle latency of demand MSHR misses. */
3896227Snate@binkert.org    Stats::Formula demandMshrMissLatency;
3906227Snate@binkert.org    /** Total cycle latency of overall MSHR misses. */
3912810SN/A    Stats::Formula overallMshrMissLatency;
3922810SN/A
3932810SN/A    /** Total cycle latency of each MSHR miss, per command and thread. */
3942810SN/A    Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
3954626SN/A    /** Total cycle latency of overall MSHR misses. */
3966666Ssteve.reinhardt@amd.com    Stats::Formula overallMshrUncacheableLatency;
3974626SN/A
3984626SN/A#if 0
3996122SSteve.Reinhardt@amd.com    /** The total number of MSHR accesses per command and thread. */
4006122SSteve.Reinhardt@amd.com    Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
4014628SN/A    /** The total number of demand MSHR accesses. */
4024628SN/A    Stats::Formula demandMshrAccesses;
4034902SN/A    /** The total number of MSHR accesses. */
4044628SN/A    Stats::Formula overallMshrAccesses;
4054628SN/A#endif
4064628SN/A
4074628SN/A    /** The miss rate in the MSHRs pre command and thread. */
4084628SN/A    Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
4094902SN/A    /** The demand miss rate in the MSHRs. */
4104628SN/A    Stats::Formula demandMshrMissRate;
4114902SN/A    /** The overall miss rate in the MSHRs. */
4124902SN/A    Stats::Formula overallMshrMissRate;
4134902SN/A
4144628SN/A    /** The average latency of an MSHR miss, per command and thread. */
4154628SN/A    Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
4164628SN/A    /** The average latency of a demand MSHR miss. */
4174902SN/A    Stats::Formula demandAvgMshrMissLatency;
4184902SN/A    /** The average overall latency of an MSHR miss. */
4194902SN/A    Stats::Formula overallAvgMshrMissLatency;
4204902SN/A
4214902SN/A    /** The average latency of an MSHR miss, per command and thread. */
4224902SN/A    Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
4234902SN/A    /** The average overall latency of an MSHR miss. */
4244902SN/A    Stats::Formula overallAvgMshrUncacheableLatency;
4254628SN/A
4262810SN/A    /** The number of times a thread hit its MSHR cap. */
4272810SN/A    Stats::Vector mshr_cap_events;
4282810SN/A    /** The number of times software prefetches caused the MSHR to block. */
4292810SN/A    Stats::Vector soft_prefetch_mshr_full;
4302810SN/A
4312810SN/A    Stats::Scalar mshr_no_allocate_misses;
4322810SN/A
4332810SN/A    /**
4342810SN/A     * @}
4352810SN/A     */
4362810SN/A
4372810SN/A    /**
4382810SN/A     * Register stats for this object.
4392810SN/A     */
4402810SN/A    virtual void regStats();
4412810SN/A
4422810SN/A  public:
4432810SN/A    typedef BaseCacheParams Params;
4447823Ssteve.reinhardt@amd.com    BaseCache(const Params *p);
4454630SN/A    ~BaseCache() {}
4462810SN/A
4474630SN/A    virtual void init();
4484630SN/A
4492810SN/A    virtual BaseMasterPort &getMasterPort(const std::string &if_name,
4502810SN/A                                          PortID idx = InvalidPortID);
4512810SN/A    virtual BaseSlavePort &getSlavePort(const std::string &if_name,
4522810SN/A                                        PortID idx = InvalidPortID);
4532810SN/A
4542810SN/A    /**
4552810SN/A     * Query block size of a cache.
4562810SN/A     * @return  The block size
4572810SN/A     */
4582810SN/A    unsigned
4592810SN/A    getBlockSize() const
4602810SN/A    {
4614630SN/A        return blkSize;
4624630SN/A    }
4634630SN/A
4647823Ssteve.reinhardt@amd.com
4654630SN/A    Addr blockAlign(Addr addr) const { return (addr & ~(Addr(blkSize - 1))); }
4662810SN/A
4672810SN/A
4682810SN/A    const AddrRangeList &getAddrRanges() const { return addrRanges; }
4692810SN/A
4702810SN/A    MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool requestBus)
4712810SN/A    {
4722810SN/A        assert(!pkt->req->isUncacheable());
4732810SN/A        return allocateBufferInternal(&mshrQueue,
4744458SN/A                                      blockAlign(pkt->getAddr()), blkSize,
4752810SN/A                                      pkt, time, requestBus);
4764458SN/A    }
4772810SN/A
4782810SN/A    MSHR *allocateWriteBuffer(PacketPtr pkt, Tick time, bool requestBus)
4792810SN/A    {
4802810SN/A        assert(pkt->isWrite() && !pkt->isRead());
4812810SN/A        return allocateBufferInternal(&writeBuffer,
4822810SN/A                                      pkt->getAddr(), pkt->getSize(),
4834458SN/A                                      pkt, time, requestBus);
4842810SN/A    }
4855875Ssteve.reinhardt@amd.com
4865875Ssteve.reinhardt@amd.com    MSHR *allocateUncachedReadBuffer(PacketPtr pkt, Tick time, bool requestBus)
4875875Ssteve.reinhardt@amd.com    {
4885875Ssteve.reinhardt@amd.com        assert(pkt->req->isUncacheable());
4895875Ssteve.reinhardt@amd.com        assert(pkt->isRead());
4902811SN/A        return allocateBufferInternal(&mshrQueue,
4913503SN/A                                      pkt->getAddr(), pkt->getSize(),
4923503SN/A                                      pkt, time, requestBus);
4933503SN/A    }
4944626SN/A
4954626SN/A    /**
4964626SN/A     * Returns true if the cache is blocked for accesses.
4974626SN/A     */
4986978SLisa.Hsu@amd.com    bool isBlocked() const
4993503SN/A    {
5006978SLisa.Hsu@amd.com        return blocked != 0;
5016978SLisa.Hsu@amd.com    }
5026978SLisa.Hsu@amd.com
5036978SLisa.Hsu@amd.com    /**
5046978SLisa.Hsu@amd.com     * Marks the access path of the cache as blocked for the given cause. This
5056978SLisa.Hsu@amd.com     * also sets the blocked flag in the slave interface.
5066978SLisa.Hsu@amd.com     * @param cause The reason for the cache blocking.
5076978SLisa.Hsu@amd.com     */
5086978SLisa.Hsu@amd.com    void setBlocked(BlockedCause cause)
5096978SLisa.Hsu@amd.com    {
5106978SLisa.Hsu@amd.com        uint8_t flag = 1 << cause;
5116978SLisa.Hsu@amd.com        if (blocked == 0) {
5126978SLisa.Hsu@amd.com            blocked_causes[cause]++;
5136978SLisa.Hsu@amd.com            blockedCycle = curCycle();
5146978SLisa.Hsu@amd.com            cpuSidePort->setBlocked();
5156978SLisa.Hsu@amd.com        }
5166978SLisa.Hsu@amd.com        blocked |= flag;
5174626SN/A        DPRINTF(Cache,"Blocking for cause %d, mask=%d\n", cause, blocked);
5184626SN/A    }
5194626SN/A
5204626SN/A    /**
5214626SN/A     * Marks the cache as unblocked for the given cause. This also clears the
5223503SN/A     * blocked flags in the appropriate interfaces.
5233503SN/A     * @param cause The newly unblocked cause.
5246978SLisa.Hsu@amd.com     * @warning Calling this function can cause a blocked request on the bus to
5256978SLisa.Hsu@amd.com     * access the cache. The cache must be in a state to handle that request.
5266978SLisa.Hsu@amd.com     */
5276978SLisa.Hsu@amd.com    void clearBlocked(BlockedCause cause)
5286978SLisa.Hsu@amd.com    {
5296978SLisa.Hsu@amd.com        uint8_t flag = 1 << cause;
5306978SLisa.Hsu@amd.com        blocked &= ~flag;
5316978SLisa.Hsu@amd.com        DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked);
5326978SLisa.Hsu@amd.com        if (blocked == 0) {
5336978SLisa.Hsu@amd.com            blocked_cycles[cause] += curCycle() - blockedCycle;
5346978SLisa.Hsu@amd.com            cpuSidePort->clearBlocked();
5356978SLisa.Hsu@amd.com        }
5366978SLisa.Hsu@amd.com    }
5376978SLisa.Hsu@amd.com
5386978SLisa.Hsu@amd.com    /**
5396978SLisa.Hsu@amd.com     * Request the master bus for the given cause and time.
5406978SLisa.Hsu@amd.com     * @param cause The reason for the request.
5416978SLisa.Hsu@amd.com     * @param time The time to make the request.
5426978SLisa.Hsu@amd.com     */
5436978SLisa.Hsu@amd.com    void requestMemSideBus(RequestCause cause, Tick time)
5446978SLisa.Hsu@amd.com    {
5456978SLisa.Hsu@amd.com        memSidePort->requestBus(cause, time);
5466978SLisa.Hsu@amd.com    }
5473503SN/A
5482810SN/A    /**
5492810SN/A     * Clear the master bus request for the given cause.
5502810SN/A     * @param cause The request reason to clear.
551     */
552    void deassertMemSideBusRequest(RequestCause cause)
553    {
554        // Obsolete... we no longer signal bus requests explicitly so
555        // we can't deassert them.  Leaving this in as a no-op since
556        // the prefetcher calls it to indicate that it no longer wants
557        // to request a prefetch, and someday that might be
558        // interesting again.
559    }
560
561    virtual unsigned int drain(DrainManager *dm);
562
563    virtual bool inCache(Addr addr, bool is_secure) const = 0;
564
565    virtual bool inMissQueue(Addr addr, bool is_secure) const = 0;
566
567    void incMissCount(PacketPtr pkt)
568    {
569        assert(pkt->req->masterId() < system->maxMasters());
570        misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
571        pkt->req->incAccessDepth();
572        if (missCount) {
573            --missCount;
574            if (missCount == 0)
575                exitSimLoop("A cache reached the maximum miss count");
576        }
577    }
578    void incHitCount(PacketPtr pkt)
579    {
580        assert(pkt->req->masterId() < system->maxMasters());
581        hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
582
583    }
584
585};
586
587#endif //__BASE_CACHE_HH__
588