base.cc revision 3606
/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Definition of BaseCache functions.
 */

#include "cpu/base.hh"
#include "cpu/smt.hh"
#include "mem/cache/base_cache.hh"
#include "mem/cache/miss/mshr.hh"

using namespace std;

BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache,
                                bool _isCpuSide)
    : Port(_name, _cache), cache(_cache), isCpuSide(_isCpuSide)
{
    blocked = false;
    waitingOnRetry = false;
    // Start ports at null; if more than one is created we should panic
    //cpuSidePort = NULL;
    //memSidePort = NULL;
}

void
BaseCache::CachePort::recvStatusChange(Port::Status status)
{
    cache->recvStatusChange(status, isCpuSide);
}

void
BaseCache::CachePort::getDeviceAddressRanges(AddrRangeList &resp,
                                       AddrRangeList &snoop)
{
    cache->getAddressRanges(resp, snoop, isCpuSide);
}

int
BaseCache::CachePort::deviceBlockSize()
{
    return cache->getBlockSize();
}

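// Timing-mode receive handler shared by both ports: invalidates and
// upgrades from the CPU side are marked SATISFIED and consumed here,
// requests that arrive while the cache is blocked are refused (a retry
// will be sent later), and everything else goes to the cache proper.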
bool
BaseCache::CachePort::recvTiming(PacketPtr pkt)
{
    if (isCpuSide
        && !pkt->req->isUncacheable()
        && pkt->isInvalidate()
        && !pkt->isRead() && !pkt->isWrite()) {
        //Upgrade or Invalidate
        //Look into what happens if two slave caches are on the bus
        DPRINTF(Cache, "%s %x ?\n", pkt->cmdString(), pkt->getAddr());

        assert(!(pkt->flags & SATISFIED));
        pkt->flags |= SATISFIED;
        //Invalidates/Upgrades need no response if they get the bus
        return true;
    }

    if (pkt->isRequest() && blocked)
    {
        DPRINTF(Cache,"Scheduling a retry while blocked\n");
        mustSendRetry = true;
        return false;
    }
    return cache->doTimingAccess(pkt, this, isCpuSide);
}

Tick
BaseCache::CachePort::recvAtomic(PacketPtr pkt)
{
    return cache->doAtomicAccess(pkt, isCpuSide);
}

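// Check a functional (probe) packet against packets queued on this port:
// first the drain list, then responses still waiting on the transmit list.
// Returns true if the probe still needs to be serviced by the cache itself.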
bool
BaseCache::CachePort::checkFunctional(PacketPtr pkt)
{
    //Check storage here first
    list<PacketPtr>::iterator i = drainList.begin();
    list<PacketPtr>::iterator iend = drainList.end();
    bool notDone = true;
    while (i != iend && notDone) {
        PacketPtr target = *i;
        // If the target contains data, and it overlaps the
        // probed request, need to update data
        if (target->intersect(pkt)) {
            notDone = fixPacket(pkt, target);
        }
        i++;
    }
    //Also check responses on the transmit list that are not yet ready to send
    std::list<std::pair<Tick,PacketPtr> >::iterator j = transmitList.begin();
    std::list<std::pair<Tick,PacketPtr> >::iterator jend = transmitList.end();

    while (j != jend && notDone) {
        PacketPtr target = j->second;
        // If the target contains data, and it overlaps the
        // probed request, need to update data
        if (target->intersect(pkt))
            notDone = fixPacket(pkt, target);
        j++;
    }
    return notDone;
}

void
BaseCache::CachePort::recvFunctional(PacketPtr pkt)
{
    bool notDone = checkFunctional(pkt);
    if (notDone)
        cache->doFunctionalAccess(pkt, isCpuSide);
}

void
BaseCache::CachePort::checkAndSendFunctional(PacketPtr pkt)
{
    bool notDone = checkFunctional(pkt);
    if (notDone)
        sendFunctional(pkt);
}

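// Called when the peer grants a retry after a failed sendTiming().
// Drain queued responses first; otherwise resend the next MSHR request
// (memory side) or coherence request (CPU side), scheduling a new
// CacheEvent if more work remains afterwards.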
void
BaseCache::CachePort::recvRetry()
{
    PacketPtr pkt;
    assert(waitingOnRetry);
    if (!drainList.empty()) {
        DPRINTF(CachePort, "%s attempting to send a retry for response\n", name());
        //We have some responses to drain first
        if (sendTiming(drainList.front())) {
            DPRINTF(CachePort, "%s successful in sending a retry for response\n", name());
            drainList.pop_front();
            if (!drainList.empty() ||
                (!isCpuSide && cache->doMasterRequest()) ||
                (isCpuSide && cache->doSlaveRequest())) {

                DPRINTF(CachePort, "%s has more responses/requests\n", name());
                BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this, false);
                reqCpu->schedule(curTick + 1);
            }
            waitingOnRetry = false;
        }
        // Check if we're done draining once this list is empty
        if (drainList.empty())
            cache->checkDrain();
    }
    else if (!isCpuSide)
    {
        DPRINTF(CachePort, "%s attempting to send a retry for MSHR\n", name());
        if (!cache->doMasterRequest()) {
            //This can happen if I am the owner of a block and see an upgrade
            //while the block was in my WB Buffers.  I just remove the
            //wb and de-assert the masterRequest
            waitingOnRetry = false;
            return;
        }
        pkt = cache->getPacket();
        MSHR* mshr = (MSHR*) pkt->senderState;
        //Copy the packet, it may be modified/destroyed elsewhere
        PacketPtr copyPkt = new Packet(*pkt);
        copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
        mshr->pkt = copyPkt;

        bool success = sendTiming(pkt);
        DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
                pkt->getAddr(), success ? "successful" : "unsuccessful");

        waitingOnRetry = !success;
        if (waitingOnRetry) {
            DPRINTF(CachePort, "%s now waiting on a retry\n", name());
        }

        cache->sendResult(pkt, mshr, success);

        if (success && cache->doMasterRequest())
        {
            DPRINTF(CachePort, "%s has more requests\n", name());
            //Still more to issue, rerequest in 1 cycle
            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this, false);
            reqCpu->schedule(curTick + 1);
        }
    }
    else
    {
        assert(cache->doSlaveRequest());
        //pkt = cache->getCoherencePacket();
        //We save the packet, no reordering on CSHRS
        pkt = cache->getCoherencePacket();
        MSHR* cshr = (MSHR*)pkt->senderState;
        bool success = sendTiming(pkt);
        cache->sendCoherenceResult(pkt, cshr, success);
        waitingOnRetry = !success;
        if (success && cache->doSlaveRequest())
        {
            DPRINTF(CachePort, "%s has more requests\n", name());
            //Still more to issue, rerequest in 1 cycle
            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this, false);
            reqCpu->schedule(curTick + 1);
        }
    }
    if (waitingOnRetry) DPRINTF(CachePort, "%s STILL Waiting on retry\n", name());
    else DPRINTF(CachePort, "%s no longer waiting on retry\n", name());
    return;
}

void
BaseCache::CachePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(Cache, "Cache Blocking\n");
    blocked = true;
    //Clear the retry flag
    mustSendRetry = false;
}

void
BaseCache::CachePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(Cache, "Cache Unblocking\n");
    blocked = false;
    if (mustSendRetry)
    {
        DPRINTF(Cache, "Cache Sending Retry\n");
        mustSendRetry = false;
        sendRetry();
    }
}

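// A CacheEvent either drives request traffic (newResponse == false, in
// which case it deletes itself after firing) or delivers a queued
// response from the port's transmit list.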
BaseCache::CacheEvent::CacheEvent(CachePort *_cachePort, bool _newResponse)
    : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort),
      newResponse(_newResponse)
{
    if (!newResponse)
        this->setFlags(AutoDelete);
    pkt = NULL;
}

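// Request-side events drain pending responses, then issue MSHR requests
// (memory side) or coherence (CSHR) requests (CPU side). Response-side
// events pop the next entry off the transmit list, turn it into a timing
// response, and either send it or park it on the drain list.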
void
BaseCache::CacheEvent::process()
{
    if (!newResponse)
    {
        if (cachePort->waitingOnRetry) return;
        //We have some responses to drain first
        if (!cachePort->drainList.empty()) {
            DPRINTF(CachePort, "%s trying to drain a response\n", cachePort->name());
            if (cachePort->sendTiming(cachePort->drainList.front())) {
                DPRINTF(CachePort, "%s drains a response successfully\n", cachePort->name());
                cachePort->drainList.pop_front();
                if (!cachePort->drainList.empty() ||
                    (!cachePort->isCpuSide && cachePort->cache->doMasterRequest()) ||
                    (cachePort->isCpuSide && cachePort->cache->doSlaveRequest())) {

                    DPRINTF(CachePort, "%s still has outstanding bus reqs\n", cachePort->name());
                    this->schedule(curTick + 1);
                }
            }
            else {
                cachePort->waitingOnRetry = true;
                DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
            }
        }
        else if (!cachePort->isCpuSide)
        {
            //MSHR
            DPRINTF(CachePort, "%s trying to send a MSHR request\n", cachePort->name());
            if (!cachePort->cache->doMasterRequest()) {
                //This can happen if I am the owner of a block and see an upgrade
                //while the block was in my WB Buffers.  I just remove the
                //wb and de-assert the masterRequest
                return;
            }

            pkt = cachePort->cache->getPacket();
            MSHR* mshr = (MSHR*) pkt->senderState;
            //Copy the packet, it may be modified/destroyed elsewhere
            PacketPtr copyPkt = new Packet(*pkt);
            copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
            mshr->pkt = copyPkt;

            bool success = cachePort->sendTiming(pkt);
            DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
                    pkt->getAddr(), success ? "successful" : "unsuccessful");

            cachePort->waitingOnRetry = !success;
            if (cachePort->waitingOnRetry) {
                DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
            }

            cachePort->cache->sendResult(pkt, mshr, success);
            if (success && cachePort->cache->doMasterRequest())
            {
                DPRINTF(CachePort, "%s still more MSHR requests to send\n",
                        cachePort->name());
                //Still more to issue, rerequest in 1 cycle
                pkt = NULL;
                this->schedule(curTick+1);
            }
        }
        else
        {
            //CSHR
            assert(cachePort->cache->doSlaveRequest());
            pkt = cachePort->cache->getCoherencePacket();
            MSHR* cshr = (MSHR*) pkt->senderState;
            bool success = cachePort->sendTiming(pkt);
            cachePort->cache->sendCoherenceResult(pkt, cshr, success);
            cachePort->waitingOnRetry = !success;
            if (cachePort->waitingOnRetry)
                DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
            if (success && cachePort->cache->doSlaveRequest())
            {
                DPRINTF(CachePort, "%s still more CSHR requests to send\n",
                        cachePort->name());
                //Still more to issue, rerequest in 1 cycle
                pkt = NULL;
                this->schedule(curTick+1);
            }
        }
        return;
    }
    //Else it's a response
    assert(cachePort->transmitList.size());
    assert(cachePort->transmitList.front().first <= curTick);
    pkt = cachePort->transmitList.front().second;
    cachePort->transmitList.pop_front();
    if (!cachePort->transmitList.empty()) {
        Tick time = cachePort->transmitList.front().first;
        schedule(time <= curTick ? curTick+1 : time);
    }

    if (pkt->flags & NACKED_LINE)
        pkt->result = Packet::Nacked;
    else
        pkt->result = Packet::Success;
    pkt->makeTimingResponse();
    DPRINTF(CachePort, "%s attempting to send a response\n", cachePort->name());
    if (!cachePort->drainList.empty() || cachePort->waitingOnRetry) {
        //Already have a list, just append
        cachePort->drainList.push_back(pkt);
        DPRINTF(CachePort, "%s appending response onto drain list\n", cachePort->name());
    }
    else if (!cachePort->sendTiming(pkt)) {
        //It failed, save it to list of drain events
        DPRINTF(CachePort, "%s now waiting for a retry\n", cachePort->name());
        cachePort->drainList.push_back(pkt);
        cachePort->waitingOnRetry = true;
    }

    // Check if we're done draining once this list is empty
    if (cachePort->drainList.empty() && cachePort->transmitList.empty())
        cachePort->cache->checkDrain();
}

const char *
BaseCache::CacheEvent::description()
{
    return "timing event\n";
}

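// Hand out (and lazily construct) the CPU-side and memory-side ports.
// "functional" returns a fresh CPU-side port; only one mem_side port may
// ever be created.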
Port*
BaseCache::getPort(const std::string &if_name, int idx)
{
    if (if_name == "")
    {
        if (cpuSidePort == NULL) {
            cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
            sendEvent = new CacheEvent(cpuSidePort, true);
        }
        return cpuSidePort;
    }
    else if (if_name == "functional")
    {
        return new CachePort(name() + "-cpu_side_port", this, true);
    }
    else if (if_name == "cpu_side")
    {
        if (cpuSidePort == NULL) {
            cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
            sendEvent = new CacheEvent(cpuSidePort, true);
        }
        return cpuSidePort;
    }
    else if (if_name == "mem_side")
    {
        if (memSidePort != NULL)
            panic("Already have a mem side for this cache\n");
        memSidePort = new CachePort(name() + "-mem_side_port", this, false);
        memSendEvent = new CacheEvent(memSidePort, true);
        return memSidePort;
    }
    else panic("Port name %s unrecognized\n", if_name);
}

void
BaseCache::init()
{
    if (!cpuSidePort || !memSidePort)
        panic("Cache not hooked up on both sides\n");
    cpuSidePort->sendStatusChange(Port::RangeChange);
}

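// Register statistics. A throwaway request/packet pair is built only so
// per-command stat names can be derived via cmdIdxToString().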
void
BaseCache::regStats()
{
    Request temp_req((Addr) NULL, 4, 0);
    Packet::Command temp_cmd = Packet::ReadReq;
    Packet temp_pkt(&temp_req, temp_cmd, 0);  //@todo Fix command strings so this isn't necessary
    temp_pkt.allocate(); //Temp allocate, all need data

    using namespace Stats;

    // Hit statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        hits[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_hits")
            .desc("number of " + cstr + " hits")
            .flags(total | nozero | nonan)
            ;
    }

    demandHits
        .name(name() + ".demand_hits")
        .desc("number of demand (read+write) hits")
        .flags(total)
        ;
    demandHits = hits[Packet::ReadReq] + hits[Packet::WriteReq];

    overallHits
        .name(name() + ".overall_hits")
        .desc("number of overall hits")
        .flags(total)
        ;
    overallHits = demandHits + hits[Packet::SoftPFReq] + hits[Packet::HardPFReq]
        + hits[Packet::Writeback];

    // Miss statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        misses[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_misses")
            .desc("number of " + cstr + " misses")
            .flags(total | nozero | nonan)
            ;
    }

    demandMisses
        .name(name() + ".demand_misses")
        .desc("number of demand (read+write) misses")
        .flags(total)
        ;
    demandMisses = misses[Packet::ReadReq] + misses[Packet::WriteReq];

    overallMisses
        .name(name() + ".overall_misses")
        .desc("number of overall misses")
        .flags(total)
        ;
    overallMisses = demandMisses + misses[Packet::SoftPFReq] +
        misses[Packet::HardPFReq] + misses[Packet::Writeback];

    // Miss latency statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        missLatency[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_miss_latency")
            .desc("number of " + cstr + " miss cycles")
            .flags(total | nozero | nonan)
            ;
    }

    demandMissLatency
        .name(name() + ".demand_miss_latency")
        .desc("number of demand (read+write) miss cycles")
        .flags(total)
        ;
    demandMissLatency = missLatency[Packet::ReadReq] + missLatency[Packet::WriteReq];

    overallMissLatency
        .name(name() + ".overall_miss_latency")
        .desc("number of overall miss cycles")
        .flags(total)
        ;
    overallMissLatency = demandMissLatency + missLatency[Packet::SoftPFReq] +
        missLatency[Packet::HardPFReq];

    // access formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        accesses[access_idx]
            .name(name() + "." + cstr + "_accesses")
            .desc("number of " + cstr + " accesses (hits+misses)")
            .flags(total | nozero | nonan)
            ;

        accesses[access_idx] = hits[access_idx] + misses[access_idx];
    }

    demandAccesses
        .name(name() + ".demand_accesses")
        .desc("number of demand (read+write) accesses")
        .flags(total)
        ;
    demandAccesses = demandHits + demandMisses;

    overallAccesses
        .name(name() + ".overall_accesses")
        .desc("number of overall (read+write) accesses")
        .flags(total)
        ;
    overallAccesses = overallHits + overallMisses;

    // miss rate formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        missRate[access_idx]
            .name(name() + "." + cstr + "_miss_rate")
            .desc("miss rate for " + cstr + " accesses")
            .flags(total | nozero | nonan)
            ;

        missRate[access_idx] = misses[access_idx] / accesses[access_idx];
    }

    demandMissRate
        .name(name() + ".demand_miss_rate")
        .desc("miss rate for demand accesses")
        .flags(total)
        ;
    demandMissRate = demandMisses / demandAccesses;

    overallMissRate
        .name(name() + ".overall_miss_rate")
        .desc("miss rate for overall accesses")
        .flags(total)
        ;
    overallMissRate = overallMisses / overallAccesses;

    // miss latency formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        avgMissLatency[access_idx]
            .name(name() + "." + cstr + "_avg_miss_latency")
            .desc("average " + cstr + " miss latency")
            .flags(total | nozero | nonan)
            ;

        avgMissLatency[access_idx] =
            missLatency[access_idx] / misses[access_idx];
    }

    demandAvgMissLatency
        .name(name() + ".demand_avg_miss_latency")
        .desc("average demand miss latency")
        .flags(total)
        ;
    demandAvgMissLatency = demandMissLatency / demandMisses;

    overallAvgMissLatency
        .name(name() + ".overall_avg_miss_latency")
        .desc("average overall miss latency")
        .flags(total)
        ;
    overallAvgMissLatency = overallMissLatency / overallMisses;

    blocked_cycles.init(NUM_BLOCKED_CAUSES);
    blocked_cycles
        .name(name() + ".blocked_cycles")
        .desc("number of cycles access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    blocked_causes.init(NUM_BLOCKED_CAUSES);
    blocked_causes
        .name(name() + ".blocked")
        .desc("number of times access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked
        .name(name() + ".avg_blocked_cycles")
        .desc("average number of cycles each access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked = blocked_cycles / blocked_causes;

    fastWrites
        .name(name() + ".fast_writes")
        .desc("number of fast writes performed")
        ;

    cacheCopies
        .name(name() + ".cache_copies")
        .desc("number of cache copies performed")
        ;

}

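// Drain interface: if outstanding state remains, remember the drain event
// and report one object still draining; otherwise transition straight to
// Drained.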
unsigned int
BaseCache::drain(Event *de)
{
    // Set status
    if (!canDrain()) {
        drainEvent = de;

        changeState(SimObject::Draining);
        return 1;
    }

    changeState(SimObject::Drained);
    return 0;
}