/*
 * Copyright (c) 2012-2013, 2018-2019 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Definition of BaseCache functions.
 */

#include "mem/cache/base.hh"

#include "base/compiler.hh"
#include "base/logging.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheRepl.hh"
#include "debug/CacheVerbose.hh"
#include "mem/cache/compressors/base.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "mem/cache/queue_entry.hh"
#include "params/BaseCache.hh"
#include "params/WriteAllocator.hh"
#include "sim/core.hh"

class BaseMasterPort;
class BaseSlavePort;

using namespace std;

BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
                                          BaseCache *_cache,
                                          const std::string &_label)
    : QueuedSlavePort(_name, _cache, queue),
      queue(*_cache, *this, true, _label),
      blocked(false), mustSendRetry(false),
      sendRetryEvent([this]{ processSendRetry(); }, _name)
{
}

BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
    : ClockedObject(p),
      cpuSidePort (p->name + ".cpu_side", this, "CpuSidePort"),
      memSidePort(p->name + ".mem_side", this, "MemSidePort"),
      mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
      writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
      tags(p->tags),
      compressor(p->compressor),
      prefetcher(p->prefetcher),
      writeAllocator(p->write_allocator),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
                                    name(), false,
                                    EventBase::Delayed_Writeback_Pri),
      blkSize(blk_size),
      lookupLatency(p->tag_latency),
      dataLatency(p->data_latency),
      forwardLatency(p->tag_latency),
      fillLatency(p->data_latency),
      responseLatency(p->response_latency),
      sequentialAccess(p->sequential_access),
      numTarget(p->tgts_per_mshr),
      forwardSnoops(true),
      clusivity(p->clusivity),
      isReadOnly(p->is_read_only),
      blocked(0),
      order(0),
      noTargetMSHR(nullptr),
      missCount(p->max_miss_count),
      addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()),
      system(p->system)
{
    // the MSHR queue has no reserve entries as we check the MSHR
    // queue on every single allocation, whereas the write queue has
    // as many reserve entries as we have MSHRs, since every MSHR may
    // eventually require a writeback, and we do not check the write
    // buffer before committing to an MSHR

    // forward snoops is overridden in init() once we can query
    // whether the connected master is actually snooping or not

    tempBlock = new TempCacheBlk(blkSize);

    tags->tagsInit();
    if (prefetcher)
        prefetcher->setCache(this);
}

BaseCache::~BaseCache()
{
    delete tempBlock;
}

void
BaseCache::CacheSlavePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(CachePort, "Port is blocking new requests\n");
    blocked = true;
    // if we already scheduled a retry in this cycle, but it has not yet
    // happened, cancel it
    if (sendRetryEvent.scheduled()) {
        owner.deschedule(sendRetryEvent);
        DPRINTF(CachePort, "Port descheduled retry\n");
        mustSendRetry = true;
    }
}

void
BaseCache::CacheSlavePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(CachePort, "Port is accepting new requests\n");
    blocked = false;
    if (mustSendRetry) {
        // @TODO: need to find a better time (next cycle?)
        owner.schedule(sendRetryEvent, curTick() + 1);
    }
}

void
BaseCache::CacheSlavePort::processSendRetry()
{
    DPRINTF(CachePort, "Port is sending retry\n");

    // reset the flag and call retry
    mustSendRetry = false;
    sendRetryReq();
}

Addr
BaseCache::regenerateBlkAddr(CacheBlk* blk)
{
    if (blk != tempBlock) {
        return tags->regenerateBlkAddr(blk);
    } else {
        return tempBlock->getAddr();
    }
}

void
BaseCache::init()
{
    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
        fatal("Cache ports on %s are not connected\n", name());
    cpuSidePort.sendRangeChange();
    forwardSnoops = cpuSidePort.isSnooping();
}

Port &
BaseCache::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_side") {
        return memSidePort;
    } else if (if_name == "cpu_side") {
        return cpuSidePort;
    } else {
        return ClockedObject::getPort(if_name, idx);
    }
}

bool
BaseCache::inRange(Addr addr) const
{
    for (const auto& r : addrRanges) {
        if (r.contains(addr)) {
            return true;
        }
    }
    return false;
}

void
BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    if (pkt->needsResponse()) {
        // These delays should have been consumed by now
        assert(pkt->headerDelay == 0);
        assert(pkt->payloadDelay == 0);

        pkt->makeTimingResponse();

        // In this case we are considering request_time that takes
        // into account the delay of the xbar, if any, and just
        // lat, neglecting responseLatency, modelling hit latency
        // just as the value of lat overridden by access(), which calls
        // the calculateAccessLatency() function.
        cpuSidePort.schedTimingResp(pkt, request_time);
    } else {
        DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
                pkt->print());

        // queue the packet for deletion, as the sending cache is
        // still relying on it; if the block is found in access(),
        // CleanEvict and Writeback messages will be deleted
        // here as well
        pendingDelete.reset(pkt);
    }
}

void
BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                               Tick forward_time, Tick request_time)
{
    if (writeAllocator &&
        pkt && pkt->isWrite() && !pkt->req->isUncacheable()) {
        writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
                                   pkt->getBlockAddr(blkSize));
    }

    if (mshr) {
        /// MSHR hit
        /// @note writebacks will be checked in getNextMSHR()
        /// for any conflicting requests to the same block

        //@todo remove hw_pf here

        // Coalesce unless it was a software prefetch (see above).
        if (pkt) {
            assert(!pkt->isWriteback());
            // CleanEvicts corresponding to blocks which have
            // outstanding requests in MSHRs are simply sunk here
            if (pkt->cmd == MemCmd::CleanEvict) {
                pendingDelete.reset(pkt);
            } else if (pkt->cmd == MemCmd::WriteClean) {
                // A WriteClean should never coalesce with any
                // outstanding cache maintenance requests.

                // We use forward_time here because there is an
                // uncached memory write, forwarded to WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
                        pkt->print());

                assert(pkt->req->masterId() < system->maxMasters());
                mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // We use forward_time here because it is the same
                // considering new targets. We have multiple
                // requests for the same address here. It
                // specifies the latency to allocate an internal
                // buffer and to schedule an event to the queued
                // port and also takes into account the additional
                // delay of the xbar.
                mshr->allocateTarget(pkt, forward_time, order++,
                                     allocOnFill(pkt->cmd));
                if (mshr->getNumTargets() == numTarget) {
                    noTargetMSHR = mshr;
                    setBlocked(Blocked_NoTargets);
                    // need to be careful with this... if this mshr isn't
                    // ready yet (i.e. time > curTick()), we don't want to
                    // move it ahead of mshrs that are ready
                    // mshrQueue.moveToFront(mshr);
                }
            }
        }
    } else {
        // no MSHR
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

        if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
            // We use forward_time here because there is a
            // writeback or writeclean, forwarded to WriteBuffer.
            allocateWriteBuffer(pkt, forward_time);
        } else {
            if (blk && blk->isValid()) {
                // If we have a write miss to a valid block, we
                // need to mark the block non-readable.  Otherwise
                // if we allow reads while there's an outstanding
                // write miss, the read could return stale data
                // out of the cache block... a more aggressive
                // system could detect the overlap (if any) and
                // forward data out of the MSHRs, but we don't do
                // that yet.  Note that we do need to leave the
                // block valid so that it stays in the cache, in
                // case we get an upgrade response (and hence no
                // new data) when the write miss completes.
                // As long as CPUs do proper store/load forwarding
                // internally, and have a sufficiently weak memory
                // model, this is probably unnecessary, but at some
                // point it must have seemed like we needed it...
                assert((pkt->needsWritable() && !blk->isWritable()) ||
                       pkt->req->isCacheMaintenance());
                blk->status &= ~BlkReadable;
            }
            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }
    }
}

void
BaseCache::recvTimingReq(PacketPtr pkt)
{
    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    Cycles lat;
    CacheBlk *blk = nullptr;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() will set the lat value.
        satisfied = access(pkt, blk, lat, writebacks);

        // After the evicted blocks are selected, they must be forwarded
        // to the write buffer to ensure they logically precede anything
        // happening below
        doWritebacks(writebacks, clockEdge(lat + forwardLatency));
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is just the value set by the access() function.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat);
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;

    if (satisfied) {
        // notify before anything else as later handleTimingReqHit might turn
        // the packet into a response
        ppHit->notify(pkt);

        if (prefetcher && blk && blk->wasPrefetched()) {
            blk->status &= ~BlkHWPrefetched;
        }

        handleTimingReqHit(pkt, blk, request_time);
    } else {
        handleTimingReqMiss(pkt, blk, forward_time, request_time);

        ppMiss->notify(pkt);
    }

    if (prefetcher) {
        // track time of availability of next prefetch, if any
        Tick next_pf_time = prefetcher->nextPrefetchReadyTime();
        if (next_pf_time != MaxTick) {
            schedMemSideSendEvent(next_pf_time);
        }
    }
}

void
BaseCache::handleUncacheableWriteResp(PacketPtr pkt)
{
    Tick completion_time = clockEdge(responseLatency) +
        pkt->headerDelay + pkt->payloadDelay;

    // Reset the bus additional time as it is now accounted for
    pkt->headerDelay = pkt->payloadDelay = 0;

    cpuSidePort.schedTimingResp(pkt, completion_time);
}

void
BaseCache::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    // all header delay should be paid for by the crossbar, unless
    // this is a prefetch response from above
    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
             "%s saw a non-zero packet delay\n", name());

    const bool is_error = pkt->isError();

    if (is_error) {
        DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
                pkt->print());
    }

    DPRINTF(Cache, "%s: Handling response %s\n", __func__,
            pkt->print());

    // if this is a write, we should be looking at an uncacheable
    // write
    if (pkt->isWrite()) {
        assert(pkt->req->isUncacheable());
        handleUncacheableWriteResp(pkt);
        return;
    }

    // we have dealt with any (uncacheable) writes above, from here on
    // we know we are dealing with an MSHR due to a miss or a prefetch
    MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
    assert(mshr);

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = nullptr;
    }

    // Initial target is used just for stats
    QueueEntry::Target *initial_tgt = mshr->getTarget();
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick() - initial_tgt->recvTime;

    if (pkt->req->isUncacheable()) {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    } else {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    }

    PacketList writebacks;

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp ||
         mshr->wasWholeLineWrite);

    // make sure that if the mshr was due to a whole line write then
    // the response is an invalidation
    assert(!mshr->wasWholeLineWrite || pkt->isInvalidate());

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                pkt->getAddr());

        const bool allocate = (writeAllocator && mshr->wasWholeLineWrite) ?
            writeAllocator->allocate() : mshr->allocOnFill();
        blk = handleFill(pkt, blk, writebacks, allocate);
        assert(blk != nullptr);
        ppFill->notify(pkt);
    }

    if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
        // The block was marked not readable while there was a pending
        // cache maintenance operation, restore its flag.
        blk->status |= BlkReadable;

        // This was a cache clean operation (without invalidate)
        // and we have a copy of the block already. Since there
        // is no invalidation, we can promote targets that don't
        // require a writable copy
        mshr->promoteReadable();
    }

    if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) {
        // If at this point the referenced block is writable and the
        // response is not a cache invalidate, we promote targets that
        // were deferred as we couldn't guarantee a writable copy
        mshr->promoteWritable();
    }

    serviceMSHRTargets(mshr, pkt, blk);

    if (mshr->promoteDeferredTargets()) {
        // avoid later read getting stale data while write miss is
        // outstanding.. see comment in timingAccess()
        if (blk) {
            blk->status &= ~BlkReadable;
        }
        mshrQueue.markPending(mshr);
        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
    } else {
        // while we deallocate an mshr from the queue we still have to
        // check the isFull condition before and after as we might
        // have been using the reserved entries already
        const bool was_full = mshrQueue.isFull();
        mshrQueue.deallocate(mshr);
        if (was_full && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }

        // Request the bus for a prefetch if this deallocation freed enough
        // MSHRs for a prefetch to take place
        if (prefetcher && mshrQueue.canPrefetch()) {
            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
                                         clockEdge());
            if (next_pf_time != MaxTick)
                schedMemSideSendEvent(next_pf_time);
        }
    }

    // if we used temp block, check to see if it's valid and then clear it out
    if (blk == tempBlock && tempBlock->isValid()) {
        evictBlock(blk, writebacks);
    }

    const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // copy writebacks to write buffer
    doWritebacks(writebacks, forward_time);

    DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
    delete pkt;
}


Tick
BaseCache::recvAtomic(PacketPtr pkt)
{
    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    // We use lookupLatency here because it is used to specify the latency
    // to access.
    Cycles lat = lookupLatency;

    CacheBlk *blk = nullptr;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty
        // block. If a dirty block is encountered a WriteClean
        // will update any copies to the path to the memory
        // until the point of reference.
        DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                __func__, pkt->print(), blk->print());
        PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
        writebacks.push_back(wb_pkt);
        pkt->setSatisfied();
    }

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);
    assert(writebacks.empty());

    if (!satisfied) {
        lat += handleAtomicReqMiss(pkt, blk, writebacks);
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention.  If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used temp block, check to see if it's valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter), first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = evictBlock(blk);
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat * clockPeriod();
}

void
BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
{
    Addr blk_addr = pkt->getBlockAddr(blkSize);
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found.  As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.

    // see if we have data at all (owned or otherwise)
    bool have_data = blk && blk->isValid()
        && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize,
                                     blk->data);

    // data we have is dirty if marked as such or if we have an
    // in-service MSHR that is pending a modified line
    bool have_dirty =
        have_data && (blk->isDirty() ||
                      (mshr && mshr->inService && mshr->isPendingModified()));

    bool done = have_dirty ||
        cpuSidePort.trySatisfyFunctional(pkt) ||
        mshrQueue.trySatisfyFunctional(pkt) ||
        writeBuffer.trySatisfyFunctional(pkt) ||
        memSidePort.trySatisfyFunctional(pkt);

    DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
            (blk && blk->isValid()) ? "valid " : "",
            have_data ? "data " : "", done ? "done " : "");

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (done) {
        pkt->makeResponse();
    } else {
        // if it came as a request from the CPU side then make sure it
        // continues towards the memory side
        if (from_cpu_side) {
            memSidePort.sendFunctional(pkt);
        } else if (cpuSidePort.isSnooping()) {
            // if it came from the memory side, it must be a snoop request
            // and we should only forward it if we are forwarding snoops
            cpuSidePort.sendFunctionalSnoop(pkt);
        }
    }
}


void
BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = pkt->getOffset(blkSize);
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
panic("Invalid size for conditional read/write\n"); 7134626SN/A } 7144626SN/A 7154626SN/A if (overwrite_mem) { 7164626SN/A std::memcpy(blk_data, &overwrite_val, pkt->getSize()); 7174626SN/A blk->status |= BlkDirty; 7188833Sdam.sunwoo@arm.com } 7198833Sdam.sunwoo@arm.com} 7208833Sdam.sunwoo@arm.com 7218833Sdam.sunwoo@arm.comQueueEntry* 7224626SN/ABaseCache::getNextQueueEntry() 7234626SN/A{ 7244626SN/A // Check both MSHR queue and write buffer for potential requests, 7254626SN/A // note that null does not mean there is no request, it could 7264626SN/A // simply be that it is not ready 7278833Sdam.sunwoo@arm.com MSHR *miss_mshr = mshrQueue.getNext(); 7284626SN/A WriteQueueEntry *wq_entry = writeBuffer.getNext(); 7294626SN/A 7308833Sdam.sunwoo@arm.com // If we got a write buffer request ready, first priority is a 7318833Sdam.sunwoo@arm.com // full write buffer, otherwise we favour the miss requests 7328833Sdam.sunwoo@arm.com if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) { 7334626SN/A // need to search MSHR queue for conflicting earlier miss. 7344626SN/A MSHR *conflict_mshr = mshrQueue.findPending(wq_entry); 7358833Sdam.sunwoo@arm.com 7364626SN/A if (conflict_mshr && conflict_mshr->order < wq_entry->order) { 7374626SN/A // Service misses in order until conflict is cleared. 7388833Sdam.sunwoo@arm.com return conflict_mshr; 7394626SN/A 7408833Sdam.sunwoo@arm.com // @todo Note that we ignore the ready time of the conflict here 7418833Sdam.sunwoo@arm.com } 7428833Sdam.sunwoo@arm.com 7434626SN/A // No conflicts; issue write 7444626SN/A return wq_entry; 7454626SN/A } else if (miss_mshr) { 7468833Sdam.sunwoo@arm.com // need to check for conflicting earlier writeback 7474626SN/A WriteQueueEntry *conflict_mshr = writeBuffer.findPending(miss_mshr); 7484626SN/A if (conflict_mshr) { 7498833Sdam.sunwoo@arm.com // not sure why we don't check order here... it was in the 7504626SN/A // original code but commented out. 7518833Sdam.sunwoo@arm.com 7528833Sdam.sunwoo@arm.com // The only way this happens is if we are 7538833Sdam.sunwoo@arm.com // doing a write and we didn't have permissions 7544626SN/A // then subsequently saw a writeback (owned got evicted) 7554626SN/A // We need to make sure to perform the writeback first 7564626SN/A // To preserve the dirty data, then we can issue the write 7574626SN/A 7584626SN/A // should we return wq_entry here instead? I.e. do we 7594626SN/A // have to flush writes in order? I don't think so... not 7602810SN/A // for Alpha anyway. Maybe for x86? 7613503SN/A return conflict_mshr; 7623503SN/A 7639342SAndreas.Sandberg@arm.com // @todo Note that we ignore the ready time of the conflict here 7643503SN/A } 7659347SAndreas.Sandberg@arm.com 7669347SAndreas.Sandberg@arm.com // No conflicts; issue read 7674626SN/A return miss_mshr; 7683503SN/A } 7694626SN/A 7709342SAndreas.Sandberg@arm.com // fall through... no pending requests. Try a prefetch. 
    assert(!miss_mshr && !wq_entry);
    if (prefetcher && mshrQueue.canPrefetch()) {
        // If we have a miss queue slot, we can try a prefetch
        PacketPtr pkt = prefetcher->getPacket();
        if (pkt) {
            Addr pf_addr = pkt->getBlockAddr(blkSize);
            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
                // Update statistic on number of prefetches issued
                // (hwpf_mshr_misses)
                assert(pkt->req->masterId() < system->maxMasters());
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // allocate an MSHR and return it, note
                // that we send the packet straight away, so do not
                // schedule the send
                return allocateMissBuffer(pkt, curTick(), false);
            } else {
                // free the request and packet
                delete pkt;
            }
        }
    }

    return nullptr;
}

void
BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsWritable() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        if (pkt->isAtomicOp()) {
            // extract data from cache and save it into the data field in
            // the packet as a return value from this atomic op
            int offset = tags->extractBlkOffset(pkt->getAddr());
            uint8_t *blk_data = blk->data + offset;
            pkt->setData(blk_data);

            // execute AMO operation
            (*(pkt->getAtomicOp()))(blk_data);

            // set block status to dirty
            blk->status |= BlkDirty;
        } else {
            cmpAndSwap(blk, pkt);
        }
    } else if (pkt->isWrite()) {
        // we have the block in a writable state and can go ahead,
        // note that the line may also be considered writable in
        // downstream caches along the path to memory, but always
        // Exclusive, and never Modified
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in writable state
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty (and thus transition to the
        // Modified state) even if we are a failed StoreCond so we
        // supply data to any snoops that have appended themselves to
        // this cache before knowing the store will fail.
        blk->status |= BlkDirty;
        DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }

        // all read responses have a data payload
        assert(pkt->hasRespData());
        pkt->setDataFromBlock(blk->data, blkSize);
    } else if (pkt->isUpgrade()) {
        // sanity check
        assert(!pkt->hasSharers());

        if (blk->isDirty()) {
            // we were in the Owned state, and a cache above us that
            // has the line in Shared state needs to be made aware
            // that the data it already has is in fact dirty
            pkt->setCacheResponding();
            blk->status &= ~BlkDirty;
        }
    } else if (pkt->isClean()) {
        blk->status &= ~BlkDirty;
    } else {
        assert(pkt->isInvalidate());
        invalidateBlock(blk);
        DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
                pkt->print());
    }
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////
Cycles
BaseCache::calculateTagOnlyLatency(const uint32_t delay,
                                   const Cycles lookup_lat) const
{
    // A tag-only access has to wait for the packet to arrive in order to
    // perform the tag lookup.
    return ticksToCycles(delay) + lookup_lat;
}

Cycles
BaseCache::calculateAccessLatency(const CacheBlk* blk, const uint32_t delay,
                                  const Cycles lookup_lat) const
{
    Cycles lat(0);

    if (blk != nullptr) {
        // As soon as the access arrives, for sequential accesses first access
        // tags, then the data entry. In the case of parallel accesses the
        // latency is dictated by the slowest of tag and data latencies.
        if (sequentialAccess) {
            lat = ticksToCycles(delay) + lookup_lat + dataLatency;
        } else {
            lat = ticksToCycles(delay) + std::max(lookup_lat, dataLatency);
        }

        // Check if the block to be accessed is available. If not, apply the
        // access latency on top of when the block is ready to be accessed.
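        // For example (illustrative numbers only): with a 3-cycle access
        // latency and a block that only becomes ready 5 cycles from now,
        // the 5-cycle wait exceeds the access latency and is added on top
        // of it, giving 8 cycles in total.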
        const Tick tick = curTick() + delay;
        const Tick when_ready = blk->getWhenReady();
        if (when_ready > tick &&
            ticksToCycles(when_ready - tick) > lat) {
            lat += ticksToCycles(when_ready - tick);
        }
    } else {
        // In case of a miss, we neglect the data access in a parallel
        // configuration (i.e., the data access will be stopped as soon as
        // we find out it is a miss), and use the tag-only latency.
        lat = calculateTagOnlyLatency(delay, lookup_lat);
    }

    return lat;
}

bool
BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
                  PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());

    // Access block in the tags
    Cycles tag_latency(0);
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), tag_latency);

    DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(),
            blk ? "hit " + blk->print() : "miss");

    if (pkt->req->isCacheMaintenance()) {
        // A cache maintenance operation is always forwarded to the
        // memory below even if the block is found in dirty state.

        // We defer any changes to the state of the block until we
        // create and mark as in service the mshr for the downstream
        // packet.

        // Calculate access latency on top of when the packet arrives. This
        // takes into account the bus delay.
        lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

        return false;
    }

    if (pkt->isEviction()) {
        // We check for presence of block in above caches before issuing
        // Writeback or CleanEvict to write buffer. Therefore the only
        // possible case is a CleanEvict packet coming from above
        // encountering a Writeback generated in this cache and
        // waiting in the write buffer. Cases of upper level peer caches
        // generating CleanEvict and Writeback or simply CleanEvict and
        // CleanEvict almost simultaneously will be caught by snoops sent out
        // by crossbar.
        WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
                                                          pkt->isSecure());
        if (wb_entry) {
            assert(wb_entry->getNumTargets() == 1);
            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
            assert(wbPkt->isWriteback());

            if (pkt->isCleanEviction()) {
                // The CleanEvict and WritebackClean snoop into other
                // peer caches of the same level while traversing the
                // crossbar. If a copy of the block is found, the
                // packet is deleted in the crossbar. Hence, none of
                // the other upper level caches connected to this
                // cache have the block, so we can clear the
                // BLOCK_CACHED flag in the Writeback if set and
                // discard the CleanEvict by returning true.
                wbPkt->clearBlockCached();

                // A clean evict does not need to access the data array
                lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

                return true;
            } else {
                assert(pkt->cmd == MemCmd::WritebackDirty);
                // Dirty writeback from above trumps our clean
                // writeback... discard here
                // Note: markInService will remove entry from writeback buffer.
                markInService(wb_entry);
                delete wbPkt;
            }
        }
    }

    // Writeback handling is a special case.  We can write the block into
    // the cache without having a writeable copy (or any copy at all).
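    // The block state is derived from the packet below: it is marked dirty
    // only for a WritebackDirty, and writable only if the packet carries
    // no sharers.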
    if (pkt->isWriteback()) {
        assert(blkSize == pkt->getSize());

        // we could get a clean writeback while we are having
        // outstanding accesses to a block, do the simple thing for
        // now and drop the clean writeback so that we do not upset
        // any ordering/decisions about ownership already taken
        if (pkt->cmd == MemCmd::WritebackClean &&
            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
                    "dropping\n", pkt->getAddr());

            // A writeback searches for the block, then writes the data.
            // As the writeback is being dropped, the data is not touched,
            // and we just had to wait for the time to find a match in the
            // MSHR. As of now assume an MSHR queue search takes as long as
            // a tag lookup for simplicity.
            lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

            return true;
        }

        if (!blk) {
            // need to do a replacement
            blk = allocateBlock(pkt, writebacks);
            if (!blk) {
                // no replaceable block available: give up, fwd to next level.
                incMissCount(pkt);

                // A writeback searches for the block, then writes the data.
                // As the block could not be found, it was a tag-only access.
                lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

                return false;
            }

            blk->status |= BlkReadable;
        } else {
            if (compressor) {
                // This is an overwrite to an existing block, therefore we need
                // to check for data expansion (i.e., block was compressed with
                // a smaller size, and now it doesn't fit the entry anymore).
                // If that is the case we might need to evict blocks.
                // @todo Update compression data
            }
        }

        // only mark the block dirty if we got a writeback command,
        // and leave it as is for a clean writeback
        if (pkt->cmd == MemCmd::WritebackDirty) {
            // TODO: the coherent cache can assert(!blk->isDirty());
            blk->status |= BlkDirty;
        }
        // if the packet does not have sharers, it is passing
        // writable, and we got the writeback in Modified or Exclusive
        // state, if not we are in the Owned or Shared state
        if (!pkt->hasSharers()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
        incHitCount(pkt);

        // A writeback searches for the block, then writes the data
        lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);

        // When the packet metadata arrives, the tag lookup will be done while
        // the payload is arriving. Then the block will be ready to access as
        // soon as the fill is done
        blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
            std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay));

        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        // A CleanEvict does not need to access the data array
        lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

        if (blk) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and delete
            // it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the CleanEvict
        // like a Writeback which could not find a replaceable block so has to
        // go to next level.
        return false;
    } else if (pkt->cmd == MemCmd::WriteClean) {
        // WriteClean handling is a special case. We can allocate a
        // block directly if it doesn't exist and we can update the
        // block immediately. The WriteClean transfers the ownership
        // of the block as well.
        assert(blkSize == pkt->getSize());

        if (!blk) {
            if (pkt->writeThrough()) {
                // A writeback searches for the block, then writes the data.
                // As the block could not be found, it was a tag-only access.
                lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

                // if this is a write through packet, we don't try to
                // allocate if the block is not present
                return false;
            } else {
                // a writeback that misses needs to allocate a new block
                blk = allocateBlock(pkt, writebacks);
                if (!blk) {
                    // no replaceable block available: give up, fwd to
                    // next level.
                    incMissCount(pkt);

                    // A writeback searches for the block, then writes the
                    // data. As the block could not be found, it was a tag-only
                    // access.
                    lat = calculateTagOnlyLatency(pkt->headerDelay,
                                                  tag_latency);

                    return false;
                }

                blk->status |= BlkReadable;
            }
        } else {
            if (compressor) {
                // @todo Update compression data
            }
        }

        // at this point either this is a writeback or a write-through
        // write clean operation and the block is already in this
        // cache, we need to update the data and the block flags
        assert(blk);
        // TODO: the coherent cache can assert(!blk->isDirty());
        if (!pkt->writeThrough()) {
            blk->status |= BlkDirty;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());

        incHitCount(pkt);

        // A writeback searches for the block, then writes the data
        lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);

        // When the packet metadata arrives, the tag lookup will be done while
        // the payload is arriving. Then the block will be ready to access as
        // soon as the fill is done
        blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
            std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay));

        // if this is a write-through packet it will be sent to cache
        // below
        return !pkt->writeThrough();
    } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
                       blk->isReadable())) {
        // OK to satisfy access
        incHitCount(pkt);

        // Calculate access latency based on the need to access the data array
        if (pkt->isRead() || pkt->isWrite()) {
            lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);

            // When a block is compressed, it must first be decompressed
            // before being read. This adds to the access latency.
            if (compressor && pkt->isRead()) {
                lat += compressor->getDecompressionLatency(blk);
            }
        } else {
            lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
        }

        satisfyRequest(pkt, blk);
        maintainClusivity(pkt->fromCache(), blk);

        return true;
    }

    // Can't satisfy access normally... either no block (blk == nullptr)
    // or have block but need writable

    incMissCount(pkt);

    lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);

    if (!blk && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}

void
BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
{
    if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
        clusivity == Enums::mostly_excl) {
        // if we have responded to a cache, and our block is still
        // valid, but not dirty, and this cache is mostly exclusive
        // with respect to the cache above, drop the block
        invalidateBlock(blk);
    }
}

CacheBlk*
BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
                      bool allocate)
{
    assert(pkt->isResponse());
    Addr addr = pkt->getAddr();
    bool is_secure = pkt->isSecure();
#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    // When handling a fill, we should have no writes to this line.
    assert(addr == pkt->getBlockAddr(blkSize));
    assert(!writeBuffer.findMatch(addr, is_secure));

    if (!blk) {
        // better have read new data...
        assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp);

        // need to do a replacement if allocating, otherwise we stick
        // with the temporary storage
        blk = allocate ? allocateBlock(pkt, writebacks) : nullptr;

        if (!blk) {
            // No replaceable block or a mostly exclusive
            // cache... just use temporary storage to complete the
            // current request and then get rid of it
            blk = tempBlock;
            tempBlock->insert(addr, is_secure);
            DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
                    is_secure ? "s" : "ns");
        }
    } else {
        // existing block... probably an upgrade
        // don't clear block status... if block is already dirty we
        // don't want to lose that
    }

    // Block is guaranteed to be valid at this point
    assert(blk->isValid());
    assert(blk->isSecure() == is_secure);
    assert(regenerateBlkAddr(blk) == addr);

    blk->status |= BlkReadable;

    // sanity check for whole-line writes, which should always be
    // marked as writable as part of the fill, and then later marked
    // dirty as part of satisfyRequest
    if (pkt->cmd == MemCmd::InvalidateResp) {
        assert(!pkt->hasSharers());
    }

    // here we deal with setting the appropriate state of the line,
    // and we start by looking at the hasSharers flag, and ignore the
    // cacheResponding flag (normally signalling dirty data) if the
    // packet has sharers, thus the line is never allocated as Owned
    // (dirty but not writable), and always ends up being either
    // Shared, Exclusive or Modified, see Packet::setCacheResponding
    // for more details
    if (!pkt->hasSharers()) {
        // we could get a writable line from memory (rather than a
        // cache) even in a read-only cache, note that we set this bit
        // even for a read-only cache, possibly revisit this decision
        blk->status |= BlkWritable;

        // check if we got this via cache-to-cache transfer (i.e., from a
        // cache that had the block in Modified or Owned state)
        if (pkt->cacheResponding()) {
            // we got the block in Modified state, and invalidated the
            // owners copy
            blk->status |= BlkDirty;

            chatty_assert(!isReadOnly, "Should never see dirty snoop response "
                          "in read-only cache %s\n", name());

        } else if (pkt->cmd.isSWPrefetch() && pkt->needsWritable()) {
            // All other copies of the block were invalidated and we
            // have an exclusive copy.

            // The coherence protocol assumes that if we fetched an
            // exclusive copy of the block, we have the intention to
            // modify it. Therefore the MSHR for the PrefetchExReq has
            // been the point of ordering and this cache has committed
            // to respond to snoops for the block.
            //
            // In most cases this is true anyway - a PrefetchExReq
            // will be followed by a WriteReq. However, if that
            // doesn't happen, the block is not marked as dirty and
            // the cache doesn't respond to snoops even though it has
            // committed to do so.
            //
            // To avoid deadlocks in cases where there is a snoop
            // between the PrefetchExReq and the expected WriteReq, we
            // proactively mark the block as Dirty.

            blk->status |= BlkDirty;

            panic_if(isReadOnly, "Prefetch exclusive requests from read-only "
                     "cache %s\n", name());
        }
    }

    DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
"s" : "ns", old_state, blk->print()); 1317 1318 // if we got new data, copy it in (checking for a read response 1319 // and a response that has data is the same in the end) 1320 if (pkt->isRead()) { 1321 // sanity checks 1322 assert(pkt->hasData()); 1323 assert(pkt->getSize() == blkSize); 1324 1325 pkt->writeDataToBlock(blk->data, blkSize); 1326 } 1327 // The block will be ready when the payload arrives and the fill is done 1328 blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay + 1329 pkt->payloadDelay); 1330 1331 return blk; 1332} 1333 1334CacheBlk* 1335BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks) 1336{ 1337 // Get address 1338 const Addr addr = pkt->getAddr(); 1339 1340 // Get secure bit 1341 const bool is_secure = pkt->isSecure(); 1342 1343 // Block size and compression related access latency. Only relevant if 1344 // using a compressor, otherwise there is no extra delay, and the block 1345 // is fully sized 1346 std::size_t blk_size_bits = blkSize*8; 1347 Cycles compression_lat = Cycles(0); 1348 Cycles decompression_lat = Cycles(0); 1349 1350 // If a compressor is being used, it is called to compress data before 1351 // insertion. Although in Gem5 the data is stored uncompressed, even if a 1352 // compressor is used, the compression/decompression methods are called to 1353 // calculate the amount of extra cycles needed to read or write compressed 1354 // blocks. 1355 if (compressor) { 1356 compressor->compress(pkt->getConstPtr<uint64_t>(), compression_lat, 1357 decompression_lat, blk_size_bits); 1358 } 1359 1360 // Find replacement victim 1361 std::vector<CacheBlk*> evict_blks; 1362 CacheBlk *victim = tags->findVictim(addr, is_secure, blk_size_bits, 1363 evict_blks); 1364 1365 // It is valid to return nullptr if there is no victim 1366 if (!victim) 1367 return nullptr; 1368 1369 // Print victim block's information 1370 DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print()); 1371 1372 // Check for transient state allocations. If any of the entries listed 1373 // for eviction has a transient state, the allocation fails 1374 bool replacement = false; 1375 for (const auto& blk : evict_blks) { 1376 if (blk->isValid()) { 1377 replacement = true; 1378 1379 Addr repl_addr = regenerateBlkAddr(blk); 1380 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure()); 1381 if (repl_mshr) { 1382 // must be an outstanding upgrade or clean request 1383 // on a block we're about to replace... 1384 assert((!blk->isWritable() && repl_mshr->needsWritable()) || 1385 repl_mshr->isCleaning()); 1386 1387 // too hard to replace block with transient state 1388 // allocation failed, block not inserted 1389 return nullptr; 1390 } 1391 } 1392 } 1393 1394 // The victim will be replaced by a new entry, so increase the replacement 1395 // counter if a valid block is being replaced 1396 if (replacement) { 1397 // Evict valid blocks associated to this victim block 1398 for (const auto& blk : evict_blks) { 1399 if (blk->isValid()) { 1400 DPRINTF(CacheRepl, "Evicting %s (%#llx) to make room for " \ 1401 "%#llx (%s)\n", blk->print(), regenerateBlkAddr(blk), 1402 addr, is_secure); 1403 1404 if (blk->wasPrefetched()) { 1405 unusedPrefetches++; 1406 } 1407 1408 evictBlock(blk, writebacks); 1409 } 1410 } 1411 1412 replacements++; 1413 } 1414 1415 // If using a compressor, set compression data. This must be done before 1416 // block insertion, as compressed tags use this information. 
1417 if (compressor) { 1418 compressor->setSizeBits(victim, blk_size_bits); 1419 compressor->setDecompressionLatency(victim, decompression_lat); 1420 } 1421 1422 // Insert new block at victimized entry 1423 tags->insertBlock(pkt, victim); 1424 1425 return victim; 1426} 1427 1428void 1429BaseCache::invalidateBlock(CacheBlk *blk) 1430{ 1431 // If handling a block present in the Tags, let it do its invalidation 1432 // process, which will update stats and invalidate the block itself 1433 if (blk != tempBlock) { 1434 tags->invalidate(blk); 1435 } else { 1436 tempBlock->invalidate(); 1437 } 1438} 1439 1440void 1441BaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks) 1442{ 1443 PacketPtr pkt = evictBlock(blk); 1444 if (pkt) { 1445 writebacks.push_back(pkt); 1446 } 1447} 1448 1449PacketPtr 1450BaseCache::writebackBlk(CacheBlk *blk) 1451{ 1452 chatty_assert(!isReadOnly || writebackClean, 1453 "Writeback from read-only cache"); 1454 assert(blk && blk->isValid() && (blk->isDirty() || writebackClean)); 1455 1456 writebacks[Request::wbMasterId]++; 1457 1458 RequestPtr req = std::make_shared<Request>( 1459 regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId); 1460 1461 if (blk->isSecure()) 1462 req->setFlags(Request::SECURE); 1463 1464 req->taskId(blk->task_id); 1465 1466 PacketPtr pkt = 1467 new Packet(req, blk->isDirty() ? 1468 MemCmd::WritebackDirty : MemCmd::WritebackClean); 1469 1470 DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n", 1471 pkt->print(), blk->isWritable(), blk->isDirty()); 1472 1473 if (blk->isWritable()) { 1474 // not asserting shared means we pass the block in modified 1475 // state, mark our own block non-writeable 1476 blk->status &= ~BlkWritable; 1477 } else { 1478 // we are in the Owned state, tell the receiver 1479 pkt->setHasSharers(); 1480 } 1481 1482 // make sure the block is not marked dirty 1483 blk->status &= ~BlkDirty; 1484 1485 pkt->allocate(); 1486 pkt->setDataFromBlock(blk->data, blkSize); 1487 1488 // When a block is compressed, it must first be decompressed before being 1489 // sent for writeback. 1490 if (compressor) { 1491 pkt->payloadDelay = compressor->getDecompressionLatency(blk); 1492 } 1493 1494 return pkt; 1495} 1496 1497PacketPtr 1498BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id) 1499{ 1500 RequestPtr req = std::make_shared<Request>( 1501 regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId); 1502 1503 if (blk->isSecure()) { 1504 req->setFlags(Request::SECURE); 1505 } 1506 req->taskId(blk->task_id); 1507 1508 PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id); 1509 1510 if (dest) { 1511 req->setFlags(dest); 1512 pkt->setWriteThrough(); 1513 } 1514 1515 DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(), 1516 blk->isWritable(), blk->isDirty()); 1517 1518 if (blk->isWritable()) { 1519 // not asserting shared means we pass the block in modified 1520 // state, mark our own block non-writeable 1521 blk->status &= ~BlkWritable; 1522 } else { 1523 // we are in the Owned state, tell the receiver 1524 pkt->setHasSharers(); 1525 } 1526 1527 // make sure the block is not marked dirty 1528 blk->status &= ~BlkDirty; 1529 1530 pkt->allocate(); 1531 pkt->setDataFromBlock(blk->data, blkSize); 1532 1533 // When a block is compressed, it must first be decompressed before being 1534 // sent for writeback. 
1535    if (compressor) {
1536        pkt->payloadDelay = compressor->getDecompressionLatency(blk);
1537    }
1538
1539    return pkt;
1540}
1541
1542
1543void
1544BaseCache::memWriteback()
1545{
1546    tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
1547}
1548
1549void
1550BaseCache::memInvalidate()
1551{
1552    tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
1553}
1554
1555bool
1556BaseCache::isDirty() const
1557{
1558    return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); });
1559}
1560
1561bool
1562BaseCache::coalesce() const
1563{
1564    return writeAllocator && writeAllocator->coalesce();
1565}
1566
1567void
1568BaseCache::writebackVisitor(CacheBlk &blk)
1569{
1570    if (blk.isDirty()) {
1571        assert(blk.isValid());
1572
1573        RequestPtr request = std::make_shared<Request>(
1574            regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId);
1575
1576        request->taskId(blk.task_id);
1577        if (blk.isSecure()) {
1578            request->setFlags(Request::SECURE);
1579        }
1580
1581        Packet packet(request, MemCmd::WriteReq);
1582        packet.dataStatic(blk.data);
1583
1584        memSidePort.sendFunctional(&packet);
1585
1586        blk.status &= ~BlkDirty;
1587    }
1588}
1589
1590void
1591BaseCache::invalidateVisitor(CacheBlk &blk)
1592{
1593    if (blk.isDirty())
1594        warn_once("Invalidating dirty cache lines. " \
1595                  "Expect things to break.\n");
1596
1597    if (blk.isValid()) {
1598        assert(!blk.isDirty());
1599        invalidateBlock(&blk);
1600    }
1601}
1602
1603Tick
1604BaseCache::nextQueueReadyTime() const
1605{
1606    Tick nextReady = std::min(mshrQueue.nextReadyTime(),
1607                              writeBuffer.nextReadyTime());
1608
1609    // Don't signal prefetch ready time if no MSHRs available
1610    // Will signal once enough MSHRs are deallocated
1611    if (prefetcher && mshrQueue.canPrefetch()) {
1612        nextReady = std::min(nextReady,
1613                             prefetcher->nextPrefetchReadyTime());
1614    }
1615
1616    return nextReady;
1617}
1618
1619
1620bool
1621BaseCache::sendMSHRQueuePacket(MSHR* mshr)
1622{
1623    assert(mshr);
1624
1625    // use request from 1st target
1626    PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1627
1628    DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());
1629
1630    // if the cache is in write coalescing mode or (additionally) in
1631    // no allocation mode, and we have a write packet with an MSHR
1632    // that is not a whole-line write (due to incompatible flags etc),
1633    // then reset the write mode
1634    if (writeAllocator && writeAllocator->coalesce() && tgt_pkt->isWrite()) {
1635        if (!mshr->isWholeLineWrite()) {
1636            // if we are currently write coalescing, hold on to the
1637            // MSHR for as many extra cycles as we need to completely
1638            // write a cache line
1639            if (writeAllocator->delay(mshr->blkAddr)) {
1640                Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod();
1641                DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow "
1642                        "for write coalescing\n", tgt_pkt->print(), delay);
1643                mshrQueue.delay(mshr, delay);
1644                return false;
1645            } else {
1646                writeAllocator->reset();
1647            }
1648        } else {
1649            writeAllocator->resetDelay(mshr->blkAddr);
1650        }
1651    }
1652
1653    CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
1654
1655    // either a prefetch that is not present upstream, or a normal
1656    // MSHR request, proceed to get the packet to send downstream
1657    PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable(),
1658                                     mshr->isWholeLineWrite());
1659
1660    mshr->isForward = (pkt == nullptr);
1661
1662    if (mshr->isForward) {
1663        // not a cache block request, but a response is expected
1664        // make copy of current packet to forward, keep current
1665        // copy for response handling
1666        pkt = new Packet(tgt_pkt, false, true);
1667        assert(!pkt->isWrite());
1668    }
1669
1670    // play it safe and append (rather than set) the sender state,
1671    // as forwarded packets may already have existing state
1672    pkt->pushSenderState(mshr);
1673
1674    if (pkt->isClean() && blk && blk->isDirty()) {
1675        // A cache clean operation is looking for a dirty block. Mark
1676        // the packet so that the destination xbar can determine that
1677        // there will be a follow-up write packet as well.
1678        pkt->setSatisfied();
1679    }
1680
1681    if (!memSidePort.sendTimingReq(pkt)) {
1682        // we are awaiting a retry, but we
1683        // delete the packet and will be creating a new packet
1684        // when we get the opportunity
1685        delete pkt;
1686
1687        // note that we have now masked any requestBus and
1688        // schedSendEvent (we will wait for a retry before
1689        // doing anything), and this is so even if we do not
1690        // care about this packet and might override it before
1691        // it gets retried
1692        return true;
1693    } else {
1694        // As part of the call to sendTimingReq the packet is
1695        // forwarded to all neighbouring caches (and any caches
1696        // above them) as a snoop. Thus at this point we know if
1697        // any of the neighbouring caches are responding, and if
1698        // so, we know it is dirty, and we can determine if it is
1699        // being passed as Modified, making our MSHR the ordering
1700        // point
1701        bool pending_modified_resp = !pkt->hasSharers() &&
1702            pkt->cacheResponding();
1703        markInService(mshr, pending_modified_resp);
1704
1705        if (pkt->isClean() && blk && blk->isDirty()) {
1706            // A cache clean operation is looking for a dirty
1707            // block. If a dirty block is encountered, a WriteClean
1708            // will update any copies on the path to memory up to the
1709            // point of reference.
1710            DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
1711                    __func__, pkt->print(), blk->print());
1712            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
1713                                             pkt->id);
1714            PacketList writebacks;
1715            writebacks.push_back(wb_pkt);
1716            doWritebacks(writebacks, 0);
1717        }
1718
1719        return false;
1720    }
1721}
1722
1723bool
1724BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
1725{
1726    assert(wq_entry);
1727
1728    // always a single target for write queue entries
1729    PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;
1730
1731    DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());
1732
1733    // forward as is, both for evictions and uncacheable writes
1734    if (!memSidePort.sendTimingReq(tgt_pkt)) {
1735        // note that we have now masked any requestBus and
1736        // schedSendEvent (we will wait for a retry before
1737        // doing anything), and this is so even if we do not
1738        // care about this packet and might override it before
1739        // it gets retried
1740        return true;
1741    } else {
1742        markInService(wq_entry);
1743        return false;
1744    }
1745}
1746
1747void
1748BaseCache::serialize(CheckpointOut &cp) const
1749{
1750    bool dirty(isDirty());
1751
1752    if (dirty) {
1753        warn("*** The cache still contains dirty data. ***\n");
1754        warn(" Make sure to drain the system using the correct flags.\n");
1755        warn(" This checkpoint will not restore correctly " \
1756             "and dirty data in the cache will be lost!\n");
1757    }
1758
1759    // Since we don't checkpoint the data in the cache, any dirty data
1760    // will be lost when restoring from a checkpoint of a system that
1761    // wasn't drained properly.
Flag the checkpoint as invalid if the 1762 // cache contains dirty data. 1763 bool bad_checkpoint(dirty); 1764 SERIALIZE_SCALAR(bad_checkpoint); 1765} 1766 1767void 1768BaseCache::unserialize(CheckpointIn &cp) 1769{ 1770 bool bad_checkpoint; 1771 UNSERIALIZE_SCALAR(bad_checkpoint); 1772 if (bad_checkpoint) { 1773 fatal("Restoring from checkpoints with dirty caches is not " 1774 "supported in the classic memory system. Please remove any " 1775 "caches or drain them properly before taking checkpoints.\n"); 1776 } 1777} 1778 1779void 1780BaseCache::regStats() 1781{ 1782 ClockedObject::regStats(); 1783 1784 using namespace Stats; 1785 1786 // Hit statistics 1787 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1788 MemCmd cmd(access_idx); 1789 const string &cstr = cmd.toString(); 1790 1791 hits[access_idx] 1792 .init(system->maxMasters()) 1793 .name(name() + "." + cstr + "_hits") 1794 .desc("number of " + cstr + " hits") 1795 .flags(total | nozero | nonan) 1796 ; 1797 for (int i = 0; i < system->maxMasters(); i++) { 1798 hits[access_idx].subname(i, system->getMasterName(i)); 1799 } 1800 } 1801 1802// These macros make it easier to sum the right subset of commands and 1803// to change the subset of commands that are considered "demand" vs 1804// "non-demand" 1805#define SUM_DEMAND(s) \ 1806 (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \ 1807 s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq]) 1808 1809// should writebacks be included here? prior code was inconsistent... 1810#define SUM_NON_DEMAND(s) \ 1811 (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq] + s[MemCmd::SoftPFExReq]) 1812 1813 demandHits 1814 .name(name() + ".demand_hits") 1815 .desc("number of demand (read+write) hits") 1816 .flags(total | nozero | nonan) 1817 ; 1818 demandHits = SUM_DEMAND(hits); 1819 for (int i = 0; i < system->maxMasters(); i++) { 1820 demandHits.subname(i, system->getMasterName(i)); 1821 } 1822 1823 overallHits 1824 .name(name() + ".overall_hits") 1825 .desc("number of overall hits") 1826 .flags(total | nozero | nonan) 1827 ; 1828 overallHits = demandHits + SUM_NON_DEMAND(hits); 1829 for (int i = 0; i < system->maxMasters(); i++) { 1830 overallHits.subname(i, system->getMasterName(i)); 1831 } 1832 1833 // Miss statistics 1834 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1835 MemCmd cmd(access_idx); 1836 const string &cstr = cmd.toString(); 1837 1838 misses[access_idx] 1839 .init(system->maxMasters()) 1840 .name(name() + "." 
+ cstr + "_misses") 1841 .desc("number of " + cstr + " misses") 1842 .flags(total | nozero | nonan) 1843 ; 1844 for (int i = 0; i < system->maxMasters(); i++) { 1845 misses[access_idx].subname(i, system->getMasterName(i)); 1846 } 1847 } 1848 1849 demandMisses 1850 .name(name() + ".demand_misses") 1851 .desc("number of demand (read+write) misses") 1852 .flags(total | nozero | nonan) 1853 ; 1854 demandMisses = SUM_DEMAND(misses); 1855 for (int i = 0; i < system->maxMasters(); i++) { 1856 demandMisses.subname(i, system->getMasterName(i)); 1857 } 1858 1859 overallMisses 1860 .name(name() + ".overall_misses") 1861 .desc("number of overall misses") 1862 .flags(total | nozero | nonan) 1863 ; 1864 overallMisses = demandMisses + SUM_NON_DEMAND(misses); 1865 for (int i = 0; i < system->maxMasters(); i++) { 1866 overallMisses.subname(i, system->getMasterName(i)); 1867 } 1868 1869 // Miss latency statistics 1870 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1871 MemCmd cmd(access_idx); 1872 const string &cstr = cmd.toString(); 1873 1874 missLatency[access_idx] 1875 .init(system->maxMasters()) 1876 .name(name() + "." + cstr + "_miss_latency") 1877 .desc("number of " + cstr + " miss cycles") 1878 .flags(total | nozero | nonan) 1879 ; 1880 for (int i = 0; i < system->maxMasters(); i++) { 1881 missLatency[access_idx].subname(i, system->getMasterName(i)); 1882 } 1883 } 1884 1885 demandMissLatency 1886 .name(name() + ".demand_miss_latency") 1887 .desc("number of demand (read+write) miss cycles") 1888 .flags(total | nozero | nonan) 1889 ; 1890 demandMissLatency = SUM_DEMAND(missLatency); 1891 for (int i = 0; i < system->maxMasters(); i++) { 1892 demandMissLatency.subname(i, system->getMasterName(i)); 1893 } 1894 1895 overallMissLatency 1896 .name(name() + ".overall_miss_latency") 1897 .desc("number of overall miss cycles") 1898 .flags(total | nozero | nonan) 1899 ; 1900 overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency); 1901 for (int i = 0; i < system->maxMasters(); i++) { 1902 overallMissLatency.subname(i, system->getMasterName(i)); 1903 } 1904 1905 // access formulas 1906 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1907 MemCmd cmd(access_idx); 1908 const string &cstr = cmd.toString(); 1909 1910 accesses[access_idx] 1911 .name(name() + "." 
+ cstr + "_accesses") 1912 .desc("number of " + cstr + " accesses(hits+misses)") 1913 .flags(total | nozero | nonan) 1914 ; 1915 accesses[access_idx] = hits[access_idx] + misses[access_idx]; 1916 1917 for (int i = 0; i < system->maxMasters(); i++) { 1918 accesses[access_idx].subname(i, system->getMasterName(i)); 1919 } 1920 } 1921 1922 demandAccesses 1923 .name(name() + ".demand_accesses") 1924 .desc("number of demand (read+write) accesses") 1925 .flags(total | nozero | nonan) 1926 ; 1927 demandAccesses = demandHits + demandMisses; 1928 for (int i = 0; i < system->maxMasters(); i++) { 1929 demandAccesses.subname(i, system->getMasterName(i)); 1930 } 1931 1932 overallAccesses 1933 .name(name() + ".overall_accesses") 1934 .desc("number of overall (read+write) accesses") 1935 .flags(total | nozero | nonan) 1936 ; 1937 overallAccesses = overallHits + overallMisses; 1938 for (int i = 0; i < system->maxMasters(); i++) { 1939 overallAccesses.subname(i, system->getMasterName(i)); 1940 } 1941 1942 // miss rate formulas 1943 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1944 MemCmd cmd(access_idx); 1945 const string &cstr = cmd.toString(); 1946 1947 missRate[access_idx] 1948 .name(name() + "." + cstr + "_miss_rate") 1949 .desc("miss rate for " + cstr + " accesses") 1950 .flags(total | nozero | nonan) 1951 ; 1952 missRate[access_idx] = misses[access_idx] / accesses[access_idx]; 1953 1954 for (int i = 0; i < system->maxMasters(); i++) { 1955 missRate[access_idx].subname(i, system->getMasterName(i)); 1956 } 1957 } 1958 1959 demandMissRate 1960 .name(name() + ".demand_miss_rate") 1961 .desc("miss rate for demand accesses") 1962 .flags(total | nozero | nonan) 1963 ; 1964 demandMissRate = demandMisses / demandAccesses; 1965 for (int i = 0; i < system->maxMasters(); i++) { 1966 demandMissRate.subname(i, system->getMasterName(i)); 1967 } 1968 1969 overallMissRate 1970 .name(name() + ".overall_miss_rate") 1971 .desc("miss rate for overall accesses") 1972 .flags(total | nozero | nonan) 1973 ; 1974 overallMissRate = overallMisses / overallAccesses; 1975 for (int i = 0; i < system->maxMasters(); i++) { 1976 overallMissRate.subname(i, system->getMasterName(i)); 1977 } 1978 1979 // miss latency formulas 1980 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1981 MemCmd cmd(access_idx); 1982 const string &cstr = cmd.toString(); 1983 1984 avgMissLatency[access_idx] 1985 .name(name() + "." 
+ cstr + "_avg_miss_latency") 1986 .desc("average " + cstr + " miss latency") 1987 .flags(total | nozero | nonan) 1988 ; 1989 avgMissLatency[access_idx] = 1990 missLatency[access_idx] / misses[access_idx]; 1991 1992 for (int i = 0; i < system->maxMasters(); i++) { 1993 avgMissLatency[access_idx].subname(i, system->getMasterName(i)); 1994 } 1995 } 1996 1997 demandAvgMissLatency 1998 .name(name() + ".demand_avg_miss_latency") 1999 .desc("average overall miss latency") 2000 .flags(total | nozero | nonan) 2001 ; 2002 demandAvgMissLatency = demandMissLatency / demandMisses; 2003 for (int i = 0; i < system->maxMasters(); i++) { 2004 demandAvgMissLatency.subname(i, system->getMasterName(i)); 2005 } 2006 2007 overallAvgMissLatency 2008 .name(name() + ".overall_avg_miss_latency") 2009 .desc("average overall miss latency") 2010 .flags(total | nozero | nonan) 2011 ; 2012 overallAvgMissLatency = overallMissLatency / overallMisses; 2013 for (int i = 0; i < system->maxMasters(); i++) { 2014 overallAvgMissLatency.subname(i, system->getMasterName(i)); 2015 } 2016 2017 blocked_cycles.init(NUM_BLOCKED_CAUSES); 2018 blocked_cycles 2019 .name(name() + ".blocked_cycles") 2020 .desc("number of cycles access was blocked") 2021 .subname(Blocked_NoMSHRs, "no_mshrs") 2022 .subname(Blocked_NoTargets, "no_targets") 2023 ; 2024 2025 2026 blocked_causes.init(NUM_BLOCKED_CAUSES); 2027 blocked_causes 2028 .name(name() + ".blocked") 2029 .desc("number of cycles access was blocked") 2030 .subname(Blocked_NoMSHRs, "no_mshrs") 2031 .subname(Blocked_NoTargets, "no_targets") 2032 ; 2033 2034 avg_blocked 2035 .name(name() + ".avg_blocked_cycles") 2036 .desc("average number of cycles each access was blocked") 2037 .subname(Blocked_NoMSHRs, "no_mshrs") 2038 .subname(Blocked_NoTargets, "no_targets") 2039 ; 2040 2041 avg_blocked = blocked_cycles / blocked_causes; 2042 2043 unusedPrefetches 2044 .name(name() + ".unused_prefetches") 2045 .desc("number of HardPF blocks evicted w/o reference") 2046 .flags(nozero) 2047 ; 2048 2049 writebacks 2050 .init(system->maxMasters()) 2051 .name(name() + ".writebacks") 2052 .desc("number of writebacks") 2053 .flags(total | nozero | nonan) 2054 ; 2055 for (int i = 0; i < system->maxMasters(); i++) { 2056 writebacks.subname(i, system->getMasterName(i)); 2057 } 2058 2059 // MSHR statistics 2060 // MSHR hit statistics 2061 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2062 MemCmd cmd(access_idx); 2063 const string &cstr = cmd.toString(); 2064 2065 mshr_hits[access_idx] 2066 .init(system->maxMasters()) 2067 .name(name() + "." 
+ cstr + "_mshr_hits") 2068 .desc("number of " + cstr + " MSHR hits") 2069 .flags(total | nozero | nonan) 2070 ; 2071 for (int i = 0; i < system->maxMasters(); i++) { 2072 mshr_hits[access_idx].subname(i, system->getMasterName(i)); 2073 } 2074 } 2075 2076 demandMshrHits 2077 .name(name() + ".demand_mshr_hits") 2078 .desc("number of demand (read+write) MSHR hits") 2079 .flags(total | nozero | nonan) 2080 ; 2081 demandMshrHits = SUM_DEMAND(mshr_hits); 2082 for (int i = 0; i < system->maxMasters(); i++) { 2083 demandMshrHits.subname(i, system->getMasterName(i)); 2084 } 2085 2086 overallMshrHits 2087 .name(name() + ".overall_mshr_hits") 2088 .desc("number of overall MSHR hits") 2089 .flags(total | nozero | nonan) 2090 ; 2091 overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits); 2092 for (int i = 0; i < system->maxMasters(); i++) { 2093 overallMshrHits.subname(i, system->getMasterName(i)); 2094 } 2095 2096 // MSHR miss statistics 2097 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2098 MemCmd cmd(access_idx); 2099 const string &cstr = cmd.toString(); 2100 2101 mshr_misses[access_idx] 2102 .init(system->maxMasters()) 2103 .name(name() + "." + cstr + "_mshr_misses") 2104 .desc("number of " + cstr + " MSHR misses") 2105 .flags(total | nozero | nonan) 2106 ; 2107 for (int i = 0; i < system->maxMasters(); i++) { 2108 mshr_misses[access_idx].subname(i, system->getMasterName(i)); 2109 } 2110 } 2111 2112 demandMshrMisses 2113 .name(name() + ".demand_mshr_misses") 2114 .desc("number of demand (read+write) MSHR misses") 2115 .flags(total | nozero | nonan) 2116 ; 2117 demandMshrMisses = SUM_DEMAND(mshr_misses); 2118 for (int i = 0; i < system->maxMasters(); i++) { 2119 demandMshrMisses.subname(i, system->getMasterName(i)); 2120 } 2121 2122 overallMshrMisses 2123 .name(name() + ".overall_mshr_misses") 2124 .desc("number of overall MSHR misses") 2125 .flags(total | nozero | nonan) 2126 ; 2127 overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses); 2128 for (int i = 0; i < system->maxMasters(); i++) { 2129 overallMshrMisses.subname(i, system->getMasterName(i)); 2130 } 2131 2132 // MSHR miss latency statistics 2133 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2134 MemCmd cmd(access_idx); 2135 const string &cstr = cmd.toString(); 2136 2137 mshr_miss_latency[access_idx] 2138 .init(system->maxMasters()) 2139 .name(name() + "." 
+ cstr + "_mshr_miss_latency") 2140 .desc("number of " + cstr + " MSHR miss cycles") 2141 .flags(total | nozero | nonan) 2142 ; 2143 for (int i = 0; i < system->maxMasters(); i++) { 2144 mshr_miss_latency[access_idx].subname(i, system->getMasterName(i)); 2145 } 2146 } 2147 2148 demandMshrMissLatency 2149 .name(name() + ".demand_mshr_miss_latency") 2150 .desc("number of demand (read+write) MSHR miss cycles") 2151 .flags(total | nozero | nonan) 2152 ; 2153 demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency); 2154 for (int i = 0; i < system->maxMasters(); i++) { 2155 demandMshrMissLatency.subname(i, system->getMasterName(i)); 2156 } 2157 2158 overallMshrMissLatency 2159 .name(name() + ".overall_mshr_miss_latency") 2160 .desc("number of overall MSHR miss cycles") 2161 .flags(total | nozero | nonan) 2162 ; 2163 overallMshrMissLatency = 2164 demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency); 2165 for (int i = 0; i < system->maxMasters(); i++) { 2166 overallMshrMissLatency.subname(i, system->getMasterName(i)); 2167 } 2168 2169 // MSHR uncacheable statistics 2170 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2171 MemCmd cmd(access_idx); 2172 const string &cstr = cmd.toString(); 2173 2174 mshr_uncacheable[access_idx] 2175 .init(system->maxMasters()) 2176 .name(name() + "." + cstr + "_mshr_uncacheable") 2177 .desc("number of " + cstr + " MSHR uncacheable") 2178 .flags(total | nozero | nonan) 2179 ; 2180 for (int i = 0; i < system->maxMasters(); i++) { 2181 mshr_uncacheable[access_idx].subname(i, system->getMasterName(i)); 2182 } 2183 } 2184 2185 overallMshrUncacheable 2186 .name(name() + ".overall_mshr_uncacheable_misses") 2187 .desc("number of overall MSHR uncacheable misses") 2188 .flags(total | nozero | nonan) 2189 ; 2190 overallMshrUncacheable = 2191 SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable); 2192 for (int i = 0; i < system->maxMasters(); i++) { 2193 overallMshrUncacheable.subname(i, system->getMasterName(i)); 2194 } 2195 2196 // MSHR miss latency statistics 2197 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2198 MemCmd cmd(access_idx); 2199 const string &cstr = cmd.toString(); 2200 2201 mshr_uncacheable_lat[access_idx] 2202 .init(system->maxMasters()) 2203 .name(name() + "." + cstr + "_mshr_uncacheable_latency") 2204 .desc("number of " + cstr + " MSHR uncacheable cycles") 2205 .flags(total | nozero | nonan) 2206 ; 2207 for (int i = 0; i < system->maxMasters(); i++) { 2208 mshr_uncacheable_lat[access_idx].subname( 2209 i, system->getMasterName(i)); 2210 } 2211 } 2212 2213 overallMshrUncacheableLatency 2214 .name(name() + ".overall_mshr_uncacheable_latency") 2215 .desc("number of overall MSHR uncacheable cycles") 2216 .flags(total | nozero | nonan) 2217 ; 2218 overallMshrUncacheableLatency = 2219 SUM_DEMAND(mshr_uncacheable_lat) + 2220 SUM_NON_DEMAND(mshr_uncacheable_lat); 2221 for (int i = 0; i < system->maxMasters(); i++) { 2222 overallMshrUncacheableLatency.subname(i, system->getMasterName(i)); 2223 } 2224 2225#if 0 2226 // MSHR access formulas 2227 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2228 MemCmd cmd(access_idx); 2229 const string &cstr = cmd.toString(); 2230 2231 mshrAccesses[access_idx] 2232 .name(name() + "." 
+ cstr + "_mshr_accesses") 2233 .desc("number of " + cstr + " mshr accesses(hits+misses)") 2234 .flags(total | nozero | nonan) 2235 ; 2236 mshrAccesses[access_idx] = 2237 mshr_hits[access_idx] + mshr_misses[access_idx] 2238 + mshr_uncacheable[access_idx]; 2239 } 2240 2241 demandMshrAccesses 2242 .name(name() + ".demand_mshr_accesses") 2243 .desc("number of demand (read+write) mshr accesses") 2244 .flags(total | nozero | nonan) 2245 ; 2246 demandMshrAccesses = demandMshrHits + demandMshrMisses; 2247 2248 overallMshrAccesses 2249 .name(name() + ".overall_mshr_accesses") 2250 .desc("number of overall (read+write) mshr accesses") 2251 .flags(total | nozero | nonan) 2252 ; 2253 overallMshrAccesses = overallMshrHits + overallMshrMisses 2254 + overallMshrUncacheable; 2255#endif 2256 2257 // MSHR miss rate formulas 2258 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2259 MemCmd cmd(access_idx); 2260 const string &cstr = cmd.toString(); 2261 2262 mshrMissRate[access_idx] 2263 .name(name() + "." + cstr + "_mshr_miss_rate") 2264 .desc("mshr miss rate for " + cstr + " accesses") 2265 .flags(total | nozero | nonan) 2266 ; 2267 mshrMissRate[access_idx] = 2268 mshr_misses[access_idx] / accesses[access_idx]; 2269 2270 for (int i = 0; i < system->maxMasters(); i++) { 2271 mshrMissRate[access_idx].subname(i, system->getMasterName(i)); 2272 } 2273 } 2274 2275 demandMshrMissRate 2276 .name(name() + ".demand_mshr_miss_rate") 2277 .desc("mshr miss rate for demand accesses") 2278 .flags(total | nozero | nonan) 2279 ; 2280 demandMshrMissRate = demandMshrMisses / demandAccesses; 2281 for (int i = 0; i < system->maxMasters(); i++) { 2282 demandMshrMissRate.subname(i, system->getMasterName(i)); 2283 } 2284 2285 overallMshrMissRate 2286 .name(name() + ".overall_mshr_miss_rate") 2287 .desc("mshr miss rate for overall accesses") 2288 .flags(total | nozero | nonan) 2289 ; 2290 overallMshrMissRate = overallMshrMisses / overallAccesses; 2291 for (int i = 0; i < system->maxMasters(); i++) { 2292 overallMshrMissRate.subname(i, system->getMasterName(i)); 2293 } 2294 2295 // mshrMiss latency formulas 2296 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2297 MemCmd cmd(access_idx); 2298 const string &cstr = cmd.toString(); 2299 2300 avgMshrMissLatency[access_idx] 2301 .name(name() + "." 
+ cstr + "_avg_mshr_miss_latency") 2302 .desc("average " + cstr + " mshr miss latency") 2303 .flags(total | nozero | nonan) 2304 ; 2305 avgMshrMissLatency[access_idx] = 2306 mshr_miss_latency[access_idx] / mshr_misses[access_idx]; 2307 2308 for (int i = 0; i < system->maxMasters(); i++) { 2309 avgMshrMissLatency[access_idx].subname( 2310 i, system->getMasterName(i)); 2311 } 2312 } 2313 2314 demandAvgMshrMissLatency 2315 .name(name() + ".demand_avg_mshr_miss_latency") 2316 .desc("average overall mshr miss latency") 2317 .flags(total | nozero | nonan) 2318 ; 2319 demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses; 2320 for (int i = 0; i < system->maxMasters(); i++) { 2321 demandAvgMshrMissLatency.subname(i, system->getMasterName(i)); 2322 } 2323 2324 overallAvgMshrMissLatency 2325 .name(name() + ".overall_avg_mshr_miss_latency") 2326 .desc("average overall mshr miss latency") 2327 .flags(total | nozero | nonan) 2328 ; 2329 overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses; 2330 for (int i = 0; i < system->maxMasters(); i++) { 2331 overallAvgMshrMissLatency.subname(i, system->getMasterName(i)); 2332 } 2333 2334 // mshrUncacheable latency formulas 2335 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2336 MemCmd cmd(access_idx); 2337 const string &cstr = cmd.toString(); 2338 2339 avgMshrUncacheableLatency[access_idx] 2340 .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency") 2341 .desc("average " + cstr + " mshr uncacheable latency") 2342 .flags(total | nozero | nonan) 2343 ; 2344 avgMshrUncacheableLatency[access_idx] = 2345 mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx]; 2346 2347 for (int i = 0; i < system->maxMasters(); i++) { 2348 avgMshrUncacheableLatency[access_idx].subname( 2349 i, system->getMasterName(i)); 2350 } 2351 } 2352 2353 overallAvgMshrUncacheableLatency 2354 .name(name() + ".overall_avg_mshr_uncacheable_latency") 2355 .desc("average overall mshr uncacheable latency") 2356 .flags(total | nozero | nonan) 2357 ; 2358 overallAvgMshrUncacheableLatency = 2359 overallMshrUncacheableLatency / overallMshrUncacheable; 2360 for (int i = 0; i < system->maxMasters(); i++) { 2361 overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i)); 2362 } 2363 2364 replacements 2365 .name(name() + ".replacements") 2366 .desc("number of replacements") 2367 ; 2368} 2369 2370void 2371BaseCache::regProbePoints() 2372{ 2373 ppHit = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Hit"); 2374 ppMiss = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Miss"); 2375 ppFill = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Fill"); 2376} 2377 2378/////////////// 2379// 2380// CpuSidePort 2381// 2382/////////////// 2383bool 2384BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt) 2385{ 2386 // Snoops shouldn't happen when bypassing caches 2387 assert(!cache->system->bypassCaches()); 2388 2389 assert(pkt->isResponse()); 2390 2391 // Express snoop responses from master to slave, e.g., from L1 to L2 2392 cache->recvTimingSnoopResp(pkt); 2393 return true; 2394} 2395 2396 2397bool 2398BaseCache::CpuSidePort::tryTiming(PacketPtr pkt) 2399{ 2400 if (cache->system->bypassCaches() || pkt->isExpressSnoop()) { 2401 // always let express snoop packets through even if blocked 2402 return true; 2403 } else if (blocked || mustSendRetry) { 2404 // either already committed to send a retry, or blocked 2405 mustSendRetry = true; 2406 return false; 2407 } 2408 mustSendRetry = false; 2409 return true; 2410} 
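// A minimal sketch of how tryTiming() above is intended to be used,
// assuming a caller shaped like recvTimingReq() below (illustrative
// only, not a prescription):
//
//     if (tryTiming(pkt)) {
//         cache->recvTimingReq(pkt);      // accepted this cycle
//     } else {
//         // mustSendRetry has been latched; once the cache unblocks
//         // the port, a retry is sent and the sender tries again
//     }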
2411
2412bool
2413BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt)
2414{
2415    assert(pkt->isRequest());
2416
2417    if (cache->system->bypassCaches()) {
2418        // Just forward the packet if caches are disabled.
2419        // @todo This should really enqueue the packet rather
2420        bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt);
2421        assert(success);
2422        return true;
2423    } else if (tryTiming(pkt)) {
2424        cache->recvTimingReq(pkt);
2425        return true;
2426    }
2427    return false;
2428}
2429
2430Tick
2431BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt)
2432{
2433    if (cache->system->bypassCaches()) {
2434        // Forward the request if the system is in cache bypass mode.
2435        return cache->memSidePort.sendAtomic(pkt);
2436    } else {
2437        return cache->recvAtomic(pkt);
2438    }
2439}
2440
2441void
2442BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt)
2443{
2444    if (cache->system->bypassCaches()) {
2445        // The cache should be flushed if we are in cache bypass mode,
2446        // so we don't need to check if we need to update anything.
2447        cache->memSidePort.sendFunctional(pkt);
2448        return;
2449    }
2450
2451    // functional request
2452    cache->functionalAccess(pkt, true);
2453}
2454
2455AddrRangeList
2456BaseCache::CpuSidePort::getAddrRanges() const
2457{
2458    return cache->getAddrRanges();
2459}
2460
2461
2462BaseCache::
2463CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache,
2464                         const std::string &_label)
2465    : CacheSlavePort(_name, _cache, _label), cache(_cache)
2466{
2467}
2468
2469///////////////
2470//
2471// MemSidePort
2472//
2473///////////////
2474bool
2475BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt)
2476{
2477    cache->recvTimingResp(pkt);
2478    return true;
2479}
2480
2481// Express snooping requests to memside port
2482void
2483BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
2484{
2485    // Snoops shouldn't happen when bypassing caches
2486    assert(!cache->system->bypassCaches());
2487
2488    // handle snooping requests
2489    cache->recvTimingSnoopReq(pkt);
2490}
2491
2492Tick
2493BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
2494{
2495    // Snoops shouldn't happen when bypassing caches
2496    assert(!cache->system->bypassCaches());
2497
2498    return cache->recvAtomicSnoop(pkt);
2499}
2500
2501void
2502BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
2503{
2504    // Snoops shouldn't happen when bypassing caches
2505    assert(!cache->system->bypassCaches());
2506
2507    // functional snoop (note that in contrast to atomic we don't have
2508    // a specific functionalSnoop method, as they have the same
2509    // behaviour regardless)
2510    cache->functionalAccess(pkt, false);
2511}
2512
2513void
2514BaseCache::CacheReqPacketQueue::sendDeferredPacket()
2515{
2516    // sanity check
2517    assert(!waitingOnRetry);
2518
2519    // there should never be any deferred request packets in the
2520    // queue; instead we rely on the cache to provide the packets
2521    // from the MSHR queue or write queue
2522    assert(deferredPacketReadyTime() == MaxTick);
2523
2524    // check for request packets (requests & writebacks)
2525    QueueEntry* entry = cache.getNextQueueEntry();
2526
2527    if (!entry) {
2528        // can happen if e.g. we attempt a writeback and fail, but
2529        // before the retry, the writeback is eliminated because
2530        // we snoop another cache's ReadEx.
2531 } else { 2532 // let our snoop responses go first if there are responses to 2533 // the same addresses 2534 if (checkConflictingSnoop(entry->getTarget()->pkt)) { 2535 return; 2536 } 2537 waitingOnRetry = entry->sendPacket(cache); 2538 } 2539 2540 // if we succeeded and are not waiting for a retry, schedule the 2541 // next send considering when the next queue is ready, note that 2542 // snoop responses have their own packet queue and thus schedule 2543 // their own events 2544 if (!waitingOnRetry) { 2545 schedSendEvent(cache.nextQueueReadyTime()); 2546 } 2547} 2548 2549BaseCache::MemSidePort::MemSidePort(const std::string &_name, 2550 BaseCache *_cache, 2551 const std::string &_label) 2552 : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue), 2553 _reqQueue(*_cache, *this, _snoopRespQueue, _label), 2554 _snoopRespQueue(*_cache, *this, true, _label), cache(_cache) 2555{ 2556} 2557 2558void 2559WriteAllocator::updateMode(Addr write_addr, unsigned write_size, 2560 Addr blk_addr) 2561{ 2562 // check if we are continuing where the last write ended 2563 if (nextAddr == write_addr) { 2564 delayCtr[blk_addr] = delayThreshold; 2565 // stop if we have already saturated 2566 if (mode != WriteMode::NO_ALLOCATE) { 2567 byteCount += write_size; 2568 // switch to streaming mode if we have passed the lower 2569 // threshold 2570 if (mode == WriteMode::ALLOCATE && 2571 byteCount > coalesceLimit) { 2572 mode = WriteMode::COALESCE; 2573 DPRINTF(Cache, "Switched to write coalescing\n"); 2574 } else if (mode == WriteMode::COALESCE && 2575 byteCount > noAllocateLimit) { 2576 // and continue and switch to non-allocating mode if we 2577 // pass the upper threshold 2578 mode = WriteMode::NO_ALLOCATE; 2579 DPRINTF(Cache, "Switched to write-no-allocate\n"); 2580 } 2581 } 2582 } else { 2583 // we did not see a write matching the previous one, start 2584 // over again 2585 byteCount = write_size; 2586 mode = WriteMode::ALLOCATE; 2587 resetDelay(blk_addr); 2588 } 2589 nextAddr = write_addr + write_size; 2590} 2591 2592WriteAllocator* 2593WriteAllocatorParams::create() 2594{ 2595 return new WriteAllocator(this); 2596} 2597
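// A worked example of the mode transitions in WriteAllocator::updateMode()
// above, assuming purely illustrative limits of coalesceLimit = 128 bytes
// and noAllocateLimit = 256 bytes (the real values come from the
// WriteAllocator parameters) and back-to-back 64-byte writes:
//
//   write_addr  write_size  byteCount  mode after the call
//   0x1000      64          64         ALLOCATE    (stream starts, reset)
//   0x1040      64          128        ALLOCATE    (128 is not > 128)
//   0x1080      64          192        COALESCE    (passed coalesceLimit)
//   0x10c0      64          256        COALESCE    (256 is not > 256)
//   0x1100      64          320        NO_ALLOCATE (passed noAllocateLimit)
//   0x2000      64          64         ALLOCATE    (stream broken, reset)
//
// Every write that continues the stream also reloads delayCtr[blk_addr]
// with delayThreshold, which sendMSHRQueuePacket() consumes through
// delay() to hold a partially written line in the MSHR queue a bit longer.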