/*
 * Copyright (c) 2012-2013, 2018-2019 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Definition of BaseCache functions.
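 *
 * BaseCache provides the functionality shared by the cache models:
 * MSHR and write-buffer bookkeeping, block fills and evictions, and
 * the handling of the CPU-side and memory-side cache ports.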
 */

#include "mem/cache/base.hh"

#include "base/compiler.hh"
#include "base/logging.hh"
#include "debug/Cache.hh"
#include "debug/CacheComp.hh"
#include "debug/CachePort.hh"
#include "debug/CacheRepl.hh"
#include "debug/CacheVerbose.hh"
#include "mem/cache/compressors/base.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "mem/cache/queue_entry.hh"
#include "mem/cache/tags/super_blk.hh"
#include "params/BaseCache.hh"
#include "params/WriteAllocator.hh"
#include "sim/core.hh"

class BaseMasterPort;
class BaseSlavePort;

using namespace std;

BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
                                          BaseCache *_cache,
                                          const std::string &_label)
    : QueuedSlavePort(_name, _cache, queue),
      queue(*_cache, *this, true, _label),
      blocked(false), mustSendRetry(false),
      sendRetryEvent([this]{ processSendRetry(); }, _name)
{
}

BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
    : ClockedObject(p),
      cpuSidePort (p->name + ".cpu_side", this, "CpuSidePort"),
      memSidePort(p->name + ".mem_side", this, "MemSidePort"),
      mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
      writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
      tags(p->tags),
      compressor(p->compressor),
      prefetcher(p->prefetcher),
      writeAllocator(p->write_allocator),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
                                    name(), false,
                                    EventBase::Delayed_Writeback_Pri),
      blkSize(blk_size),
      lookupLatency(p->tag_latency),
      dataLatency(p->data_latency),
      forwardLatency(p->tag_latency),
      fillLatency(p->data_latency),
      responseLatency(p->response_latency),
      sequentialAccess(p->sequential_access),
      numTarget(p->tgts_per_mshr),
      forwardSnoops(true),
      clusivity(p->clusivity),
      isReadOnly(p->is_read_only),
      blocked(0),
      order(0),
      noTargetMSHR(nullptr),
      missCount(p->max_miss_count),
      addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()),
      system(p->system)
{
    // the MSHR queue has no reserve entries as we check the MSHR
    // queue on every single allocation, whereas the write queue has
    // as many reserve entries as we have MSHRs, since every MSHR may
    // eventually require a writeback, and we do not check the write
    // buffer before committing to an MSHR

    // forward snoops is overridden in init() once we can query
    // whether the connected master is actually snooping or not

    tempBlock = new TempCacheBlk(blkSize);

    tags->tagsInit();
    if (prefetcher)
        prefetcher->setCache(this);
}

BaseCache::~BaseCache()
{
    delete tempBlock;
}

void
BaseCache::CacheSlavePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(CachePort, "Port is blocking new requests\n");
    blocked = true;
    // if we already scheduled a retry in this cycle, but it has not yet
    // happened, cancel it
    if (sendRetryEvent.scheduled()) {
        owner.deschedule(sendRetryEvent);
        DPRINTF(CachePort, "Port descheduled retry\n");
        mustSendRetry = true;
    }
}

void
BaseCache::CacheSlavePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(CachePort, "Port is accepting new requests\n");
    blocked = false;
    if (mustSendRetry) {
        // @TODO: need to find a better time (next cycle?)
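        // the retry is sent from a scheduled event one tick later so
        // that it is issued outside the call chain that unblocked the port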
        owner.schedule(sendRetryEvent, curTick() + 1);
    }
}

void
BaseCache::CacheSlavePort::processSendRetry()
{
    DPRINTF(CachePort, "Port is sending retry\n");

    // reset the flag and call retry
    mustSendRetry = false;
    sendRetryReq();
}

Addr
BaseCache::regenerateBlkAddr(CacheBlk* blk)
{
    if (blk != tempBlock) {
        return tags->regenerateBlkAddr(blk);
    } else {
        return tempBlock->getAddr();
    }
}

void
BaseCache::init()
{
    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
        fatal("Cache ports on %s are not connected\n", name());
    cpuSidePort.sendRangeChange();
    forwardSnoops = cpuSidePort.isSnooping();
}

Port &
BaseCache::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_side") {
        return memSidePort;
    } else if (if_name == "cpu_side") {
        return cpuSidePort;
    } else {
        return ClockedObject::getPort(if_name, idx);
    }
}

bool
BaseCache::inRange(Addr addr) const
{
    for (const auto& r : addrRanges) {
        if (r.contains(addr)) {
            return true;
        }
    }
    return false;
}

void
BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    if (pkt->needsResponse()) {
        // These delays should have been consumed by now
        assert(pkt->headerDelay == 0);
        assert(pkt->payloadDelay == 0);

        pkt->makeTimingResponse();

        // In this case we are considering request_time that takes
        // into account the delay of the xbar, if any, and just
        // lat, neglecting responseLatency, modelling hit latency
        // just as the value of lat overridden by access(), which calls
        // the calculateAccessLatency() function.
        cpuSidePort.schedTimingResp(pkt, request_time);
    } else {
        DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
                pkt->print());

        // queue the packet for deletion, as the sending cache is
        // still relying on it; if the block is found in access(),
        // CleanEvict and Writeback messages will be deleted
        // here as well
        pendingDelete.reset(pkt);
    }
}

void
BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                               Tick forward_time, Tick request_time)
{
    if (writeAllocator &&
        pkt && pkt->isWrite() && !pkt->req->isUncacheable()) {
        writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
                                   pkt->getBlockAddr(blkSize));
    }

    if (mshr) {
        /// MSHR hit
        /// @note writebacks will be checked in getNextMSHR()
        /// for any conflicting requests to the same block

        //@todo remove hw_pf here

        // Coalesce unless it was a software prefetch (see above).
        if (pkt) {
            assert(!pkt->isWriteback());
            // CleanEvicts corresponding to blocks which have
            // outstanding requests in MSHRs are simply sunk here
            if (pkt->cmd == MemCmd::CleanEvict) {
                pendingDelete.reset(pkt);
            } else if (pkt->cmd == MemCmd::WriteClean) {
                // A WriteClean should never coalesce with any
                // outstanding cache maintenance requests.

                // We use forward_time here because there is an
                // uncached memory write, forwarded to WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
                        pkt->print());

                assert(pkt->req->masterId() < system->maxMasters());
                mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // We use forward_time here because it is the same
                // considering new targets. We have multiple
                // requests for the same address here. It
                // specifies the latency to allocate an internal
                // buffer and to schedule an event to the queued
                // port and also takes into account the additional
                // delay of the xbar.
                mshr->allocateTarget(pkt, forward_time, order++,
                                     allocOnFill(pkt->cmd));
                if (mshr->getNumTargets() == numTarget) {
                    noTargetMSHR = mshr;
                    setBlocked(Blocked_NoTargets);
                    // need to be careful with this... if this mshr isn't
                    // ready yet (i.e. time > curTick()), we don't want to
                    // move it ahead of mshrs that are ready
                    // mshrQueue.moveToFront(mshr);
                }
            }
        }
    } else {
        // no MSHR
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

        if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
            // We use forward_time here because there is a
            // writeback or writeclean, forwarded to WriteBuffer.
            allocateWriteBuffer(pkt, forward_time);
        } else {
            if (blk && blk->isValid()) {
                // If we have a write miss to a valid block, we
                // need to mark the block non-readable. Otherwise
                // if we allow reads while there's an outstanding
                // write miss, the read could return stale data
                // out of the cache block... a more aggressive
                // system could detect the overlap (if any) and
                // forward data out of the MSHRs, but we don't do
                // that yet. Note that we do need to leave the
                // block valid so that it stays in the cache, in
                // case we get an upgrade response (and hence no
                // new data) when the write miss completes.
                // As long as CPUs do proper store/load forwarding
                // internally, and have a sufficiently weak memory
                // model, this is probably unnecessary, but at some
                // point it must have seemed like we needed it...
                assert((pkt->needsWritable() && !blk->isWritable()) ||
                       pkt->req->isCacheMaintenance());
                blk->status &= ~BlkReadable;
            }
            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }
    }
}

void
BaseCache::recvTimingReq(PacketPtr pkt)
{
    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    Cycles lat;
    CacheBlk *blk = nullptr;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() will set the lat value.
        satisfied = access(pkt, blk, lat, writebacks);

        // After the evicted blocks are selected, they must be forwarded
        // to the write buffer to ensure they logically precede anything
        // happening below
        doWritebacks(writebacks, clockEdge(lat + forwardLatency));
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is just the value set by the access() function.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat);
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;

    if (satisfied) {
        // notify before anything else as later handleTimingReqHit might turn
        // the packet into a response
        ppHit->notify(pkt);

        if (prefetcher && blk && blk->wasPrefetched()) {
            blk->status &= ~BlkHWPrefetched;
        }

        handleTimingReqHit(pkt, blk, request_time);
    } else {
        handleTimingReqMiss(pkt, blk, forward_time, request_time);

        ppMiss->notify(pkt);
    }

    if (prefetcher) {
        // track time of availability of next prefetch, if any
        Tick next_pf_time = prefetcher->nextPrefetchReadyTime();
        if (next_pf_time != MaxTick) {
            schedMemSideSendEvent(next_pf_time);
        }
    }
}

void
BaseCache::handleUncacheableWriteResp(PacketPtr pkt)
{
    Tick completion_time = clockEdge(responseLatency) +
        pkt->headerDelay + pkt->payloadDelay;

    // Reset the bus additional time as it is now accounted for
    pkt->headerDelay = pkt->payloadDelay = 0;

    cpuSidePort.schedTimingResp(pkt, completion_time);
}

void
BaseCache::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    // all header delay should be paid for by the crossbar, unless
    // this is a prefetch response from above
    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
             "%s saw a non-zero packet delay\n", name());

    const bool is_error = pkt->isError();

    if (is_error) {
        DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
                pkt->print());
    }

    DPRINTF(Cache, "%s: Handling response %s\n", __func__,
            pkt->print());

    // if this is a write, we should be looking at an uncacheable
    // write
    if (pkt->isWrite()) {
        assert(pkt->req->isUncacheable());
        handleUncacheableWriteResp(pkt);
        return;
    }

    // we have dealt with any (uncacheable) writes above, from here on
    // we know we are dealing with an MSHR due to a miss or a prefetch
    MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
    assert(mshr);

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = nullptr;
    }

    // Initial target is used just for stats
    QueueEntry::Target *initial_tgt = mshr->getTarget();
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick() - initial_tgt->recvTime;

    if (pkt->req->isUncacheable()) {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    } else {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    }

    PacketList writebacks;

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp ||
         mshr->wasWholeLineWrite);

    // make sure that if the mshr was due to a whole line write then
    // the response is an invalidation
    assert(!mshr->wasWholeLineWrite || pkt->isInvalidate());

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                pkt->getAddr());

        const bool allocate = (writeAllocator && mshr->wasWholeLineWrite) ?
            writeAllocator->allocate() : mshr->allocOnFill();
        blk = handleFill(pkt, blk, writebacks, allocate);
        assert(blk != nullptr);
        ppFill->notify(pkt);
    }

    if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
        // The block was marked not readable while there was a pending
        // cache maintenance operation, restore its flag.
        blk->status |= BlkReadable;

        // This was a cache clean operation (without invalidate)
        // and we have a copy of the block already. Since there
        // is no invalidation, we can promote targets that don't
        // require a writable copy
        mshr->promoteReadable();
    }

    if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) {
        // If at this point the referenced block is writable and the
        // response is not a cache invalidate, we promote targets that
        // were deferred as we couldn't guarantee a writable copy
        mshr->promoteWritable();
    }

    serviceMSHRTargets(mshr, pkt, blk);

    if (mshr->promoteDeferredTargets()) {
        // avoid later read getting stale data while write miss is
        // outstanding.. see comment in timingAccess()
        if (blk) {
            blk->status &= ~BlkReadable;
        }
        mshrQueue.markPending(mshr);
        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
    } else {
        // while we deallocate an mshr from the queue we still have to
        // check the isFull condition before and after as we might
        // have been using the reserved entries already
        const bool was_full = mshrQueue.isFull();
        mshrQueue.deallocate(mshr);
        if (was_full && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }

        // Request the bus for a prefetch if this deallocation freed enough
        // MSHRs for a prefetch to take place
        if (prefetcher && mshrQueue.canPrefetch()) {
            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
                                         clockEdge());
            if (next_pf_time != MaxTick)
                schedMemSideSendEvent(next_pf_time);
        }
    }

    // if we used temp block, check to see if it's valid and then clear it out
    if (blk == tempBlock && tempBlock->isValid()) {
        evictBlock(blk, writebacks);
    }

    const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // copy writebacks to write buffer
    doWritebacks(writebacks, forward_time);

    DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
    delete pkt;
}


Tick
BaseCache::recvAtomic(PacketPtr pkt)
{
    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    // We use lookupLatency here because it is used to specify the latency
    // to access.
    Cycles lat = lookupLatency;

    CacheBlk *blk = nullptr;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty
        // block. If a dirty block is encountered a WriteClean
        // will update any copies on the path to memory
        // until the point of reference.
        DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                __func__, pkt->print(), blk->print());
        PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
        writebacks.push_back(wb_pkt);
        pkt->setSatisfied();
    }

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);
    assert(writebacks.empty());

    if (!satisfied) {
        lat += handleAtomicReqMiss(pkt, blk, writebacks);
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention. If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used temp block, check to see if it's valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter), first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = evictBlock(blk);
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat * clockPeriod();
}

void
BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
{
    Addr blk_addr = pkt->getBlockAddr(blkSize);
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found. As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.

    // see if we have data at all (owned or otherwise)
    bool have_data = blk && blk->isValid()
        && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize,
                                     blk->data);

    // data we have is dirty if marked as such or if we have an
    // in-service MSHR that is pending a modified line
    bool have_dirty =
        have_data && (blk->isDirty() ||
                      (mshr && mshr->inService && mshr->isPendingModified()));

    bool done = have_dirty ||
        cpuSidePort.trySatisfyFunctional(pkt) ||
        mshrQueue.trySatisfyFunctional(pkt) ||
        writeBuffer.trySatisfyFunctional(pkt) ||
        memSidePort.trySatisfyFunctional(pkt);

    DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
            (blk && blk->isValid()) ? "valid " : "",
            have_data ? "data " : "", done ? "done " : "");

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (done) {
        pkt->makeResponse();
    } else {
        // if it came as a request from the CPU side then make sure it
        // continues towards the memory side
        if (from_cpu_side) {
            memSidePort.sendFunctional(pkt);
        } else if (cpuSidePort.isSnooping()) {
            // if it came from the memory side, it must be a snoop request
            // and we should only forward it if we are forwarding snoops
            cpuSidePort.sendFunctionalSnoop(pkt);
        }
    }
}


void
BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = pkt->getOffset(blkSize);
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
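    // the old cache contents always end up in the packet as the return
    // value; whether the new value lands in the block is decided by the
    // (optional) compare step below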
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}

QueueEntry*
BaseCache::getNextQueueEntry()
{
    // Check both MSHR queue and write buffer for potential requests,
    // note that null does not mean there is no request, it could
    // simply be that it is not ready
    MSHR *miss_mshr = mshrQueue.getNext();
    WriteQueueEntry *wq_entry = writeBuffer.getNext();

    // If we got a write buffer request ready, first priority is a
    // full write buffer, otherwise we favour the miss requests
    if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
        // need to search MSHR queue for conflicting earlier miss.
        MSHR *conflict_mshr = mshrQueue.findPending(wq_entry);

        if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
            // Service misses in order until conflict is cleared.
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue write
        return wq_entry;
    } else if (miss_mshr) {
        // need to check for conflicting earlier writeback
        WriteQueueEntry *conflict_mshr = writeBuffer.findPending(miss_mshr);
        if (conflict_mshr) {
            // not sure why we don't check order here... it was in the
            // original code but commented out.

            // The only way this happens is if we are
            // doing a write and we didn't have permissions
            // then subsequently saw a writeback (owned got evicted).
            // We need to make sure to perform the writeback first
            // to preserve the dirty data, then we can issue the write.

            // should we return wq_entry here instead? I.e. do we
            // have to flush writes in order? I don't think so... not
            // for Alpha anyway. Maybe for x86?
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue read
        return miss_mshr;
    }

    // fall through... no pending requests. Try a prefetch.
    assert(!miss_mshr && !wq_entry);
    if (prefetcher && mshrQueue.canPrefetch()) {
        // If we have a miss queue slot, we can try a prefetch
        PacketPtr pkt = prefetcher->getPacket();
        if (pkt) {
            Addr pf_addr = pkt->getBlockAddr(blkSize);
            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
                // Update statistic on number of prefetches issued
                // (hwpf_mshr_misses)
                assert(pkt->req->masterId() < system->maxMasters());
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // allocate an MSHR and return it, note
                // that we send the packet straight away, so do not
                // schedule the send
                return allocateMissBuffer(pkt, curTick(), false);
            } else {
                // free the request and packet
                delete pkt;
            }
        }
    }

    return nullptr;
}

bool
BaseCache::updateCompressionData(CacheBlk *blk, const uint64_t* data,
                                 PacketList &writebacks)
{
    // tempBlock does not exist in the tags, so don't do anything for it.
    if (blk == tempBlock) {
        return true;
    }

    // Get superblock of the given block
    CompressionBlk* compression_blk = static_cast<CompressionBlk*>(blk);
    const SuperBlk* superblock = static_cast<const SuperBlk*>(
        compression_blk->getSectorBlock());

    // The compressor is called to compress the updated data, so that its
    // metadata can be updated.
    std::size_t compression_size = 0;
    Cycles compression_lat = Cycles(0);
    Cycles decompression_lat = Cycles(0);
    compressor->compress(data, compression_lat, decompression_lat,
                         compression_size);

    // If the block's compression factor increased, it may not be
    // co-allocatable anymore. If so, some blocks might need to be evicted
    // to make room for the bigger block

    // Get previous compressed size
    const std::size_t M5_VAR_USED prev_size = compression_blk->getSizeBits();

    // Check if new data is co-allocatable
    const bool is_co_allocatable = superblock->isCompressed(compression_blk) &&
        superblock->canCoAllocate(compression_size);

    // If the block was compressed, possibly co-allocated with other blocks,
    // and cannot be co-allocated anymore, one or more blocks must be evicted
    // to make room for the expanded block. As of now we decide to evict the
    // co-allocated blocks to make room for the expansion, but other
    // approaches that take the replacement data of the superblock into
    // account may generate better results
    std::vector<CacheBlk*> evict_blks;
    const bool was_compressed = compression_blk->isCompressed();
    if (was_compressed && !is_co_allocatable) {
        // Get all co-allocated blocks
        for (const auto& sub_blk : superblock->blks) {
            if (sub_blk->isValid() && (compression_blk != sub_blk)) {
                // Check for transient state allocations. If any of the
                // entries listed for eviction has a transient state, the
                // allocation fails
                const Addr repl_addr = regenerateBlkAddr(sub_blk);
                const MSHR *repl_mshr =
                    mshrQueue.findMatch(repl_addr, sub_blk->isSecure());
                if (repl_mshr) {
                    DPRINTF(CacheRepl, "Aborting data expansion of %s due " \
                            "to replacement of block in transient state: %s\n",
                            compression_blk->print(), sub_blk->print());
                    // Too hard to replace block with transient state, so it
                    // cannot be evicted. Mark the update as failed and expect
                    // the caller to evict this block. Since this is called
                    // only when writebacks arrive, and packets do not contain
                    // compressed data, there is no need to decompress
                    compression_blk->setSizeBits(blkSize * 8);
                    compression_blk->setDecompressionLatency(Cycles(0));
                    compression_blk->setUncompressed();
                    return false;
                }

                evict_blks.push_back(sub_blk);
            }
        }

        // Update the number of data expansions
        dataExpansions++;

        DPRINTF(CacheComp, "Data expansion: expanding [%s] from %d to %d bits"
                "\n", blk->print(), prev_size, compression_size);
    }

    // We always store compressed blocks when possible
    if (is_co_allocatable) {
        compression_blk->setCompressed();
    } else {
        compression_blk->setUncompressed();
    }
    compression_blk->setSizeBits(compression_size);
    compression_blk->setDecompressionLatency(decompression_lat);

    // Evict valid blocks
    for (const auto& evict_blk : evict_blks) {
        if (evict_blk->isValid()) {
            if (evict_blk->wasPrefetched()) {
                unusedPrefetches++;
            }
            evictBlock(evict_blk, writebacks);
        }
    }

    return true;
}

void
BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsWritable() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        if (pkt->isAtomicOp()) {
            // extract data from cache and save it into the data field in
            // the packet as a return value from this atomic op
            int offset = tags->extractBlkOffset(pkt->getAddr());
            uint8_t *blk_data = blk->data + offset;
            pkt->setData(blk_data);

            // execute AMO operation
            (*(pkt->getAtomicOp()))(blk_data);

            // set block status to dirty
            blk->status |= BlkDirty;
        } else {
            cmpAndSwap(blk, pkt);
        }
    } else if (pkt->isWrite()) {
        // we have the block in a writable state and can go ahead,
        // note that the line may also be considered writable in
        // downstream caches along the path to memory, but always
        // Exclusive, and never Modified
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in writable state
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty (and thus transition to the
        // Modified state) even if we are a failed StoreCond so we
        // supply data to any snoops that have appended themselves to
        // this cache before knowing the store will fail.
94712724Snikos.nikoleris@arm.com blk->status |= BlkDirty; 94812724Snikos.nikoleris@arm.com DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print()); 94912724Snikos.nikoleris@arm.com } else if (pkt->isRead()) { 95012724Snikos.nikoleris@arm.com if (pkt->isLLSC()) { 95112724Snikos.nikoleris@arm.com blk->trackLoadLocked(pkt); 95212724Snikos.nikoleris@arm.com } 95312724Snikos.nikoleris@arm.com 95412724Snikos.nikoleris@arm.com // all read responses have a data payload 95512724Snikos.nikoleris@arm.com assert(pkt->hasRespData()); 95612724Snikos.nikoleris@arm.com pkt->setDataFromBlock(blk->data, blkSize); 95712724Snikos.nikoleris@arm.com } else if (pkt->isUpgrade()) { 95812724Snikos.nikoleris@arm.com // sanity check 95912724Snikos.nikoleris@arm.com assert(!pkt->hasSharers()); 96012724Snikos.nikoleris@arm.com 96112724Snikos.nikoleris@arm.com if (blk->isDirty()) { 96212724Snikos.nikoleris@arm.com // we were in the Owned state, and a cache above us that 96312724Snikos.nikoleris@arm.com // has the line in Shared state needs to be made aware 96412724Snikos.nikoleris@arm.com // that the data it already has is in fact dirty 96512724Snikos.nikoleris@arm.com pkt->setCacheResponding(); 96612724Snikos.nikoleris@arm.com blk->status &= ~BlkDirty; 96712724Snikos.nikoleris@arm.com } 96812794Snikos.nikoleris@arm.com } else if (pkt->isClean()) { 96912794Snikos.nikoleris@arm.com blk->status &= ~BlkDirty; 97012724Snikos.nikoleris@arm.com } else { 97112724Snikos.nikoleris@arm.com assert(pkt->isInvalidate()); 97212724Snikos.nikoleris@arm.com invalidateBlock(blk); 97312724Snikos.nikoleris@arm.com DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__, 97412724Snikos.nikoleris@arm.com pkt->print()); 97512724Snikos.nikoleris@arm.com } 97612724Snikos.nikoleris@arm.com} 97712724Snikos.nikoleris@arm.com 97812724Snikos.nikoleris@arm.com///////////////////////////////////////////////////// 97912724Snikos.nikoleris@arm.com// 98012724Snikos.nikoleris@arm.com// Access path: requests coming in from the CPU side 98112724Snikos.nikoleris@arm.com// 98212724Snikos.nikoleris@arm.com///////////////////////////////////////////////////// 98313418Sodanrc@yahoo.com.brCycles 98413749Sodanrc@yahoo.com.brBaseCache::calculateTagOnlyLatency(const uint32_t delay, 98513749Sodanrc@yahoo.com.br const Cycles lookup_lat) const 98613749Sodanrc@yahoo.com.br{ 98713749Sodanrc@yahoo.com.br // A tag-only access has to wait for the packet to arrive in order to 98813749Sodanrc@yahoo.com.br // perform the tag lookup. 98913749Sodanrc@yahoo.com.br return ticksToCycles(delay) + lookup_lat; 99013749Sodanrc@yahoo.com.br} 99113749Sodanrc@yahoo.com.br 99213749Sodanrc@yahoo.com.brCycles 99313746Sodanrc@yahoo.com.brBaseCache::calculateAccessLatency(const CacheBlk* blk, const uint32_t delay, 99413418Sodanrc@yahoo.com.br const Cycles lookup_lat) const 99513418Sodanrc@yahoo.com.br{ 99613746Sodanrc@yahoo.com.br Cycles lat(0); 99713418Sodanrc@yahoo.com.br 99813418Sodanrc@yahoo.com.br if (blk != nullptr) { 99913746Sodanrc@yahoo.com.br // As soon as the access arrives, for sequential accesses first access 100013746Sodanrc@yahoo.com.br // tags, then the data entry. In the case of parallel accesses the 100113746Sodanrc@yahoo.com.br // latency is dictated by the slowest of tag and data latencies. 
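// Worked example with assumed numbers (not taken from any real
// configuration): if ticksToCycles(delay) is 2 cycles, lookup_lat is
// 4 and dataLatency is 6, a sequential tag/data organisation costs
// 2 + 4 + 6 = 12 cycles, whereas a parallel organisation costs
// 2 + max(4, 6) = 8 cycles, as computed below.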
100213418Sodanrc@yahoo.com.br if (sequentialAccess) { 100313746Sodanrc@yahoo.com.br lat = ticksToCycles(delay) + lookup_lat + dataLatency; 100413418Sodanrc@yahoo.com.br } else { 100513746Sodanrc@yahoo.com.br lat = ticksToCycles(delay) + std::max(lookup_lat, dataLatency); 100613418Sodanrc@yahoo.com.br } 100713418Sodanrc@yahoo.com.br 100813418Sodanrc@yahoo.com.br // Check if the block to be accessed is available. If not, apply the 100913477Sodanrc@yahoo.com.br // access latency on top of when the block is ready to be accessed. 101013746Sodanrc@yahoo.com.br const Tick tick = curTick() + delay; 101113477Sodanrc@yahoo.com.br const Tick when_ready = blk->getWhenReady(); 101213746Sodanrc@yahoo.com.br if (when_ready > tick && 101313746Sodanrc@yahoo.com.br ticksToCycles(when_ready - tick) > lat) { 101413746Sodanrc@yahoo.com.br lat += ticksToCycles(when_ready - tick); 101513418Sodanrc@yahoo.com.br } 101613746Sodanrc@yahoo.com.br } else { 101713749Sodanrc@yahoo.com.br // In case of a miss, we neglect the data access in a parallel 101813749Sodanrc@yahoo.com.br // configuration (i.e., the data access will be stopped as soon as 101913749Sodanrc@yahoo.com.br // we find out it is a miss), and use the tag-only latency. 102013749Sodanrc@yahoo.com.br lat = calculateTagOnlyLatency(delay, lookup_lat); 102113418Sodanrc@yahoo.com.br } 102213418Sodanrc@yahoo.com.br 102313418Sodanrc@yahoo.com.br return lat; 102413418Sodanrc@yahoo.com.br} 102512724Snikos.nikoleris@arm.com 102612724Snikos.nikoleris@arm.combool 102712724Snikos.nikoleris@arm.comBaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, 102812724Snikos.nikoleris@arm.com PacketList &writebacks) 102912724Snikos.nikoleris@arm.com{ 103012724Snikos.nikoleris@arm.com // sanity check 103112724Snikos.nikoleris@arm.com assert(pkt->isRequest()); 103212724Snikos.nikoleris@arm.com 103312724Snikos.nikoleris@arm.com chatty_assert(!(isReadOnly && pkt->isWrite()), 103412724Snikos.nikoleris@arm.com "Should never see a write in a read-only cache %s\n", 103512724Snikos.nikoleris@arm.com name()); 103612724Snikos.nikoleris@arm.com 103713418Sodanrc@yahoo.com.br // Access block in the tags 103813418Sodanrc@yahoo.com.br Cycles tag_latency(0); 103913418Sodanrc@yahoo.com.br blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), tag_latency); 104013418Sodanrc@yahoo.com.br 104112724Snikos.nikoleris@arm.com DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(), 104212724Snikos.nikoleris@arm.com blk ? "hit " + blk->print() : "miss"); 104312724Snikos.nikoleris@arm.com 104412724Snikos.nikoleris@arm.com if (pkt->req->isCacheMaintenance()) { 104512724Snikos.nikoleris@arm.com // A cache maintenance operation is always forwarded to the 104612724Snikos.nikoleris@arm.com // memory below even if the block is found in dirty state. 104712724Snikos.nikoleris@arm.com 104812724Snikos.nikoleris@arm.com // We defer any changes to the state of the block until we 104912724Snikos.nikoleris@arm.com // create and mark as in service the mshr for the downstream 105012724Snikos.nikoleris@arm.com // packet. 105113749Sodanrc@yahoo.com.br 105213749Sodanrc@yahoo.com.br // Calculate access latency on top of when the packet arrives. This 105313749Sodanrc@yahoo.com.br // takes into account the bus delay. 
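// For example (hypothetical request): a CleanSharedReq that hits a
// clean block still has to be forwarded downstream, so only the tag
// lookup (plus the cycles hidden in headerDelay) is charged here and
// the data array is left untouched.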
105413749Sodanrc@yahoo.com.br lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 105513749Sodanrc@yahoo.com.br 105612724Snikos.nikoleris@arm.com return false; 105712724Snikos.nikoleris@arm.com } 105812724Snikos.nikoleris@arm.com 105912724Snikos.nikoleris@arm.com if (pkt->isEviction()) { 106012724Snikos.nikoleris@arm.com // We check for presence of block in above caches before issuing 106112724Snikos.nikoleris@arm.com // Writeback or CleanEvict to write buffer. Therefore the only 106212724Snikos.nikoleris@arm.com // possible cases can be of a CleanEvict packet coming from above 106312724Snikos.nikoleris@arm.com // encountering a Writeback generated in this cache peer cache and 106412724Snikos.nikoleris@arm.com // waiting in the write buffer. Cases of upper level peer caches 106512724Snikos.nikoleris@arm.com // generating CleanEvict and Writeback or simply CleanEvict and 106612724Snikos.nikoleris@arm.com // CleanEvict almost simultaneously will be caught by snoops sent out 106712724Snikos.nikoleris@arm.com // by crossbar. 106812724Snikos.nikoleris@arm.com WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(), 106912724Snikos.nikoleris@arm.com pkt->isSecure()); 107012724Snikos.nikoleris@arm.com if (wb_entry) { 107112724Snikos.nikoleris@arm.com assert(wb_entry->getNumTargets() == 1); 107212724Snikos.nikoleris@arm.com PacketPtr wbPkt = wb_entry->getTarget()->pkt; 107312724Snikos.nikoleris@arm.com assert(wbPkt->isWriteback()); 107412724Snikos.nikoleris@arm.com 107512724Snikos.nikoleris@arm.com if (pkt->isCleanEviction()) { 107612724Snikos.nikoleris@arm.com // The CleanEvict and WritebackClean snoops into other 107712724Snikos.nikoleris@arm.com // peer caches of the same level while traversing the 107812724Snikos.nikoleris@arm.com // crossbar. If a copy of the block is found, the 107912724Snikos.nikoleris@arm.com // packet is deleted in the crossbar. Hence, none of 108012724Snikos.nikoleris@arm.com // the other upper level caches connected to this 108112724Snikos.nikoleris@arm.com // cache have the block, so we can clear the 108212724Snikos.nikoleris@arm.com // BLOCK_CACHED flag in the Writeback if set and 108312724Snikos.nikoleris@arm.com // discard the CleanEvict by returning true. 108412724Snikos.nikoleris@arm.com wbPkt->clearBlockCached(); 108513749Sodanrc@yahoo.com.br 108613749Sodanrc@yahoo.com.br // A clean evict does not need to access the data array 108713749Sodanrc@yahoo.com.br lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 108813749Sodanrc@yahoo.com.br 108912724Snikos.nikoleris@arm.com return true; 109012724Snikos.nikoleris@arm.com } else { 109112724Snikos.nikoleris@arm.com assert(pkt->cmd == MemCmd::WritebackDirty); 109212724Snikos.nikoleris@arm.com // Dirty writeback from above trumps our clean 109312724Snikos.nikoleris@arm.com // writeback... discard here 109412724Snikos.nikoleris@arm.com // Note: markInService will remove entry from writeback buffer. 109512724Snikos.nikoleris@arm.com markInService(wb_entry); 109612724Snikos.nikoleris@arm.com delete wbPkt; 109712724Snikos.nikoleris@arm.com } 109812724Snikos.nikoleris@arm.com } 109912724Snikos.nikoleris@arm.com } 110012724Snikos.nikoleris@arm.com 110112724Snikos.nikoleris@arm.com // Writeback handling is special case. We can write the block into 110212724Snikos.nikoleris@arm.com // the cache without having a writeable copy (or any copy at all). 
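// Overview of the branches below (descriptive only): WritebackDirty
// and WritebackClean update or allocate a block in this cache,
// CleanEvict is satisfied with a tag-only lookup, and WriteClean may
// update the block and still be forwarded when it is marked
// write-through.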
110312724Snikos.nikoleris@arm.com if (pkt->isWriteback()) { 110412724Snikos.nikoleris@arm.com assert(blkSize == pkt->getSize()); 110512724Snikos.nikoleris@arm.com 110612724Snikos.nikoleris@arm.com // we could get a clean writeback while we are having 110712724Snikos.nikoleris@arm.com // outstanding accesses to a block, do the simple thing for 110812724Snikos.nikoleris@arm.com // now and drop the clean writeback so that we do not upset 110912724Snikos.nikoleris@arm.com // any ordering/decisions about ownership already taken 111012724Snikos.nikoleris@arm.com if (pkt->cmd == MemCmd::WritebackClean && 111112724Snikos.nikoleris@arm.com mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) { 111212724Snikos.nikoleris@arm.com DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, " 111312724Snikos.nikoleris@arm.com "dropping\n", pkt->getAddr()); 111413749Sodanrc@yahoo.com.br 111513749Sodanrc@yahoo.com.br // A writeback searches for the block, then writes the data. 111613749Sodanrc@yahoo.com.br // As the writeback is being dropped, the data is not touched, 111713749Sodanrc@yahoo.com.br // and we just had to wait for the time to find a match in the 111813749Sodanrc@yahoo.com.br // MSHR. As of now assume a mshr queue search takes as long as 111913749Sodanrc@yahoo.com.br // a tag lookup for simplicity. 112013749Sodanrc@yahoo.com.br lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 112113749Sodanrc@yahoo.com.br 112212724Snikos.nikoleris@arm.com return true; 112312724Snikos.nikoleris@arm.com } 112412724Snikos.nikoleris@arm.com 112512724Snikos.nikoleris@arm.com if (!blk) { 112612724Snikos.nikoleris@arm.com // need to do a replacement 112712754Sodanrc@yahoo.com.br blk = allocateBlock(pkt, writebacks); 112812724Snikos.nikoleris@arm.com if (!blk) { 112912724Snikos.nikoleris@arm.com // no replaceable block available: give up, fwd to next level. 113012724Snikos.nikoleris@arm.com incMissCount(pkt); 113113749Sodanrc@yahoo.com.br 113213749Sodanrc@yahoo.com.br // A writeback searches for the block, then writes the data. 113313749Sodanrc@yahoo.com.br // As the block could not be found, it was a tag-only access. 113413749Sodanrc@yahoo.com.br lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 113513749Sodanrc@yahoo.com.br 113612724Snikos.nikoleris@arm.com return false; 113712724Snikos.nikoleris@arm.com } 113812724Snikos.nikoleris@arm.com 113913445Sodanrc@yahoo.com.br blk->status |= BlkReadable; 114013947Sodanrc@yahoo.com.br } else if (compressor) { 114113947Sodanrc@yahoo.com.br // This is an overwrite to an existing block, therefore we need 114213947Sodanrc@yahoo.com.br // to check for data expansion (i.e., block was compressed with 114313947Sodanrc@yahoo.com.br // a smaller size, and now it doesn't fit the entry anymore). 114413947Sodanrc@yahoo.com.br // If that is the case we might need to evict blocks. 114513947Sodanrc@yahoo.com.br if (!updateCompressionData(blk, pkt->getConstPtr<uint64_t>(), 114613947Sodanrc@yahoo.com.br writebacks)) { 114713947Sodanrc@yahoo.com.br // This is a failed data expansion (write), which happened 114813947Sodanrc@yahoo.com.br // after finding the replacement entries and accessing the 114913947Sodanrc@yahoo.com.br // block's data. 
There were no replaceable entries available 115013947Sodanrc@yahoo.com.br // to make room for the expanded block, and since it does not 115113947Sodanrc@yahoo.com.br // fit anymore and it has been properly updated to contain 115213947Sodanrc@yahoo.com.br // the new data, forward it to the next level 115313947Sodanrc@yahoo.com.br lat = calculateAccessLatency(blk, pkt->headerDelay, 115413947Sodanrc@yahoo.com.br tag_latency); 115513947Sodanrc@yahoo.com.br invalidateBlock(blk); 115613947Sodanrc@yahoo.com.br return false; 115713945Sodanrc@yahoo.com.br } 115812724Snikos.nikoleris@arm.com } 115913945Sodanrc@yahoo.com.br 116012724Snikos.nikoleris@arm.com // only mark the block dirty if we got a writeback command, 116112724Snikos.nikoleris@arm.com // and leave it as is for a clean writeback 116212724Snikos.nikoleris@arm.com if (pkt->cmd == MemCmd::WritebackDirty) { 116312724Snikos.nikoleris@arm.com // TODO: the coherent cache can assert(!blk->isDirty()); 116412724Snikos.nikoleris@arm.com blk->status |= BlkDirty; 116512724Snikos.nikoleris@arm.com } 116612724Snikos.nikoleris@arm.com // if the packet does not have sharers, it is passing 116712724Snikos.nikoleris@arm.com // writable, and we got the writeback in Modified or Exclusive 116812724Snikos.nikoleris@arm.com // state, if not we are in the Owned or Shared state 116912724Snikos.nikoleris@arm.com if (!pkt->hasSharers()) { 117012724Snikos.nikoleris@arm.com blk->status |= BlkWritable; 117112724Snikos.nikoleris@arm.com } 117212724Snikos.nikoleris@arm.com // nothing else to do; writeback doesn't expect response 117312724Snikos.nikoleris@arm.com assert(!pkt->needsResponse()); 117412724Snikos.nikoleris@arm.com pkt->writeDataToBlock(blk->data, blkSize); 117512724Snikos.nikoleris@arm.com DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print()); 117612724Snikos.nikoleris@arm.com incHitCount(pkt); 117713748Sodanrc@yahoo.com.br 117813765Sodanrc@yahoo.com.br // A writeback searches for the block, then writes the data 117913765Sodanrc@yahoo.com.br lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency); 118013765Sodanrc@yahoo.com.br 118113748Sodanrc@yahoo.com.br // When the packet metadata arrives, the tag lookup will be done while 118213748Sodanrc@yahoo.com.br // the payload is arriving. Then the block will be ready to access as 118313748Sodanrc@yahoo.com.br // soon as the fill is done 118413477Sodanrc@yahoo.com.br blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay + 118513748Sodanrc@yahoo.com.br std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay)); 118613749Sodanrc@yahoo.com.br 118712724Snikos.nikoleris@arm.com return true; 118812724Snikos.nikoleris@arm.com } else if (pkt->cmd == MemCmd::CleanEvict) { 118913749Sodanrc@yahoo.com.br // A CleanEvict does not need to access the data array 119013749Sodanrc@yahoo.com.br lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 119113749Sodanrc@yahoo.com.br 119212724Snikos.nikoleris@arm.com if (blk) { 119312724Snikos.nikoleris@arm.com // Found the block in the tags, need to stop CleanEvict from 119412724Snikos.nikoleris@arm.com // propagating further down the hierarchy. Returning true will 119512724Snikos.nikoleris@arm.com // treat the CleanEvict like a satisfied write request and delete 119612724Snikos.nikoleris@arm.com // it. 119712724Snikos.nikoleris@arm.com return true; 119812724Snikos.nikoleris@arm.com } 119912724Snikos.nikoleris@arm.com // We didn't find the block here, propagate the CleanEvict further 120012724Snikos.nikoleris@arm.com // down the memory hierarchy. 
Returning false will treat the CleanEvict 120112724Snikos.nikoleris@arm.com // like a Writeback which could not find a replaceable block so has to 120212724Snikos.nikoleris@arm.com // go to next level. 120312724Snikos.nikoleris@arm.com return false; 120412724Snikos.nikoleris@arm.com } else if (pkt->cmd == MemCmd::WriteClean) { 120512724Snikos.nikoleris@arm.com // WriteClean handling is a special case. We can allocate a 120612724Snikos.nikoleris@arm.com // block directly if it doesn't exist and we can update the 120712724Snikos.nikoleris@arm.com // block immediately. The WriteClean transfers the ownership 120812724Snikos.nikoleris@arm.com // of the block as well. 120912724Snikos.nikoleris@arm.com assert(blkSize == pkt->getSize()); 121012724Snikos.nikoleris@arm.com 121112724Snikos.nikoleris@arm.com if (!blk) { 121212724Snikos.nikoleris@arm.com if (pkt->writeThrough()) { 121313749Sodanrc@yahoo.com.br // A writeback searches for the block, then writes the data. 121413749Sodanrc@yahoo.com.br // As the block could not be found, it was a tag-only access. 121513749Sodanrc@yahoo.com.br lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 121613749Sodanrc@yahoo.com.br 121712724Snikos.nikoleris@arm.com // if this is a write through packet, we don't try to 121812724Snikos.nikoleris@arm.com // allocate if the block is not present 121912724Snikos.nikoleris@arm.com return false; 122012724Snikos.nikoleris@arm.com } else { 122112724Snikos.nikoleris@arm.com // a writeback that misses needs to allocate a new block 122212754Sodanrc@yahoo.com.br blk = allocateBlock(pkt, writebacks); 122312724Snikos.nikoleris@arm.com if (!blk) { 122412724Snikos.nikoleris@arm.com // no replaceable block available: give up, fwd to 122512724Snikos.nikoleris@arm.com // next level. 122612724Snikos.nikoleris@arm.com incMissCount(pkt); 122713749Sodanrc@yahoo.com.br 122813749Sodanrc@yahoo.com.br // A writeback searches for the block, then writes the 122913749Sodanrc@yahoo.com.br // data. As the block could not be found, it was a tag-only 123013749Sodanrc@yahoo.com.br // access. 123113749Sodanrc@yahoo.com.br lat = calculateTagOnlyLatency(pkt->headerDelay, 123213749Sodanrc@yahoo.com.br tag_latency); 123313749Sodanrc@yahoo.com.br 123412724Snikos.nikoleris@arm.com return false; 123512724Snikos.nikoleris@arm.com } 123612724Snikos.nikoleris@arm.com 123713445Sodanrc@yahoo.com.br blk->status |= BlkReadable; 123812724Snikos.nikoleris@arm.com } 123913947Sodanrc@yahoo.com.br } else if (compressor) { 124013947Sodanrc@yahoo.com.br // This is an overwrite to an existing block, therefore we need 124113947Sodanrc@yahoo.com.br // to check for data expansion (i.e., block was compressed with 124213947Sodanrc@yahoo.com.br // a smaller size, and now it doesn't fit the entry anymore). 124313947Sodanrc@yahoo.com.br // If that is the case we might need to evict blocks. 124413947Sodanrc@yahoo.com.br if (!updateCompressionData(blk, pkt->getConstPtr<uint64_t>(), 124513947Sodanrc@yahoo.com.br writebacks)) { 124613947Sodanrc@yahoo.com.br // This is a failed data expansion (write), which happened 124713947Sodanrc@yahoo.com.br // after finding the replacement entries and accessing the 124813947Sodanrc@yahoo.com.br // block's data. 
There were no replaceable entries available
124913947Sodanrc@yahoo.com.br // to make room for the expanded block, and since it does not
125013947Sodanrc@yahoo.com.br // fit anymore and it has been properly updated to contain
125113947Sodanrc@yahoo.com.br // the new data, forward it to the next level
125213947Sodanrc@yahoo.com.br lat = calculateAccessLatency(blk, pkt->headerDelay,
125313947Sodanrc@yahoo.com.br tag_latency);
125413947Sodanrc@yahoo.com.br invalidateBlock(blk);
125513947Sodanrc@yahoo.com.br return false;
125613945Sodanrc@yahoo.com.br }
125712724Snikos.nikoleris@arm.com }
125812724Snikos.nikoleris@arm.com
125912724Snikos.nikoleris@arm.com // at this point either this is a writeback or a write-through
126012724Snikos.nikoleris@arm.com // write clean operation and the block is already in this
126112724Snikos.nikoleris@arm.com // cache; we need to update the data and the block flags
126212724Snikos.nikoleris@arm.com assert(blk);
126312724Snikos.nikoleris@arm.com // TODO: the coherent cache can assert(!blk->isDirty());
126412724Snikos.nikoleris@arm.com if (!pkt->writeThrough()) {
126512724Snikos.nikoleris@arm.com blk->status |= BlkDirty;
126612724Snikos.nikoleris@arm.com }
126712724Snikos.nikoleris@arm.com // nothing else to do; writeback doesn't expect response
126812724Snikos.nikoleris@arm.com assert(!pkt->needsResponse());
126912724Snikos.nikoleris@arm.com pkt->writeDataToBlock(blk->data, blkSize);
127012724Snikos.nikoleris@arm.com DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
127112724Snikos.nikoleris@arm.com
127212724Snikos.nikoleris@arm.com incHitCount(pkt);
127313748Sodanrc@yahoo.com.br
127413765Sodanrc@yahoo.com.br // A writeback searches for the block, then writes the data
127513765Sodanrc@yahoo.com.br lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);
127613765Sodanrc@yahoo.com.br
127713748Sodanrc@yahoo.com.br // When the packet metadata arrives, the tag lookup will be done while
127813748Sodanrc@yahoo.com.br // the payload is arriving. Then the block will be ready to access as
127913748Sodanrc@yahoo.com.br // soon as the fill is done
128013477Sodanrc@yahoo.com.br blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
128113748Sodanrc@yahoo.com.br std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay));
128213748Sodanrc@yahoo.com.br
128313947Sodanrc@yahoo.com.br // If this is a write-through packet it will be sent to the cache below
128412724Snikos.nikoleris@arm.com return !pkt->writeThrough();
128512724Snikos.nikoleris@arm.com } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
128612724Snikos.nikoleris@arm.com blk->isReadable())) {
128712724Snikos.nikoleris@arm.com // OK to satisfy access
128812724Snikos.nikoleris@arm.com incHitCount(pkt);
128912724Snikos.nikoleris@arm.com
129013749Sodanrc@yahoo.com.br // Calculate access latency based on the need to access the data array
129113749Sodanrc@yahoo.com.br if (pkt->isRead() || pkt->isWrite()) {
129213749Sodanrc@yahoo.com.br lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);
129313945Sodanrc@yahoo.com.br
129413945Sodanrc@yahoo.com.br // When a block is compressed, it must first be decompressed
129513945Sodanrc@yahoo.com.br // before being read. This adds to the access latency.
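// Worked example with assumed numbers: if the hit latency computed
// above is 8 cycles and the block was stored with a decompression
// latency of 5 cycles, a read of the compressed block is charged
// 13 cycles; writes are not charged the extra term by the code
// below.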
129613945Sodanrc@yahoo.com.br if (compressor && pkt->isRead()) { 129713945Sodanrc@yahoo.com.br lat += compressor->getDecompressionLatency(blk); 129813945Sodanrc@yahoo.com.br } 129913749Sodanrc@yahoo.com.br } else { 130013749Sodanrc@yahoo.com.br lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 130113749Sodanrc@yahoo.com.br } 130213749Sodanrc@yahoo.com.br 130313765Sodanrc@yahoo.com.br satisfyRequest(pkt, blk); 130413765Sodanrc@yahoo.com.br maintainClusivity(pkt->fromCache(), blk); 130513765Sodanrc@yahoo.com.br 130612724Snikos.nikoleris@arm.com return true; 130712724Snikos.nikoleris@arm.com } 130812724Snikos.nikoleris@arm.com 130912724Snikos.nikoleris@arm.com // Can't satisfy access normally... either no block (blk == nullptr) 131012724Snikos.nikoleris@arm.com // or have block but need writable 131112724Snikos.nikoleris@arm.com 131212724Snikos.nikoleris@arm.com incMissCount(pkt); 131312724Snikos.nikoleris@arm.com 131413749Sodanrc@yahoo.com.br lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency); 131513749Sodanrc@yahoo.com.br 131612724Snikos.nikoleris@arm.com if (!blk && pkt->isLLSC() && pkt->isWrite()) { 131712724Snikos.nikoleris@arm.com // complete miss on store conditional... just give up now 131812724Snikos.nikoleris@arm.com pkt->req->setExtraData(0); 131912724Snikos.nikoleris@arm.com return true; 132012724Snikos.nikoleris@arm.com } 132112724Snikos.nikoleris@arm.com 132212724Snikos.nikoleris@arm.com return false; 132312724Snikos.nikoleris@arm.com} 132412724Snikos.nikoleris@arm.com 132512724Snikos.nikoleris@arm.comvoid 132612724Snikos.nikoleris@arm.comBaseCache::maintainClusivity(bool from_cache, CacheBlk *blk) 132712724Snikos.nikoleris@arm.com{ 132812724Snikos.nikoleris@arm.com if (from_cache && blk && blk->isValid() && !blk->isDirty() && 132912724Snikos.nikoleris@arm.com clusivity == Enums::mostly_excl) { 133012724Snikos.nikoleris@arm.com // if we have responded to a cache, and our block is still 133112724Snikos.nikoleris@arm.com // valid, but not dirty, and this cache is mostly exclusive 133212724Snikos.nikoleris@arm.com // with respect to the cache above, drop the block 133312724Snikos.nikoleris@arm.com invalidateBlock(blk); 133412724Snikos.nikoleris@arm.com } 133512724Snikos.nikoleris@arm.com} 133612724Snikos.nikoleris@arm.com 133712724Snikos.nikoleris@arm.comCacheBlk* 133812724Snikos.nikoleris@arm.comBaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks, 133912724Snikos.nikoleris@arm.com bool allocate) 134012724Snikos.nikoleris@arm.com{ 134113350Snikos.nikoleris@arm.com assert(pkt->isResponse()); 134212724Snikos.nikoleris@arm.com Addr addr = pkt->getAddr(); 134312724Snikos.nikoleris@arm.com bool is_secure = pkt->isSecure(); 134412724Snikos.nikoleris@arm.com#if TRACING_ON 134512724Snikos.nikoleris@arm.com CacheBlk::State old_state = blk ? blk->status : 0; 134612724Snikos.nikoleris@arm.com#endif 134712724Snikos.nikoleris@arm.com 134812724Snikos.nikoleris@arm.com // When handling a fill, we should have no writes to this line. 134912724Snikos.nikoleris@arm.com assert(addr == pkt->getBlockAddr(blkSize)); 135012724Snikos.nikoleris@arm.com assert(!writeBuffer.findMatch(addr, is_secure)); 135112724Snikos.nikoleris@arm.com 135212724Snikos.nikoleris@arm.com if (!blk) { 135312724Snikos.nikoleris@arm.com // better have read new data... 
135413350Snikos.nikoleris@arm.com assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp); 135512724Snikos.nikoleris@arm.com 135612724Snikos.nikoleris@arm.com // need to do a replacement if allocating, otherwise we stick 135712724Snikos.nikoleris@arm.com // with the temporary storage 135812754Sodanrc@yahoo.com.br blk = allocate ? allocateBlock(pkt, writebacks) : nullptr; 135912724Snikos.nikoleris@arm.com 136012724Snikos.nikoleris@arm.com if (!blk) { 136112724Snikos.nikoleris@arm.com // No replaceable block or a mostly exclusive 136212724Snikos.nikoleris@arm.com // cache... just use temporary storage to complete the 136312724Snikos.nikoleris@arm.com // current request and then get rid of it 136412724Snikos.nikoleris@arm.com blk = tempBlock; 136512730Sodanrc@yahoo.com.br tempBlock->insert(addr, is_secure); 136612724Snikos.nikoleris@arm.com DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr, 136712724Snikos.nikoleris@arm.com is_secure ? "s" : "ns"); 136812724Snikos.nikoleris@arm.com } 136912724Snikos.nikoleris@arm.com } else { 137012724Snikos.nikoleris@arm.com // existing block... probably an upgrade 137112724Snikos.nikoleris@arm.com // don't clear block status... if block is already dirty we 137212724Snikos.nikoleris@arm.com // don't want to lose that 137312724Snikos.nikoleris@arm.com } 137412724Snikos.nikoleris@arm.com 137513445Sodanrc@yahoo.com.br // Block is guaranteed to be valid at this point 137613445Sodanrc@yahoo.com.br assert(blk->isValid()); 137713445Sodanrc@yahoo.com.br assert(blk->isSecure() == is_secure); 137813445Sodanrc@yahoo.com.br assert(regenerateBlkAddr(blk) == addr); 137913445Sodanrc@yahoo.com.br 138013445Sodanrc@yahoo.com.br blk->status |= BlkReadable; 138112724Snikos.nikoleris@arm.com 138212724Snikos.nikoleris@arm.com // sanity check for whole-line writes, which should always be 138312724Snikos.nikoleris@arm.com // marked as writable as part of the fill, and then later marked 138412724Snikos.nikoleris@arm.com // dirty as part of satisfyRequest 138513350Snikos.nikoleris@arm.com if (pkt->cmd == MemCmd::InvalidateResp) { 138612724Snikos.nikoleris@arm.com assert(!pkt->hasSharers()); 138712724Snikos.nikoleris@arm.com } 138812724Snikos.nikoleris@arm.com 138912724Snikos.nikoleris@arm.com // here we deal with setting the appropriate state of the line, 139012724Snikos.nikoleris@arm.com // and we start by looking at the hasSharers flag, and ignore the 139112724Snikos.nikoleris@arm.com // cacheResponding flag (normally signalling dirty data) if the 139212724Snikos.nikoleris@arm.com // packet has sharers, thus the line is never allocated as Owned 139312724Snikos.nikoleris@arm.com // (dirty but not writable), and always ends up being either 139412724Snikos.nikoleris@arm.com // Shared, Exclusive or Modified, see Packet::setCacheResponding 139512724Snikos.nikoleris@arm.com // for more details 139612724Snikos.nikoleris@arm.com if (!pkt->hasSharers()) { 139712724Snikos.nikoleris@arm.com // we could get a writable line from memory (rather than a 139812724Snikos.nikoleris@arm.com // cache) even in a read-only cache, note that we set this bit 139912724Snikos.nikoleris@arm.com // even for a read-only cache, possibly revisit this decision 140012724Snikos.nikoleris@arm.com blk->status |= BlkWritable; 140112724Snikos.nikoleris@arm.com 140212724Snikos.nikoleris@arm.com // check if we got this via cache-to-cache transfer (i.e., from a 140312724Snikos.nikoleris@arm.com // cache that had the block in Modified or Owned state) 140412724Snikos.nikoleris@arm.com if (pkt->cacheResponding()) { 
140512724Snikos.nikoleris@arm.com // we got the block in Modified state, and invalidated the
140612724Snikos.nikoleris@arm.com // owner's copy
140712724Snikos.nikoleris@arm.com blk->status |= BlkDirty;
140812724Snikos.nikoleris@arm.com
140912724Snikos.nikoleris@arm.com chatty_assert(!isReadOnly, "Should never see dirty snoop response "
141012724Snikos.nikoleris@arm.com "in read-only cache %s\n", name());
141113932Snikos.nikoleris@arm.com
141213932Snikos.nikoleris@arm.com } else if (pkt->cmd.isSWPrefetch() && pkt->needsWritable()) {
141313932Snikos.nikoleris@arm.com // All other copies of the block were invalidated and we
141413932Snikos.nikoleris@arm.com // have an exclusive copy.
141513932Snikos.nikoleris@arm.com
141613932Snikos.nikoleris@arm.com // The coherence protocol assumes that if we fetched an
141713932Snikos.nikoleris@arm.com // exclusive copy of the block, we have the intention to
141813932Snikos.nikoleris@arm.com // modify it. Therefore the MSHR for the PrefetchExReq has
141913932Snikos.nikoleris@arm.com // been the point of ordering and this cache has committed
142013932Snikos.nikoleris@arm.com // to respond to snoops for the block.
142113932Snikos.nikoleris@arm.com //
142213932Snikos.nikoleris@arm.com // In most cases this is true anyway - a PrefetchExReq
142313932Snikos.nikoleris@arm.com // will be followed by a WriteReq. However, if that
142413932Snikos.nikoleris@arm.com // doesn't happen, the block is not marked as dirty and
142513932Snikos.nikoleris@arm.com // the cache does not respond to snoops even though it
142613932Snikos.nikoleris@arm.com // has committed to do so.
142713932Snikos.nikoleris@arm.com //
142813932Snikos.nikoleris@arm.com // To avoid deadlocks in cases where there is a snoop
142913932Snikos.nikoleris@arm.com // between the PrefetchExReq and the expected WriteReq, we
143013932Snikos.nikoleris@arm.com // proactively mark the block as Dirty.
143113932Snikos.nikoleris@arm.com
143213932Snikos.nikoleris@arm.com blk->status |= BlkDirty;
143313932Snikos.nikoleris@arm.com
143413932Snikos.nikoleris@arm.com panic_if(!isReadOnly, "Prefetch exclusive requests from read-only "
143513932Snikos.nikoleris@arm.com "cache %s\n", name());
143612724Snikos.nikoleris@arm.com }
143712724Snikos.nikoleris@arm.com }
143812724Snikos.nikoleris@arm.com
143912724Snikos.nikoleris@arm.com DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
144012724Snikos.nikoleris@arm.com addr, is_secure ?
"s" : "ns", old_state, blk->print()); 144112724Snikos.nikoleris@arm.com 144212724Snikos.nikoleris@arm.com // if we got new data, copy it in (checking for a read response 144312724Snikos.nikoleris@arm.com // and a response that has data is the same in the end) 144412724Snikos.nikoleris@arm.com if (pkt->isRead()) { 144512724Snikos.nikoleris@arm.com // sanity checks 144612724Snikos.nikoleris@arm.com assert(pkt->hasData()); 144712724Snikos.nikoleris@arm.com assert(pkt->getSize() == blkSize); 144812724Snikos.nikoleris@arm.com 144912724Snikos.nikoleris@arm.com pkt->writeDataToBlock(blk->data, blkSize); 145012724Snikos.nikoleris@arm.com } 145113750Sodanrc@yahoo.com.br // The block will be ready when the payload arrives and the fill is done 145213750Sodanrc@yahoo.com.br blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay + 145313750Sodanrc@yahoo.com.br pkt->payloadDelay); 145412724Snikos.nikoleris@arm.com 145512724Snikos.nikoleris@arm.com return blk; 145612724Snikos.nikoleris@arm.com} 145712724Snikos.nikoleris@arm.com 145812724Snikos.nikoleris@arm.comCacheBlk* 145912754Sodanrc@yahoo.com.brBaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks) 146012724Snikos.nikoleris@arm.com{ 146112754Sodanrc@yahoo.com.br // Get address 146212754Sodanrc@yahoo.com.br const Addr addr = pkt->getAddr(); 146312754Sodanrc@yahoo.com.br 146412754Sodanrc@yahoo.com.br // Get secure bit 146512754Sodanrc@yahoo.com.br const bool is_secure = pkt->isSecure(); 146612754Sodanrc@yahoo.com.br 146713945Sodanrc@yahoo.com.br // Block size and compression related access latency. Only relevant if 146813945Sodanrc@yahoo.com.br // using a compressor, otherwise there is no extra delay, and the block 146913945Sodanrc@yahoo.com.br // is fully sized 147013941Sodanrc@yahoo.com.br std::size_t blk_size_bits = blkSize*8; 147113945Sodanrc@yahoo.com.br Cycles compression_lat = Cycles(0); 147213945Sodanrc@yahoo.com.br Cycles decompression_lat = Cycles(0); 147313945Sodanrc@yahoo.com.br 147413945Sodanrc@yahoo.com.br // If a compressor is being used, it is called to compress data before 147513945Sodanrc@yahoo.com.br // insertion. Although in Gem5 the data is stored uncompressed, even if a 147613945Sodanrc@yahoo.com.br // compressor is used, the compression/decompression methods are called to 147713945Sodanrc@yahoo.com.br // calculate the amount of extra cycles needed to read or write compressed 147813945Sodanrc@yahoo.com.br // blocks. 147913945Sodanrc@yahoo.com.br if (compressor) { 148013945Sodanrc@yahoo.com.br compressor->compress(pkt->getConstPtr<uint64_t>(), compression_lat, 148113945Sodanrc@yahoo.com.br decompression_lat, blk_size_bits); 148213945Sodanrc@yahoo.com.br } 148313941Sodanrc@yahoo.com.br 148412724Snikos.nikoleris@arm.com // Find replacement victim 148512744Sodanrc@yahoo.com.br std::vector<CacheBlk*> evict_blks; 148613941Sodanrc@yahoo.com.br CacheBlk *victim = tags->findVictim(addr, is_secure, blk_size_bits, 148713941Sodanrc@yahoo.com.br evict_blks); 148812724Snikos.nikoleris@arm.com 148912724Snikos.nikoleris@arm.com // It is valid to return nullptr if there is no victim 149012744Sodanrc@yahoo.com.br if (!victim) 149112724Snikos.nikoleris@arm.com return nullptr; 149212724Snikos.nikoleris@arm.com 149313222Sodanrc@yahoo.com.br // Print victim block's information 149413222Sodanrc@yahoo.com.br DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print()); 149513222Sodanrc@yahoo.com.br 149612744Sodanrc@yahoo.com.br // Check for transient state allocations. 
If any of the entries listed 149712744Sodanrc@yahoo.com.br // for eviction has a transient state, the allocation fails 149813866Sodanrc@yahoo.com.br bool replacement = false; 149912744Sodanrc@yahoo.com.br for (const auto& blk : evict_blks) { 150012744Sodanrc@yahoo.com.br if (blk->isValid()) { 150113866Sodanrc@yahoo.com.br replacement = true; 150213866Sodanrc@yahoo.com.br 150312744Sodanrc@yahoo.com.br Addr repl_addr = regenerateBlkAddr(blk); 150412744Sodanrc@yahoo.com.br MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure()); 150512744Sodanrc@yahoo.com.br if (repl_mshr) { 150612744Sodanrc@yahoo.com.br // must be an outstanding upgrade or clean request 150712744Sodanrc@yahoo.com.br // on a block we're about to replace... 150812744Sodanrc@yahoo.com.br assert((!blk->isWritable() && repl_mshr->needsWritable()) || 150912744Sodanrc@yahoo.com.br repl_mshr->isCleaning()); 151012724Snikos.nikoleris@arm.com 151112744Sodanrc@yahoo.com.br // too hard to replace block with transient state 151212744Sodanrc@yahoo.com.br // allocation failed, block not inserted 151312744Sodanrc@yahoo.com.br return nullptr; 151412744Sodanrc@yahoo.com.br } 151512744Sodanrc@yahoo.com.br } 151612744Sodanrc@yahoo.com.br } 151712744Sodanrc@yahoo.com.br 151812744Sodanrc@yahoo.com.br // The victim will be replaced by a new entry, so increase the replacement 151912744Sodanrc@yahoo.com.br // counter if a valid block is being replaced 152013866Sodanrc@yahoo.com.br if (replacement) { 152113866Sodanrc@yahoo.com.br // Evict valid blocks associated to this victim block 152213863Sodanrc@yahoo.com.br for (const auto& blk : evict_blks) { 152313863Sodanrc@yahoo.com.br if (blk->isValid()) { 152413863Sodanrc@yahoo.com.br DPRINTF(CacheRepl, "Evicting %s (%#llx) to make room for " \ 152513863Sodanrc@yahoo.com.br "%#llx (%s)\n", blk->print(), regenerateBlkAddr(blk), 152613863Sodanrc@yahoo.com.br addr, is_secure); 152713866Sodanrc@yahoo.com.br 152813866Sodanrc@yahoo.com.br if (blk->wasPrefetched()) { 152913866Sodanrc@yahoo.com.br unusedPrefetches++; 153013866Sodanrc@yahoo.com.br } 153113866Sodanrc@yahoo.com.br 153213866Sodanrc@yahoo.com.br evictBlock(blk, writebacks); 153313863Sodanrc@yahoo.com.br } 153413863Sodanrc@yahoo.com.br } 153512744Sodanrc@yahoo.com.br 153612744Sodanrc@yahoo.com.br replacements++; 153712744Sodanrc@yahoo.com.br } 153812744Sodanrc@yahoo.com.br 153913945Sodanrc@yahoo.com.br // If using a compressor, set compression data. This must be done before 154013945Sodanrc@yahoo.com.br // block insertion, as compressed tags use this information. 
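// For illustration (assumed sizes, not from any real run): with
// 64-byte blocks the uncompressed size is 512 bits; if the
// compressor shrank the incoming data to, say, 192 bits, the
// compressed tags can use that size and the stored decompression
// latency to decide whether this block may co-allocate with others
// in the same superblock entry.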
154113945Sodanrc@yahoo.com.br if (compressor) { 154213945Sodanrc@yahoo.com.br compressor->setSizeBits(victim, blk_size_bits); 154313945Sodanrc@yahoo.com.br compressor->setDecompressionLatency(victim, decompression_lat); 154413945Sodanrc@yahoo.com.br } 154513945Sodanrc@yahoo.com.br 154612754Sodanrc@yahoo.com.br // Insert new block at victimized entry 154713752Sodanrc@yahoo.com.br tags->insertBlock(pkt, victim); 154812754Sodanrc@yahoo.com.br 154912744Sodanrc@yahoo.com.br return victim; 155012724Snikos.nikoleris@arm.com} 155112724Snikos.nikoleris@arm.com 155212724Snikos.nikoleris@arm.comvoid 155312724Snikos.nikoleris@arm.comBaseCache::invalidateBlock(CacheBlk *blk) 155412724Snikos.nikoleris@arm.com{ 155513376Sodanrc@yahoo.com.br // If handling a block present in the Tags, let it do its invalidation 155613376Sodanrc@yahoo.com.br // process, which will update stats and invalidate the block itself 155713376Sodanrc@yahoo.com.br if (blk != tempBlock) { 155812724Snikos.nikoleris@arm.com tags->invalidate(blk); 155913376Sodanrc@yahoo.com.br } else { 156013376Sodanrc@yahoo.com.br tempBlock->invalidate(); 156113376Sodanrc@yahoo.com.br } 156212724Snikos.nikoleris@arm.com} 156312724Snikos.nikoleris@arm.com 156413358Sodanrc@yahoo.com.brvoid 156513358Sodanrc@yahoo.com.brBaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks) 156613358Sodanrc@yahoo.com.br{ 156713358Sodanrc@yahoo.com.br PacketPtr pkt = evictBlock(blk); 156813358Sodanrc@yahoo.com.br if (pkt) { 156913358Sodanrc@yahoo.com.br writebacks.push_back(pkt); 157013358Sodanrc@yahoo.com.br } 157113358Sodanrc@yahoo.com.br} 157213358Sodanrc@yahoo.com.br 157312724Snikos.nikoleris@arm.comPacketPtr 157412724Snikos.nikoleris@arm.comBaseCache::writebackBlk(CacheBlk *blk) 157512724Snikos.nikoleris@arm.com{ 157612724Snikos.nikoleris@arm.com chatty_assert(!isReadOnly || writebackClean, 157712724Snikos.nikoleris@arm.com "Writeback from read-only cache"); 157812724Snikos.nikoleris@arm.com assert(blk && blk->isValid() && (blk->isDirty() || writebackClean)); 157912724Snikos.nikoleris@arm.com 158012724Snikos.nikoleris@arm.com writebacks[Request::wbMasterId]++; 158112724Snikos.nikoleris@arm.com 158212749Sgiacomo.travaglini@arm.com RequestPtr req = std::make_shared<Request>( 158312749Sgiacomo.travaglini@arm.com regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId); 158412749Sgiacomo.travaglini@arm.com 158512724Snikos.nikoleris@arm.com if (blk->isSecure()) 158612724Snikos.nikoleris@arm.com req->setFlags(Request::SECURE); 158712724Snikos.nikoleris@arm.com 158812724Snikos.nikoleris@arm.com req->taskId(blk->task_id); 158912724Snikos.nikoleris@arm.com 159012724Snikos.nikoleris@arm.com PacketPtr pkt = 159112724Snikos.nikoleris@arm.com new Packet(req, blk->isDirty() ? 
159212724Snikos.nikoleris@arm.com MemCmd::WritebackDirty : MemCmd::WritebackClean); 159312724Snikos.nikoleris@arm.com 159412724Snikos.nikoleris@arm.com DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n", 159512724Snikos.nikoleris@arm.com pkt->print(), blk->isWritable(), blk->isDirty()); 159612724Snikos.nikoleris@arm.com 159712724Snikos.nikoleris@arm.com if (blk->isWritable()) { 159812724Snikos.nikoleris@arm.com // not asserting shared means we pass the block in modified 159912724Snikos.nikoleris@arm.com // state, mark our own block non-writeable 160012724Snikos.nikoleris@arm.com blk->status &= ~BlkWritable; 160112724Snikos.nikoleris@arm.com } else { 160212724Snikos.nikoleris@arm.com // we are in the Owned state, tell the receiver 160312724Snikos.nikoleris@arm.com pkt->setHasSharers(); 160412724Snikos.nikoleris@arm.com } 160512724Snikos.nikoleris@arm.com 160612724Snikos.nikoleris@arm.com // make sure the block is not marked dirty 160712724Snikos.nikoleris@arm.com blk->status &= ~BlkDirty; 160812724Snikos.nikoleris@arm.com 160912724Snikos.nikoleris@arm.com pkt->allocate(); 161012724Snikos.nikoleris@arm.com pkt->setDataFromBlock(blk->data, blkSize); 161112724Snikos.nikoleris@arm.com 161213945Sodanrc@yahoo.com.br // When a block is compressed, it must first be decompressed before being 161313945Sodanrc@yahoo.com.br // sent for writeback. 161413945Sodanrc@yahoo.com.br if (compressor) { 161513945Sodanrc@yahoo.com.br pkt->payloadDelay = compressor->getDecompressionLatency(blk); 161613945Sodanrc@yahoo.com.br } 161713945Sodanrc@yahoo.com.br 161812724Snikos.nikoleris@arm.com return pkt; 161912724Snikos.nikoleris@arm.com} 162012724Snikos.nikoleris@arm.com 162112724Snikos.nikoleris@arm.comPacketPtr 162212724Snikos.nikoleris@arm.comBaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id) 162312724Snikos.nikoleris@arm.com{ 162412749Sgiacomo.travaglini@arm.com RequestPtr req = std::make_shared<Request>( 162512749Sgiacomo.travaglini@arm.com regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId); 162612749Sgiacomo.travaglini@arm.com 162712724Snikos.nikoleris@arm.com if (blk->isSecure()) { 162812724Snikos.nikoleris@arm.com req->setFlags(Request::SECURE); 162912724Snikos.nikoleris@arm.com } 163012724Snikos.nikoleris@arm.com req->taskId(blk->task_id); 163112724Snikos.nikoleris@arm.com 163212724Snikos.nikoleris@arm.com PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id); 163312724Snikos.nikoleris@arm.com 163412724Snikos.nikoleris@arm.com if (dest) { 163512724Snikos.nikoleris@arm.com req->setFlags(dest); 163612724Snikos.nikoleris@arm.com pkt->setWriteThrough(); 163712724Snikos.nikoleris@arm.com } 163812724Snikos.nikoleris@arm.com 163912724Snikos.nikoleris@arm.com DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(), 164012724Snikos.nikoleris@arm.com blk->isWritable(), blk->isDirty()); 164112724Snikos.nikoleris@arm.com 164212724Snikos.nikoleris@arm.com if (blk->isWritable()) { 164312724Snikos.nikoleris@arm.com // not asserting shared means we pass the block in modified 164412724Snikos.nikoleris@arm.com // state, mark our own block non-writeable 164512724Snikos.nikoleris@arm.com blk->status &= ~BlkWritable; 164612724Snikos.nikoleris@arm.com } else { 164712724Snikos.nikoleris@arm.com // we are in the Owned state, tell the receiver 164812724Snikos.nikoleris@arm.com pkt->setHasSharers(); 164912724Snikos.nikoleris@arm.com } 165012724Snikos.nikoleris@arm.com 165112724Snikos.nikoleris@arm.com // make sure the block is not marked dirty 
165212724Snikos.nikoleris@arm.com blk->status &= ~BlkDirty; 165312724Snikos.nikoleris@arm.com 165412724Snikos.nikoleris@arm.com pkt->allocate(); 165512724Snikos.nikoleris@arm.com pkt->setDataFromBlock(blk->data, blkSize); 165612724Snikos.nikoleris@arm.com 165713945Sodanrc@yahoo.com.br // When a block is compressed, it must first be decompressed before being 165813945Sodanrc@yahoo.com.br // sent for writeback. 165913945Sodanrc@yahoo.com.br if (compressor) { 166013945Sodanrc@yahoo.com.br pkt->payloadDelay = compressor->getDecompressionLatency(blk); 166113945Sodanrc@yahoo.com.br } 166213945Sodanrc@yahoo.com.br 166312724Snikos.nikoleris@arm.com return pkt; 166412724Snikos.nikoleris@arm.com} 166512724Snikos.nikoleris@arm.com 166612724Snikos.nikoleris@arm.com 166712724Snikos.nikoleris@arm.comvoid 166812724Snikos.nikoleris@arm.comBaseCache::memWriteback() 166912724Snikos.nikoleris@arm.com{ 167012728Snikos.nikoleris@arm.com tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); }); 167112724Snikos.nikoleris@arm.com} 167212724Snikos.nikoleris@arm.com 167312724Snikos.nikoleris@arm.comvoid 167412724Snikos.nikoleris@arm.comBaseCache::memInvalidate() 167512724Snikos.nikoleris@arm.com{ 167612728Snikos.nikoleris@arm.com tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); }); 167712724Snikos.nikoleris@arm.com} 167812724Snikos.nikoleris@arm.com 167912724Snikos.nikoleris@arm.combool 168012724Snikos.nikoleris@arm.comBaseCache::isDirty() const 168112724Snikos.nikoleris@arm.com{ 168212728Snikos.nikoleris@arm.com return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); }); 168312724Snikos.nikoleris@arm.com} 168412724Snikos.nikoleris@arm.com 168513416Sjavier.bueno@metempsy.combool 168613416Sjavier.bueno@metempsy.comBaseCache::coalesce() const 168713416Sjavier.bueno@metempsy.com{ 168813416Sjavier.bueno@metempsy.com return writeAllocator && writeAllocator->coalesce(); 168913416Sjavier.bueno@metempsy.com} 169013416Sjavier.bueno@metempsy.com 169112728Snikos.nikoleris@arm.comvoid 169212724Snikos.nikoleris@arm.comBaseCache::writebackVisitor(CacheBlk &blk) 169312724Snikos.nikoleris@arm.com{ 169412724Snikos.nikoleris@arm.com if (blk.isDirty()) { 169512724Snikos.nikoleris@arm.com assert(blk.isValid()); 169612724Snikos.nikoleris@arm.com 169712749Sgiacomo.travaglini@arm.com RequestPtr request = std::make_shared<Request>( 169812749Sgiacomo.travaglini@arm.com regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId); 169912749Sgiacomo.travaglini@arm.com 170012749Sgiacomo.travaglini@arm.com request->taskId(blk.task_id); 170112724Snikos.nikoleris@arm.com if (blk.isSecure()) { 170212749Sgiacomo.travaglini@arm.com request->setFlags(Request::SECURE); 170312724Snikos.nikoleris@arm.com } 170412724Snikos.nikoleris@arm.com 170512749Sgiacomo.travaglini@arm.com Packet packet(request, MemCmd::WriteReq); 170612724Snikos.nikoleris@arm.com packet.dataStatic(blk.data); 170712724Snikos.nikoleris@arm.com 170812724Snikos.nikoleris@arm.com memSidePort.sendFunctional(&packet); 170912724Snikos.nikoleris@arm.com 171012724Snikos.nikoleris@arm.com blk.status &= ~BlkDirty; 171112724Snikos.nikoleris@arm.com } 171212724Snikos.nikoleris@arm.com} 171312724Snikos.nikoleris@arm.com 171412728Snikos.nikoleris@arm.comvoid 171512724Snikos.nikoleris@arm.comBaseCache::invalidateVisitor(CacheBlk &blk) 171612724Snikos.nikoleris@arm.com{ 171712724Snikos.nikoleris@arm.com if (blk.isDirty()) 171812724Snikos.nikoleris@arm.com warn_once("Invalidating dirty cache lines. 
" \ 171912724Snikos.nikoleris@arm.com "Expect things to break.\n"); 172012724Snikos.nikoleris@arm.com 172112724Snikos.nikoleris@arm.com if (blk.isValid()) { 172212724Snikos.nikoleris@arm.com assert(!blk.isDirty()); 172312724Snikos.nikoleris@arm.com invalidateBlock(&blk); 172412724Snikos.nikoleris@arm.com } 172512724Snikos.nikoleris@arm.com} 172612724Snikos.nikoleris@arm.com 172712724Snikos.nikoleris@arm.comTick 172812724Snikos.nikoleris@arm.comBaseCache::nextQueueReadyTime() const 172912724Snikos.nikoleris@arm.com{ 173012724Snikos.nikoleris@arm.com Tick nextReady = std::min(mshrQueue.nextReadyTime(), 173112724Snikos.nikoleris@arm.com writeBuffer.nextReadyTime()); 173212724Snikos.nikoleris@arm.com 173312724Snikos.nikoleris@arm.com // Don't signal prefetch ready time if no MSHRs available 173412724Snikos.nikoleris@arm.com // Will signal once enoguh MSHRs are deallocated 173512724Snikos.nikoleris@arm.com if (prefetcher && mshrQueue.canPrefetch()) { 173612724Snikos.nikoleris@arm.com nextReady = std::min(nextReady, 173712724Snikos.nikoleris@arm.com prefetcher->nextPrefetchReadyTime()); 173812724Snikos.nikoleris@arm.com } 173912724Snikos.nikoleris@arm.com 174012724Snikos.nikoleris@arm.com return nextReady; 174112724Snikos.nikoleris@arm.com} 174212724Snikos.nikoleris@arm.com 174312724Snikos.nikoleris@arm.com 174412724Snikos.nikoleris@arm.combool 174512724Snikos.nikoleris@arm.comBaseCache::sendMSHRQueuePacket(MSHR* mshr) 174612724Snikos.nikoleris@arm.com{ 174712724Snikos.nikoleris@arm.com assert(mshr); 174812724Snikos.nikoleris@arm.com 174912724Snikos.nikoleris@arm.com // use request from 1st target 175012724Snikos.nikoleris@arm.com PacketPtr tgt_pkt = mshr->getTarget()->pkt; 175112724Snikos.nikoleris@arm.com 175212724Snikos.nikoleris@arm.com DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print()); 175312724Snikos.nikoleris@arm.com 175413352Snikos.nikoleris@arm.com // if the cache is in write coalescing mode or (additionally) in 175513352Snikos.nikoleris@arm.com // no allocation mode, and we have a write packet with an MSHR 175613352Snikos.nikoleris@arm.com // that is not a whole-line write (due to incompatible flags etc), 175713352Snikos.nikoleris@arm.com // then reset the write mode 175813352Snikos.nikoleris@arm.com if (writeAllocator && writeAllocator->coalesce() && tgt_pkt->isWrite()) { 175913352Snikos.nikoleris@arm.com if (!mshr->isWholeLineWrite()) { 176013352Snikos.nikoleris@arm.com // if we are currently write coalescing, hold on the 176113352Snikos.nikoleris@arm.com // MSHR as many cycles extra as we need to completely 176213352Snikos.nikoleris@arm.com // write a cache line 176313352Snikos.nikoleris@arm.com if (writeAllocator->delay(mshr->blkAddr)) { 176413352Snikos.nikoleris@arm.com Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod(); 176513352Snikos.nikoleris@arm.com DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow " 176613352Snikos.nikoleris@arm.com "for write coalescing\n", tgt_pkt->print(), delay); 176713352Snikos.nikoleris@arm.com mshrQueue.delay(mshr, delay); 176813352Snikos.nikoleris@arm.com return false; 176913352Snikos.nikoleris@arm.com } else { 177013352Snikos.nikoleris@arm.com writeAllocator->reset(); 177113352Snikos.nikoleris@arm.com } 177213352Snikos.nikoleris@arm.com } else { 177313352Snikos.nikoleris@arm.com writeAllocator->resetDelay(mshr->blkAddr); 177413352Snikos.nikoleris@arm.com } 177513352Snikos.nikoleris@arm.com } 177613352Snikos.nikoleris@arm.com 177712724Snikos.nikoleris@arm.com CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure); 
177812724Snikos.nikoleris@arm.com
177912724Snikos.nikoleris@arm.com // either a prefetch that is not present upstream, or a normal
178012724Snikos.nikoleris@arm.com // MSHR request, proceed to get the packet to send downstream
178113350Snikos.nikoleris@arm.com PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable(),
178213350Snikos.nikoleris@arm.com mshr->isWholeLineWrite());
178312724Snikos.nikoleris@arm.com
178412724Snikos.nikoleris@arm.com mshr->isForward = (pkt == nullptr);
178512724Snikos.nikoleris@arm.com
178612724Snikos.nikoleris@arm.com if (mshr->isForward) {
178712724Snikos.nikoleris@arm.com // not a cache block request, but a response is expected
178812724Snikos.nikoleris@arm.com // make copy of current packet to forward, keep current
178912724Snikos.nikoleris@arm.com // copy for response handling
179012724Snikos.nikoleris@arm.com pkt = new Packet(tgt_pkt, false, true);
179112724Snikos.nikoleris@arm.com assert(!pkt->isWrite());
179212724Snikos.nikoleris@arm.com }
179312724Snikos.nikoleris@arm.com
179412724Snikos.nikoleris@arm.com // play it safe and append (rather than set) the sender state,
179512724Snikos.nikoleris@arm.com // as forwarded packets may already have existing state
179612724Snikos.nikoleris@arm.com pkt->pushSenderState(mshr);
179712724Snikos.nikoleris@arm.com
179812724Snikos.nikoleris@arm.com if (pkt->isClean() && blk && blk->isDirty()) {
179912724Snikos.nikoleris@arm.com // A cache clean operation is looking for a dirty block. Mark
180012724Snikos.nikoleris@arm.com // the packet so that the destination xbar can determine that
180112724Snikos.nikoleris@arm.com // there will be a follow-up write packet as well.
180212724Snikos.nikoleris@arm.com pkt->setSatisfied();
180312724Snikos.nikoleris@arm.com }
180412724Snikos.nikoleris@arm.com
180512724Snikos.nikoleris@arm.com if (!memSidePort.sendTimingReq(pkt)) {
180612724Snikos.nikoleris@arm.com // we are awaiting a retry; we
180712724Snikos.nikoleris@arm.com // delete the packet and will create a new packet
180812724Snikos.nikoleris@arm.com // when we get the opportunity
180912724Snikos.nikoleris@arm.com delete pkt;
181012724Snikos.nikoleris@arm.com
181112724Snikos.nikoleris@arm.com // note that we have now masked any requestBus and
181212724Snikos.nikoleris@arm.com // schedSendEvent (we will wait for a retry before
181312724Snikos.nikoleris@arm.com // doing anything), and this is so even if we do not
181412724Snikos.nikoleris@arm.com // care about this packet and might override it before
181512724Snikos.nikoleris@arm.com // it gets retried
181612724Snikos.nikoleris@arm.com return true;
181712724Snikos.nikoleris@arm.com } else {
181812724Snikos.nikoleris@arm.com // As part of the call to sendTimingReq the packet is
181912724Snikos.nikoleris@arm.com // forwarded to all neighbouring caches (and any caches
182012724Snikos.nikoleris@arm.com // above them) as a snoop.
Thus at this point we know if
182112724Snikos.nikoleris@arm.com // any of the neighbouring caches are responding, and if
182212724Snikos.nikoleris@arm.com // so, we know it is dirty, and we can determine if it is
182312724Snikos.nikoleris@arm.com // being passed as Modified, making our MSHR the ordering
182412724Snikos.nikoleris@arm.com // point
182512724Snikos.nikoleris@arm.com bool pending_modified_resp = !pkt->hasSharers() &&
182612724Snikos.nikoleris@arm.com pkt->cacheResponding();
182712724Snikos.nikoleris@arm.com markInService(mshr, pending_modified_resp);
182812724Snikos.nikoleris@arm.com
182912724Snikos.nikoleris@arm.com if (pkt->isClean() && blk && blk->isDirty()) {
183012724Snikos.nikoleris@arm.com // A cache clean operation is looking for a dirty
183112724Snikos.nikoleris@arm.com // block. If a dirty block is encountered, a WriteClean
183212724Snikos.nikoleris@arm.com // will update any copies on the path to memory up to
183312724Snikos.nikoleris@arm.com // the point of reference.
183412724Snikos.nikoleris@arm.com DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
183512724Snikos.nikoleris@arm.com __func__, pkt->print(), blk->print());
183612724Snikos.nikoleris@arm.com PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
183712724Snikos.nikoleris@arm.com pkt->id);
183812724Snikos.nikoleris@arm.com PacketList writebacks;
183912724Snikos.nikoleris@arm.com writebacks.push_back(wb_pkt);
184012724Snikos.nikoleris@arm.com doWritebacks(writebacks, 0);
184112724Snikos.nikoleris@arm.com }
184212724Snikos.nikoleris@arm.com
184312724Snikos.nikoleris@arm.com return false;
184412724Snikos.nikoleris@arm.com }
184512724Snikos.nikoleris@arm.com }
184612724Snikos.nikoleris@arm.com
184712724Snikos.nikoleris@arm.com bool
184812724Snikos.nikoleris@arm.com BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
184912724Snikos.nikoleris@arm.com {
185012724Snikos.nikoleris@arm.com assert(wq_entry);
185112724Snikos.nikoleris@arm.com
185212724Snikos.nikoleris@arm.com // always a single target for write queue entries
185312724Snikos.nikoleris@arm.com PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;
185412724Snikos.nikoleris@arm.com
185512724Snikos.nikoleris@arm.com DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());
185612724Snikos.nikoleris@arm.com
185712724Snikos.nikoleris@arm.com // forward as is, both for evictions and uncacheable writes
185812724Snikos.nikoleris@arm.com if (!memSidePort.sendTimingReq(tgt_pkt)) {
185912724Snikos.nikoleris@arm.com // note that we have now masked any requestBus and
186012724Snikos.nikoleris@arm.com // schedSendEvent (we will wait for a retry before
186112724Snikos.nikoleris@arm.com // doing anything), and this is so even if we do not
186212724Snikos.nikoleris@arm.com // care about this packet and might override it before
186312724Snikos.nikoleris@arm.com // it gets retried
186412724Snikos.nikoleris@arm.com return true;
186512724Snikos.nikoleris@arm.com } else {
186612724Snikos.nikoleris@arm.com markInService(wq_entry);
186712724Snikos.nikoleris@arm.com return false;
186812724Snikos.nikoleris@arm.com }
186912724Snikos.nikoleris@arm.com }
187012724Snikos.nikoleris@arm.com
187112724Snikos.nikoleris@arm.com void
187212724Snikos.nikoleris@arm.com BaseCache::serialize(CheckpointOut &cp) const
187312724Snikos.nikoleris@arm.com {
187412724Snikos.nikoleris@arm.com bool dirty(isDirty());
187512724Snikos.nikoleris@arm.com
187612724Snikos.nikoleris@arm.com if (dirty) {
187712724Snikos.nikoleris@arm.com warn("*** The cache still contains dirty data.
***\n"); 187812724Snikos.nikoleris@arm.com warn(" Make sure to drain the system using the correct flags.\n"); 187912724Snikos.nikoleris@arm.com warn(" This checkpoint will not restore correctly " \ 188012724Snikos.nikoleris@arm.com "and dirty data in the cache will be lost!\n"); 188112724Snikos.nikoleris@arm.com } 188212724Snikos.nikoleris@arm.com 188312724Snikos.nikoleris@arm.com // Since we don't checkpoint the data in the cache, any dirty data 188412724Snikos.nikoleris@arm.com // will be lost when restoring from a checkpoint of a system that 188512724Snikos.nikoleris@arm.com // wasn't drained properly. Flag the checkpoint as invalid if the 188612724Snikos.nikoleris@arm.com // cache contains dirty data. 188712724Snikos.nikoleris@arm.com bool bad_checkpoint(dirty); 188812724Snikos.nikoleris@arm.com SERIALIZE_SCALAR(bad_checkpoint); 188912724Snikos.nikoleris@arm.com} 189012724Snikos.nikoleris@arm.com 189112724Snikos.nikoleris@arm.comvoid 189212724Snikos.nikoleris@arm.comBaseCache::unserialize(CheckpointIn &cp) 189312724Snikos.nikoleris@arm.com{ 189412724Snikos.nikoleris@arm.com bool bad_checkpoint; 189512724Snikos.nikoleris@arm.com UNSERIALIZE_SCALAR(bad_checkpoint); 189612724Snikos.nikoleris@arm.com if (bad_checkpoint) { 189712724Snikos.nikoleris@arm.com fatal("Restoring from checkpoints with dirty caches is not " 189812724Snikos.nikoleris@arm.com "supported in the classic memory system. Please remove any " 189912724Snikos.nikoleris@arm.com "caches or drain them properly before taking checkpoints.\n"); 190012724Snikos.nikoleris@arm.com } 190112724Snikos.nikoleris@arm.com} 190212724Snikos.nikoleris@arm.com 190312724Snikos.nikoleris@arm.comvoid 19042810SN/ABaseCache::regStats() 19052810SN/A{ 190613892Sgabeblack@google.com ClockedObject::regStats(); 190711522Sstephan.diestelhorst@arm.com 19082810SN/A using namespace Stats; 19092810SN/A 19102810SN/A // Hit statistics 19114022SN/A for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 19124022SN/A MemCmd cmd(access_idx); 19134022SN/A const string &cstr = cmd.toString(); 19142810SN/A 19152810SN/A hits[access_idx] 19168833Sdam.sunwoo@arm.com .init(system->maxMasters()) 19172810SN/A .name(name() + "." + cstr + "_hits") 19182810SN/A .desc("number of " + cstr + " hits") 19192810SN/A .flags(total | nozero | nonan) 19202810SN/A ; 19218833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 19228833Sdam.sunwoo@arm.com hits[access_idx].subname(i, system->getMasterName(i)); 19238833Sdam.sunwoo@arm.com } 19242810SN/A } 19252810SN/A 19264871SN/A// These macros make it easier to sum the right subset of commands and 19274871SN/A// to change the subset of commands that are considered "demand" vs 19284871SN/A// "non-demand" 19294871SN/A#define SUM_DEMAND(s) \ 193011455Sandreas.hansson@arm.com (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \ 193110885Sandreas.hansson@arm.com s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq]) 19324871SN/A 19334871SN/A// should writebacks be included here? prior code was inconsistent... 
19344871SN/A#define SUM_NON_DEMAND(s) \ 193513367Syuetsu.kodama@riken.jp (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq] + s[MemCmd::SoftPFExReq]) 19364871SN/A 19372810SN/A demandHits 19382810SN/A .name(name() + ".demand_hits") 19392810SN/A .desc("number of demand (read+write) hits") 19408833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 19412810SN/A ; 19424871SN/A demandHits = SUM_DEMAND(hits); 19438833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 19448833Sdam.sunwoo@arm.com demandHits.subname(i, system->getMasterName(i)); 19458833Sdam.sunwoo@arm.com } 19462810SN/A 19472810SN/A overallHits 19482810SN/A .name(name() + ".overall_hits") 19492810SN/A .desc("number of overall hits") 19508833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 19512810SN/A ; 19524871SN/A overallHits = demandHits + SUM_NON_DEMAND(hits); 19538833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 19548833Sdam.sunwoo@arm.com overallHits.subname(i, system->getMasterName(i)); 19558833Sdam.sunwoo@arm.com } 19562810SN/A 19572810SN/A // Miss statistics 19584022SN/A for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 19594022SN/A MemCmd cmd(access_idx); 19604022SN/A const string &cstr = cmd.toString(); 19612810SN/A 19622810SN/A misses[access_idx] 19638833Sdam.sunwoo@arm.com .init(system->maxMasters()) 19642810SN/A .name(name() + "." + cstr + "_misses") 19652810SN/A .desc("number of " + cstr + " misses") 19662810SN/A .flags(total | nozero | nonan) 19672810SN/A ; 19688833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 19698833Sdam.sunwoo@arm.com misses[access_idx].subname(i, system->getMasterName(i)); 19708833Sdam.sunwoo@arm.com } 19712810SN/A } 19722810SN/A 19732810SN/A demandMisses 19742810SN/A .name(name() + ".demand_misses") 19752810SN/A .desc("number of demand (read+write) misses") 19768833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 19772810SN/A ; 19784871SN/A demandMisses = SUM_DEMAND(misses); 19798833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 19808833Sdam.sunwoo@arm.com demandMisses.subname(i, system->getMasterName(i)); 19818833Sdam.sunwoo@arm.com } 19822810SN/A 19832810SN/A overallMisses 19842810SN/A .name(name() + ".overall_misses") 19852810SN/A .desc("number of overall misses") 19868833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 19872810SN/A ; 19884871SN/A overallMisses = demandMisses + SUM_NON_DEMAND(misses); 19898833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 19908833Sdam.sunwoo@arm.com overallMisses.subname(i, system->getMasterName(i)); 19918833Sdam.sunwoo@arm.com } 19922810SN/A 19932810SN/A // Miss latency statistics 19944022SN/A for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 19954022SN/A MemCmd cmd(access_idx); 19964022SN/A const string &cstr = cmd.toString(); 19972810SN/A 19982810SN/A missLatency[access_idx] 19998833Sdam.sunwoo@arm.com .init(system->maxMasters()) 20002810SN/A .name(name() + "." 
+ cstr + "_miss_latency") 20012810SN/A .desc("number of " + cstr + " miss cycles") 20022810SN/A .flags(total | nozero | nonan) 20032810SN/A ; 20048833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 20058833Sdam.sunwoo@arm.com missLatency[access_idx].subname(i, system->getMasterName(i)); 20068833Sdam.sunwoo@arm.com } 20072810SN/A } 20082810SN/A 20092810SN/A demandMissLatency 20102810SN/A .name(name() + ".demand_miss_latency") 20112810SN/A .desc("number of demand (read+write) miss cycles") 20128833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 20132810SN/A ; 20144871SN/A demandMissLatency = SUM_DEMAND(missLatency); 20158833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 20168833Sdam.sunwoo@arm.com demandMissLatency.subname(i, system->getMasterName(i)); 20178833Sdam.sunwoo@arm.com } 20182810SN/A 20192810SN/A overallMissLatency 20202810SN/A .name(name() + ".overall_miss_latency") 20212810SN/A .desc("number of overall miss cycles") 20228833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 20232810SN/A ; 20244871SN/A overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency); 20258833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 20268833Sdam.sunwoo@arm.com overallMissLatency.subname(i, system->getMasterName(i)); 20278833Sdam.sunwoo@arm.com } 20282810SN/A 20292810SN/A // access formulas 20304022SN/A for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 20314022SN/A MemCmd cmd(access_idx); 20324022SN/A const string &cstr = cmd.toString(); 20332810SN/A 20342810SN/A accesses[access_idx] 20352810SN/A .name(name() + "." + cstr + "_accesses") 20362810SN/A .desc("number of " + cstr + " accesses(hits+misses)") 20372810SN/A .flags(total | nozero | nonan) 20382810SN/A ; 20398833Sdam.sunwoo@arm.com accesses[access_idx] = hits[access_idx] + misses[access_idx]; 20402810SN/A 20418833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 20428833Sdam.sunwoo@arm.com accesses[access_idx].subname(i, system->getMasterName(i)); 20438833Sdam.sunwoo@arm.com } 20442810SN/A } 20452810SN/A 20462810SN/A demandAccesses 20472810SN/A .name(name() + ".demand_accesses") 20482810SN/A .desc("number of demand (read+write) accesses") 20498833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 20502810SN/A ; 20512810SN/A demandAccesses = demandHits + demandMisses; 20528833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 20538833Sdam.sunwoo@arm.com demandAccesses.subname(i, system->getMasterName(i)); 20548833Sdam.sunwoo@arm.com } 20552810SN/A 20562810SN/A overallAccesses 20572810SN/A .name(name() + ".overall_accesses") 20582810SN/A .desc("number of overall (read+write) accesses") 20598833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 20602810SN/A ; 20612810SN/A overallAccesses = overallHits + overallMisses; 20628833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 20638833Sdam.sunwoo@arm.com overallAccesses.subname(i, system->getMasterName(i)); 20648833Sdam.sunwoo@arm.com } 20652810SN/A 20662810SN/A // miss rate formulas 20674022SN/A for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 20684022SN/A MemCmd cmd(access_idx); 20694022SN/A const string &cstr = cmd.toString(); 20702810SN/A 20712810SN/A missRate[access_idx] 20722810SN/A .name(name() + "." 
+ cstr + "_miss_rate") 20732810SN/A .desc("miss rate for " + cstr + " accesses") 20742810SN/A .flags(total | nozero | nonan) 20752810SN/A ; 20768833Sdam.sunwoo@arm.com missRate[access_idx] = misses[access_idx] / accesses[access_idx]; 20772810SN/A 20788833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 20798833Sdam.sunwoo@arm.com missRate[access_idx].subname(i, system->getMasterName(i)); 20808833Sdam.sunwoo@arm.com } 20812810SN/A } 20822810SN/A 20832810SN/A demandMissRate 20842810SN/A .name(name() + ".demand_miss_rate") 20852810SN/A .desc("miss rate for demand accesses") 20868833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 20872810SN/A ; 20882810SN/A demandMissRate = demandMisses / demandAccesses; 20898833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 20908833Sdam.sunwoo@arm.com demandMissRate.subname(i, system->getMasterName(i)); 20918833Sdam.sunwoo@arm.com } 20922810SN/A 20932810SN/A overallMissRate 20942810SN/A .name(name() + ".overall_miss_rate") 20952810SN/A .desc("miss rate for overall accesses") 20968833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 20972810SN/A ; 20982810SN/A overallMissRate = overallMisses / overallAccesses; 20998833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 21008833Sdam.sunwoo@arm.com overallMissRate.subname(i, system->getMasterName(i)); 21018833Sdam.sunwoo@arm.com } 21022810SN/A 21032810SN/A // miss latency formulas 21044022SN/A for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 21054022SN/A MemCmd cmd(access_idx); 21064022SN/A const string &cstr = cmd.toString(); 21072810SN/A 21082810SN/A avgMissLatency[access_idx] 21092810SN/A .name(name() + "." + cstr + "_avg_miss_latency") 21102810SN/A .desc("average " + cstr + " miss latency") 21112810SN/A .flags(total | nozero | nonan) 21122810SN/A ; 21132810SN/A avgMissLatency[access_idx] = 21142810SN/A missLatency[access_idx] / misses[access_idx]; 21158833Sdam.sunwoo@arm.com 21168833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 21178833Sdam.sunwoo@arm.com avgMissLatency[access_idx].subname(i, system->getMasterName(i)); 21188833Sdam.sunwoo@arm.com } 21192810SN/A } 21202810SN/A 21212810SN/A demandAvgMissLatency 21222810SN/A .name(name() + ".demand_avg_miss_latency") 21232810SN/A .desc("average overall miss latency") 21248833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 21252810SN/A ; 21262810SN/A demandAvgMissLatency = demandMissLatency / demandMisses; 21278833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 21288833Sdam.sunwoo@arm.com demandAvgMissLatency.subname(i, system->getMasterName(i)); 21298833Sdam.sunwoo@arm.com } 21302810SN/A 21312810SN/A overallAvgMissLatency 21322810SN/A .name(name() + ".overall_avg_miss_latency") 21332810SN/A .desc("average overall miss latency") 21348833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 21352810SN/A ; 21362810SN/A overallAvgMissLatency = overallMissLatency / overallMisses; 21378833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 21388833Sdam.sunwoo@arm.com overallAvgMissLatency.subname(i, system->getMasterName(i)); 21398833Sdam.sunwoo@arm.com } 21402810SN/A 21412810SN/A blocked_cycles.init(NUM_BLOCKED_CAUSES); 21422810SN/A blocked_cycles 21432810SN/A .name(name() + ".blocked_cycles") 21442810SN/A .desc("number of cycles access was blocked") 21452810SN/A .subname(Blocked_NoMSHRs, "no_mshrs") 21462810SN/A .subname(Blocked_NoTargets, "no_targets") 21472810SN/A ; 21482810SN/A 21492810SN/A 21502810SN/A 
blocked_causes.init(NUM_BLOCKED_CAUSES); 21512810SN/A blocked_causes 21522810SN/A .name(name() + ".blocked") 21532810SN/A .desc("number of cycles access was blocked") 21542810SN/A .subname(Blocked_NoMSHRs, "no_mshrs") 21552810SN/A .subname(Blocked_NoTargets, "no_targets") 21562810SN/A ; 21572810SN/A 21582810SN/A avg_blocked 21592810SN/A .name(name() + ".avg_blocked_cycles") 21602810SN/A .desc("average number of cycles each access was blocked") 21612810SN/A .subname(Blocked_NoMSHRs, "no_mshrs") 21622810SN/A .subname(Blocked_NoTargets, "no_targets") 21632810SN/A ; 21642810SN/A 21652810SN/A avg_blocked = blocked_cycles / blocked_causes; 21662810SN/A 216711436SRekai.GonzalezAlberquilla@arm.com unusedPrefetches 216811436SRekai.GonzalezAlberquilla@arm.com .name(name() + ".unused_prefetches") 216911436SRekai.GonzalezAlberquilla@arm.com .desc("number of HardPF blocks evicted w/o reference") 217011436SRekai.GonzalezAlberquilla@arm.com .flags(nozero) 217111436SRekai.GonzalezAlberquilla@arm.com ; 217211436SRekai.GonzalezAlberquilla@arm.com 21734626SN/A writebacks 21748833Sdam.sunwoo@arm.com .init(system->maxMasters()) 21754626SN/A .name(name() + ".writebacks") 21764626SN/A .desc("number of writebacks") 21778833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 21784626SN/A ; 21798833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 21808833Sdam.sunwoo@arm.com writebacks.subname(i, system->getMasterName(i)); 21818833Sdam.sunwoo@arm.com } 21824626SN/A 21834626SN/A // MSHR statistics 21844626SN/A // MSHR hit statistics 21854626SN/A for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 21864626SN/A MemCmd cmd(access_idx); 21874626SN/A const string &cstr = cmd.toString(); 21884626SN/A 21894626SN/A mshr_hits[access_idx] 21908833Sdam.sunwoo@arm.com .init(system->maxMasters()) 21914626SN/A .name(name() + "." + cstr + "_mshr_hits") 21924626SN/A .desc("number of " + cstr + " MSHR hits") 21934626SN/A .flags(total | nozero | nonan) 21944626SN/A ; 21958833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 21968833Sdam.sunwoo@arm.com mshr_hits[access_idx].subname(i, system->getMasterName(i)); 21978833Sdam.sunwoo@arm.com } 21984626SN/A } 21994626SN/A 22004626SN/A demandMshrHits 22014626SN/A .name(name() + ".demand_mshr_hits") 22024626SN/A .desc("number of demand (read+write) MSHR hits") 22038833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 22044626SN/A ; 22054871SN/A demandMshrHits = SUM_DEMAND(mshr_hits); 22068833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 22078833Sdam.sunwoo@arm.com demandMshrHits.subname(i, system->getMasterName(i)); 22088833Sdam.sunwoo@arm.com } 22094626SN/A 22104626SN/A overallMshrHits 22114626SN/A .name(name() + ".overall_mshr_hits") 22124626SN/A .desc("number of overall MSHR hits") 22138833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 22144626SN/A ; 22154871SN/A overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits); 22168833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 22178833Sdam.sunwoo@arm.com overallMshrHits.subname(i, system->getMasterName(i)); 22188833Sdam.sunwoo@arm.com } 22194626SN/A 22204626SN/A // MSHR miss statistics 22214626SN/A for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 22224626SN/A MemCmd cmd(access_idx); 22234626SN/A const string &cstr = cmd.toString(); 22244626SN/A 22254626SN/A mshr_misses[access_idx] 22268833Sdam.sunwoo@arm.com .init(system->maxMasters()) 22274626SN/A .name(name() + "." 
+ cstr + "_mshr_misses") 22284626SN/A .desc("number of " + cstr + " MSHR misses") 22294626SN/A .flags(total | nozero | nonan) 22304626SN/A ; 22318833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 22328833Sdam.sunwoo@arm.com mshr_misses[access_idx].subname(i, system->getMasterName(i)); 22338833Sdam.sunwoo@arm.com } 22344626SN/A } 22354626SN/A 22364626SN/A demandMshrMisses 22374626SN/A .name(name() + ".demand_mshr_misses") 22384626SN/A .desc("number of demand (read+write) MSHR misses") 22398833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 22404626SN/A ; 22414871SN/A demandMshrMisses = SUM_DEMAND(mshr_misses); 22428833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 22438833Sdam.sunwoo@arm.com demandMshrMisses.subname(i, system->getMasterName(i)); 22448833Sdam.sunwoo@arm.com } 22454626SN/A 22464626SN/A overallMshrMisses 22474626SN/A .name(name() + ".overall_mshr_misses") 22484626SN/A .desc("number of overall MSHR misses") 22498833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 22504626SN/A ; 22514871SN/A overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses); 22528833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 22538833Sdam.sunwoo@arm.com overallMshrMisses.subname(i, system->getMasterName(i)); 22548833Sdam.sunwoo@arm.com } 22554626SN/A 22564626SN/A // MSHR miss latency statistics 22574626SN/A for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 22584626SN/A MemCmd cmd(access_idx); 22594626SN/A const string &cstr = cmd.toString(); 22604626SN/A 22614626SN/A mshr_miss_latency[access_idx] 22628833Sdam.sunwoo@arm.com .init(system->maxMasters()) 22634626SN/A .name(name() + "." + cstr + "_mshr_miss_latency") 22644626SN/A .desc("number of " + cstr + " MSHR miss cycles") 22654626SN/A .flags(total | nozero | nonan) 22664626SN/A ; 22678833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 22688833Sdam.sunwoo@arm.com mshr_miss_latency[access_idx].subname(i, system->getMasterName(i)); 22698833Sdam.sunwoo@arm.com } 22704626SN/A } 22714626SN/A 22724626SN/A demandMshrMissLatency 22734626SN/A .name(name() + ".demand_mshr_miss_latency") 22744626SN/A .desc("number of demand (read+write) MSHR miss cycles") 22758833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 22764626SN/A ; 22774871SN/A demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency); 22788833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 22798833Sdam.sunwoo@arm.com demandMshrMissLatency.subname(i, system->getMasterName(i)); 22808833Sdam.sunwoo@arm.com } 22814626SN/A 22824626SN/A overallMshrMissLatency 22834626SN/A .name(name() + ".overall_mshr_miss_latency") 22844626SN/A .desc("number of overall MSHR miss cycles") 22858833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 22864626SN/A ; 22874871SN/A overallMshrMissLatency = 22884871SN/A demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency); 22898833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 22908833Sdam.sunwoo@arm.com overallMshrMissLatency.subname(i, system->getMasterName(i)); 22918833Sdam.sunwoo@arm.com } 22924626SN/A 22934626SN/A // MSHR uncacheable statistics 22944626SN/A for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 22954626SN/A MemCmd cmd(access_idx); 22964626SN/A const string &cstr = cmd.toString(); 22974626SN/A 22984626SN/A mshr_uncacheable[access_idx] 22998833Sdam.sunwoo@arm.com .init(system->maxMasters()) 23004626SN/A .name(name() + "." 
+ cstr + "_mshr_uncacheable") 23014626SN/A .desc("number of " + cstr + " MSHR uncacheable") 23024626SN/A .flags(total | nozero | nonan) 23034626SN/A ; 23048833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 23058833Sdam.sunwoo@arm.com mshr_uncacheable[access_idx].subname(i, system->getMasterName(i)); 23068833Sdam.sunwoo@arm.com } 23074626SN/A } 23084626SN/A 23094626SN/A overallMshrUncacheable 23104626SN/A .name(name() + ".overall_mshr_uncacheable_misses") 23114626SN/A .desc("number of overall MSHR uncacheable misses") 23128833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 23134626SN/A ; 23144871SN/A overallMshrUncacheable = 23154871SN/A SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable); 23168833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 23178833Sdam.sunwoo@arm.com overallMshrUncacheable.subname(i, system->getMasterName(i)); 23188833Sdam.sunwoo@arm.com } 23194626SN/A 23204626SN/A // MSHR miss latency statistics 23214626SN/A for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 23224626SN/A MemCmd cmd(access_idx); 23234626SN/A const string &cstr = cmd.toString(); 23244626SN/A 23254626SN/A mshr_uncacheable_lat[access_idx] 23268833Sdam.sunwoo@arm.com .init(system->maxMasters()) 23274626SN/A .name(name() + "." + cstr + "_mshr_uncacheable_latency") 23284626SN/A .desc("number of " + cstr + " MSHR uncacheable cycles") 23294626SN/A .flags(total | nozero | nonan) 23304626SN/A ; 23318833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 233211483Snikos.nikoleris@arm.com mshr_uncacheable_lat[access_idx].subname( 233311483Snikos.nikoleris@arm.com i, system->getMasterName(i)); 23348833Sdam.sunwoo@arm.com } 23354626SN/A } 23364626SN/A 23374626SN/A overallMshrUncacheableLatency 23384626SN/A .name(name() + ".overall_mshr_uncacheable_latency") 23394626SN/A .desc("number of overall MSHR uncacheable cycles") 23408833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 23414626SN/A ; 23424871SN/A overallMshrUncacheableLatency = 23434871SN/A SUM_DEMAND(mshr_uncacheable_lat) + 23444871SN/A SUM_NON_DEMAND(mshr_uncacheable_lat); 23458833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 23468833Sdam.sunwoo@arm.com overallMshrUncacheableLatency.subname(i, system->getMasterName(i)); 23478833Sdam.sunwoo@arm.com } 23484626SN/A 23494626SN/A#if 0 23504626SN/A // MSHR access formulas 23514626SN/A for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 23524626SN/A MemCmd cmd(access_idx); 23534626SN/A const string &cstr = cmd.toString(); 23544626SN/A 23554626SN/A mshrAccesses[access_idx] 23564626SN/A .name(name() + "." 
+ cstr + "_mshr_accesses") 23574626SN/A .desc("number of " + cstr + " mshr accesses(hits+misses)") 23584626SN/A .flags(total | nozero | nonan) 23594626SN/A ; 23604626SN/A mshrAccesses[access_idx] = 23614626SN/A mshr_hits[access_idx] + mshr_misses[access_idx] 23624626SN/A + mshr_uncacheable[access_idx]; 23634626SN/A } 23644626SN/A 23654626SN/A demandMshrAccesses 23664626SN/A .name(name() + ".demand_mshr_accesses") 23674626SN/A .desc("number of demand (read+write) mshr accesses") 23684626SN/A .flags(total | nozero | nonan) 23694626SN/A ; 23704626SN/A demandMshrAccesses = demandMshrHits + demandMshrMisses; 23714626SN/A 23724626SN/A overallMshrAccesses 23734626SN/A .name(name() + ".overall_mshr_accesses") 23744626SN/A .desc("number of overall (read+write) mshr accesses") 23754626SN/A .flags(total | nozero | nonan) 23764626SN/A ; 23774626SN/A overallMshrAccesses = overallMshrHits + overallMshrMisses 23784626SN/A + overallMshrUncacheable; 23794626SN/A#endif 23804626SN/A 23814626SN/A // MSHR miss rate formulas 23824626SN/A for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 23834626SN/A MemCmd cmd(access_idx); 23844626SN/A const string &cstr = cmd.toString(); 23854626SN/A 23864626SN/A mshrMissRate[access_idx] 23874626SN/A .name(name() + "." + cstr + "_mshr_miss_rate") 23884626SN/A .desc("mshr miss rate for " + cstr + " accesses") 23894626SN/A .flags(total | nozero | nonan) 23904626SN/A ; 23914626SN/A mshrMissRate[access_idx] = 23924626SN/A mshr_misses[access_idx] / accesses[access_idx]; 23938833Sdam.sunwoo@arm.com 23948833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 23958833Sdam.sunwoo@arm.com mshrMissRate[access_idx].subname(i, system->getMasterName(i)); 23968833Sdam.sunwoo@arm.com } 23974626SN/A } 23984626SN/A 23994626SN/A demandMshrMissRate 24004626SN/A .name(name() + ".demand_mshr_miss_rate") 24014626SN/A .desc("mshr miss rate for demand accesses") 24028833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 24034626SN/A ; 24044626SN/A demandMshrMissRate = demandMshrMisses / demandAccesses; 24058833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 24068833Sdam.sunwoo@arm.com demandMshrMissRate.subname(i, system->getMasterName(i)); 24078833Sdam.sunwoo@arm.com } 24084626SN/A 24094626SN/A overallMshrMissRate 24104626SN/A .name(name() + ".overall_mshr_miss_rate") 24114626SN/A .desc("mshr miss rate for overall accesses") 24128833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 24134626SN/A ; 24144626SN/A overallMshrMissRate = overallMshrMisses / overallAccesses; 24158833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 24168833Sdam.sunwoo@arm.com overallMshrMissRate.subname(i, system->getMasterName(i)); 24178833Sdam.sunwoo@arm.com } 24184626SN/A 24194626SN/A // mshrMiss latency formulas 24204626SN/A for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 24214626SN/A MemCmd cmd(access_idx); 24224626SN/A const string &cstr = cmd.toString(); 24234626SN/A 24244626SN/A avgMshrMissLatency[access_idx] 24254626SN/A .name(name() + "." 
+ cstr + "_avg_mshr_miss_latency") 24264626SN/A .desc("average " + cstr + " mshr miss latency") 24274626SN/A .flags(total | nozero | nonan) 24284626SN/A ; 24294626SN/A avgMshrMissLatency[access_idx] = 24304626SN/A mshr_miss_latency[access_idx] / mshr_misses[access_idx]; 24318833Sdam.sunwoo@arm.com 24328833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 243311483Snikos.nikoleris@arm.com avgMshrMissLatency[access_idx].subname( 243411483Snikos.nikoleris@arm.com i, system->getMasterName(i)); 24358833Sdam.sunwoo@arm.com } 24364626SN/A } 24374626SN/A 24384626SN/A demandAvgMshrMissLatency 24394626SN/A .name(name() + ".demand_avg_mshr_miss_latency") 24404626SN/A .desc("average overall mshr miss latency") 24418833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 24424626SN/A ; 24434626SN/A demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses; 24448833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 24458833Sdam.sunwoo@arm.com demandAvgMshrMissLatency.subname(i, system->getMasterName(i)); 24468833Sdam.sunwoo@arm.com } 24474626SN/A 24484626SN/A overallAvgMshrMissLatency 24494626SN/A .name(name() + ".overall_avg_mshr_miss_latency") 24504626SN/A .desc("average overall mshr miss latency") 24518833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 24524626SN/A ; 24534626SN/A overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses; 24548833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 24558833Sdam.sunwoo@arm.com overallAvgMshrMissLatency.subname(i, system->getMasterName(i)); 24568833Sdam.sunwoo@arm.com } 24574626SN/A 24584626SN/A // mshrUncacheable latency formulas 24594626SN/A for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 24604626SN/A MemCmd cmd(access_idx); 24614626SN/A const string &cstr = cmd.toString(); 24624626SN/A 24634626SN/A avgMshrUncacheableLatency[access_idx] 24644626SN/A .name(name() + "." 
+ cstr + "_avg_mshr_uncacheable_latency") 24654626SN/A .desc("average " + cstr + " mshr uncacheable latency") 24664626SN/A .flags(total | nozero | nonan) 24674626SN/A ; 24684626SN/A avgMshrUncacheableLatency[access_idx] = 24694626SN/A mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx]; 24708833Sdam.sunwoo@arm.com 24718833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 247211483Snikos.nikoleris@arm.com avgMshrUncacheableLatency[access_idx].subname( 247311483Snikos.nikoleris@arm.com i, system->getMasterName(i)); 24748833Sdam.sunwoo@arm.com } 24754626SN/A } 24764626SN/A 24774626SN/A overallAvgMshrUncacheableLatency 24784626SN/A .name(name() + ".overall_avg_mshr_uncacheable_latency") 24794626SN/A .desc("average overall mshr uncacheable latency") 24808833Sdam.sunwoo@arm.com .flags(total | nozero | nonan) 24814626SN/A ; 248211483Snikos.nikoleris@arm.com overallAvgMshrUncacheableLatency = 248311483Snikos.nikoleris@arm.com overallMshrUncacheableLatency / overallMshrUncacheable; 24848833Sdam.sunwoo@arm.com for (int i = 0; i < system->maxMasters(); i++) { 24858833Sdam.sunwoo@arm.com overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i)); 24868833Sdam.sunwoo@arm.com } 24874626SN/A 248812702Snikos.nikoleris@arm.com replacements 248912702Snikos.nikoleris@arm.com .name(name() + ".replacements") 249012702Snikos.nikoleris@arm.com .desc("number of replacements") 249112702Snikos.nikoleris@arm.com ; 249213947Sodanrc@yahoo.com.br 249313947Sodanrc@yahoo.com.br dataExpansions 249413947Sodanrc@yahoo.com.br .name(name() + ".data_expansions") 249513947Sodanrc@yahoo.com.br .desc("number of data expansions") 249613947Sodanrc@yahoo.com.br .flags(nozero | nonan) 249713947Sodanrc@yahoo.com.br ; 24982810SN/A} 249912724Snikos.nikoleris@arm.com 250013416Sjavier.bueno@metempsy.comvoid 250113416Sjavier.bueno@metempsy.comBaseCache::regProbePoints() 250213416Sjavier.bueno@metempsy.com{ 250313416Sjavier.bueno@metempsy.com ppHit = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Hit"); 250413416Sjavier.bueno@metempsy.com ppMiss = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Miss"); 250513717Sivan.pizarro@metempsy.com ppFill = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Fill"); 250613416Sjavier.bueno@metempsy.com} 250713416Sjavier.bueno@metempsy.com 250812724Snikos.nikoleris@arm.com/////////////// 250912724Snikos.nikoleris@arm.com// 251012724Snikos.nikoleris@arm.com// CpuSidePort 251112724Snikos.nikoleris@arm.com// 251212724Snikos.nikoleris@arm.com/////////////// 251312724Snikos.nikoleris@arm.combool 251412724Snikos.nikoleris@arm.comBaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt) 251512724Snikos.nikoleris@arm.com{ 251612725Snikos.nikoleris@arm.com // Snoops shouldn't happen when bypassing caches 251712725Snikos.nikoleris@arm.com assert(!cache->system->bypassCaches()); 251812725Snikos.nikoleris@arm.com 251912725Snikos.nikoleris@arm.com assert(pkt->isResponse()); 252012725Snikos.nikoleris@arm.com 252112724Snikos.nikoleris@arm.com // Express snoop responses from master to slave, e.g., from L1 to L2 252212724Snikos.nikoleris@arm.com cache->recvTimingSnoopResp(pkt); 252312724Snikos.nikoleris@arm.com return true; 252412724Snikos.nikoleris@arm.com} 252512724Snikos.nikoleris@arm.com 252612724Snikos.nikoleris@arm.com 252712724Snikos.nikoleris@arm.combool 252812724Snikos.nikoleris@arm.comBaseCache::CpuSidePort::tryTiming(PacketPtr pkt) 252912724Snikos.nikoleris@arm.com{ 253012725Snikos.nikoleris@arm.com if (cache->system->bypassCaches() || 
pkt->isExpressSnoop()) { 253112724Snikos.nikoleris@arm.com // always let express snoop packets through even if blocked 253212724Snikos.nikoleris@arm.com return true; 253312724Snikos.nikoleris@arm.com } else if (blocked || mustSendRetry) { 253412724Snikos.nikoleris@arm.com // either already committed to send a retry, or blocked 253512724Snikos.nikoleris@arm.com mustSendRetry = true; 253612724Snikos.nikoleris@arm.com return false; 253712724Snikos.nikoleris@arm.com } 253812724Snikos.nikoleris@arm.com mustSendRetry = false; 253912724Snikos.nikoleris@arm.com return true; 254012724Snikos.nikoleris@arm.com} 254112724Snikos.nikoleris@arm.com 254212724Snikos.nikoleris@arm.combool 254312724Snikos.nikoleris@arm.comBaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt) 254412724Snikos.nikoleris@arm.com{ 254512725Snikos.nikoleris@arm.com assert(pkt->isRequest()); 254612725Snikos.nikoleris@arm.com 254712725Snikos.nikoleris@arm.com if (cache->system->bypassCaches()) { 254812725Snikos.nikoleris@arm.com // Just forward the packet if caches are disabled. 254912725Snikos.nikoleris@arm.com // @todo This should really enqueue the packet rather 255012725Snikos.nikoleris@arm.com bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt); 255112725Snikos.nikoleris@arm.com assert(success); 255212725Snikos.nikoleris@arm.com return true; 255312725Snikos.nikoleris@arm.com } else if (tryTiming(pkt)) { 255412724Snikos.nikoleris@arm.com cache->recvTimingReq(pkt); 255512724Snikos.nikoleris@arm.com return true; 255612724Snikos.nikoleris@arm.com } 255712724Snikos.nikoleris@arm.com return false; 255812724Snikos.nikoleris@arm.com} 255912724Snikos.nikoleris@arm.com 256012724Snikos.nikoleris@arm.comTick 256112724Snikos.nikoleris@arm.comBaseCache::CpuSidePort::recvAtomic(PacketPtr pkt) 256212724Snikos.nikoleris@arm.com{ 256312725Snikos.nikoleris@arm.com if (cache->system->bypassCaches()) { 256412725Snikos.nikoleris@arm.com // Forward the request if the system is in cache bypass mode. 256512725Snikos.nikoleris@arm.com return cache->memSidePort.sendAtomic(pkt); 256612725Snikos.nikoleris@arm.com } else { 256712725Snikos.nikoleris@arm.com return cache->recvAtomic(pkt); 256812725Snikos.nikoleris@arm.com } 256912724Snikos.nikoleris@arm.com} 257012724Snikos.nikoleris@arm.com 257112724Snikos.nikoleris@arm.comvoid 257212724Snikos.nikoleris@arm.comBaseCache::CpuSidePort::recvFunctional(PacketPtr pkt) 257312724Snikos.nikoleris@arm.com{ 257412725Snikos.nikoleris@arm.com if (cache->system->bypassCaches()) { 257512725Snikos.nikoleris@arm.com // The cache should be flushed if we are in cache bypass mode, 257612725Snikos.nikoleris@arm.com // so we don't need to check if we need to update anything. 
257712725Snikos.nikoleris@arm.com cache->memSidePort.sendFunctional(pkt); 257812725Snikos.nikoleris@arm.com return; 257912725Snikos.nikoleris@arm.com } 258012725Snikos.nikoleris@arm.com 258112724Snikos.nikoleris@arm.com // functional request 258212724Snikos.nikoleris@arm.com cache->functionalAccess(pkt, true); 258312724Snikos.nikoleris@arm.com} 258412724Snikos.nikoleris@arm.com 258512724Snikos.nikoleris@arm.comAddrRangeList 258612724Snikos.nikoleris@arm.comBaseCache::CpuSidePort::getAddrRanges() const 258712724Snikos.nikoleris@arm.com{ 258812724Snikos.nikoleris@arm.com return cache->getAddrRanges(); 258912724Snikos.nikoleris@arm.com} 259012724Snikos.nikoleris@arm.com 259112724Snikos.nikoleris@arm.com 259212724Snikos.nikoleris@arm.comBaseCache:: 259312724Snikos.nikoleris@arm.comCpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache, 259412724Snikos.nikoleris@arm.com const std::string &_label) 259512724Snikos.nikoleris@arm.com : CacheSlavePort(_name, _cache, _label), cache(_cache) 259612724Snikos.nikoleris@arm.com{ 259712724Snikos.nikoleris@arm.com} 259812724Snikos.nikoleris@arm.com 259912724Snikos.nikoleris@arm.com/////////////// 260012724Snikos.nikoleris@arm.com// 260112724Snikos.nikoleris@arm.com// MemSidePort 260212724Snikos.nikoleris@arm.com// 260312724Snikos.nikoleris@arm.com/////////////// 260412724Snikos.nikoleris@arm.combool 260512724Snikos.nikoleris@arm.comBaseCache::MemSidePort::recvTimingResp(PacketPtr pkt) 260612724Snikos.nikoleris@arm.com{ 260712724Snikos.nikoleris@arm.com cache->recvTimingResp(pkt); 260812724Snikos.nikoleris@arm.com return true; 260912724Snikos.nikoleris@arm.com} 261012724Snikos.nikoleris@arm.com 261112724Snikos.nikoleris@arm.com// Express snooping requests to memside port 261212724Snikos.nikoleris@arm.comvoid 261312724Snikos.nikoleris@arm.comBaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt) 261412724Snikos.nikoleris@arm.com{ 261512725Snikos.nikoleris@arm.com // Snoops shouldn't happen when bypassing caches 261612725Snikos.nikoleris@arm.com assert(!cache->system->bypassCaches()); 261712725Snikos.nikoleris@arm.com 261812724Snikos.nikoleris@arm.com // handle snooping requests 261912724Snikos.nikoleris@arm.com cache->recvTimingSnoopReq(pkt); 262012724Snikos.nikoleris@arm.com} 262112724Snikos.nikoleris@arm.com 262212724Snikos.nikoleris@arm.comTick 262312724Snikos.nikoleris@arm.comBaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt) 262412724Snikos.nikoleris@arm.com{ 262512725Snikos.nikoleris@arm.com // Snoops shouldn't happen when bypassing caches 262612725Snikos.nikoleris@arm.com assert(!cache->system->bypassCaches()); 262712725Snikos.nikoleris@arm.com 262812724Snikos.nikoleris@arm.com return cache->recvAtomicSnoop(pkt); 262912724Snikos.nikoleris@arm.com} 263012724Snikos.nikoleris@arm.com 263112724Snikos.nikoleris@arm.comvoid 263212724Snikos.nikoleris@arm.comBaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt) 263312724Snikos.nikoleris@arm.com{ 263412725Snikos.nikoleris@arm.com // Snoops shouldn't happen when bypassing caches 263512725Snikos.nikoleris@arm.com assert(!cache->system->bypassCaches()); 263612725Snikos.nikoleris@arm.com 263712724Snikos.nikoleris@arm.com // functional snoop (note that in contrast to atomic we don't have 263812724Snikos.nikoleris@arm.com // a specific functionalSnoop method, as they have the same 263912724Snikos.nikoleris@arm.com // behaviour regardless) 264012724Snikos.nikoleris@arm.com cache->functionalAccess(pkt, false); 264112724Snikos.nikoleris@arm.com} 264212724Snikos.nikoleris@arm.com 
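// The slave-port flow control used by CpuSidePort::tryTiming above comes
// down to a small handshake: a request that arrives while the cache is
// blocked (or while a retry is already owed) is turned away and
// mustSendRetry is latched, and the requester may only try again once the
// port issues the retry. The stand-alone sketch below (disabled, for
// illustration only) models just that flag protocol. The names
// ToyCachePort and ToyRequester are hypothetical and this is a
// simplification, not the actual gem5 port classes or event scheduling.
#if 0
#include <cassert>
#include <cstdio>

struct ToyRequester;

struct ToyCachePort
{
    bool blocked = false;
    bool mustSendRetry = false;

    // mirrors the tryTiming check: reject while blocked or while a retry
    // is still pending, and remember that a retry is owed
    bool tryTiming()
    {
        if (blocked || mustSendRetry) {
            mustSendRetry = true;
            return false;
        }
        mustSendRetry = false;
        return true;
    }

    // unblock and, if a requester was turned away, tell it to retry
    void clearBlocked(ToyRequester &req);
};

struct ToyRequester
{
    int pendingPkts = 0;

    void sendOne(ToyCachePort &port)
    {
        if (port.tryTiming()) {
            std::printf("request accepted\n");
        } else {
            std::printf("request rejected, waiting for retry\n");
            ++pendingPkts;
        }
    }

    void recvReqRetry(ToyCachePort &port)
    {
        // re-send the packet we were asked to hold back
        assert(pendingPkts > 0);
        --pendingPkts;
        sendOne(port);
    }
};

void
ToyCachePort::clearBlocked(ToyRequester &req)
{
    blocked = false;
    if (mustSendRetry) {
        mustSendRetry = false;
        req.recvReqRetry(*this);
    }
}

int
main()
{
    ToyCachePort port;
    ToyRequester cpu;

    cpu.sendOne(port);      // accepted
    port.blocked = true;    // e.g. resources exhausted
    cpu.sendOne(port);      // rejected, retry now owed
    port.clearBlocked(cpu); // unblock -> retry -> accepted
    return 0;
}
#endif
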
264312724Snikos.nikoleris@arm.comvoid
264412724Snikos.nikoleris@arm.comBaseCache::CacheReqPacketQueue::sendDeferredPacket()
264512724Snikos.nikoleris@arm.com{
264612724Snikos.nikoleris@arm.com    // sanity check
264712724Snikos.nikoleris@arm.com    assert(!waitingOnRetry);
264812724Snikos.nikoleris@arm.com
264912724Snikos.nikoleris@arm.com    // there should never be any deferred request packets in the
265012724Snikos.nikoleris@arm.com    // queue, instead we rely on the cache to provide the packets
265112724Snikos.nikoleris@arm.com    // from the MSHR queue or write queue
265212724Snikos.nikoleris@arm.com    assert(deferredPacketReadyTime() == MaxTick);
265312724Snikos.nikoleris@arm.com
265412724Snikos.nikoleris@arm.com    // check for request packets (requests & writebacks)
265512724Snikos.nikoleris@arm.com    QueueEntry* entry = cache.getNextQueueEntry();
265612724Snikos.nikoleris@arm.com
265712724Snikos.nikoleris@arm.com    if (!entry) {
265812724Snikos.nikoleris@arm.com        // can happen if e.g. we attempt a writeback and fail, but
265912724Snikos.nikoleris@arm.com        // before the retry, the writeback is eliminated because
266012724Snikos.nikoleris@arm.com        // we snoop another cache's ReadEx.
266112724Snikos.nikoleris@arm.com    } else {
266212724Snikos.nikoleris@arm.com        // let our snoop responses go first if there are responses to
266312724Snikos.nikoleris@arm.com        // the same addresses
266413860Sodanrc@yahoo.com.br        if (checkConflictingSnoop(entry->getTarget()->pkt)) {
266512724Snikos.nikoleris@arm.com            return;
266612724Snikos.nikoleris@arm.com        }
266712724Snikos.nikoleris@arm.com        waitingOnRetry = entry->sendPacket(cache);
266812724Snikos.nikoleris@arm.com    }
266912724Snikos.nikoleris@arm.com
267012724Snikos.nikoleris@arm.com    // if we succeeded and are not waiting for a retry, schedule the
267112724Snikos.nikoleris@arm.com    // next send considering when the next queue is ready, note that
267212724Snikos.nikoleris@arm.com    // snoop responses have their own packet queue and thus schedule
267312724Snikos.nikoleris@arm.com    // their own events
267412724Snikos.nikoleris@arm.com    if (!waitingOnRetry) {
267512724Snikos.nikoleris@arm.com        schedSendEvent(cache.nextQueueReadyTime());
267612724Snikos.nikoleris@arm.com    }
267712724Snikos.nikoleris@arm.com}
267812724Snikos.nikoleris@arm.com
267912724Snikos.nikoleris@arm.comBaseCache::MemSidePort::MemSidePort(const std::string &_name,
268012724Snikos.nikoleris@arm.com                                    BaseCache *_cache,
268112724Snikos.nikoleris@arm.com                                    const std::string &_label)
268212724Snikos.nikoleris@arm.com    : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
268312724Snikos.nikoleris@arm.com      _reqQueue(*_cache, *this, _snoopRespQueue, _label),
268413564Snikos.nikoleris@arm.com      _snoopRespQueue(*_cache, *this, true, _label), cache(_cache)
268512724Snikos.nikoleris@arm.com{
268612724Snikos.nikoleris@arm.com}
268713352Snikos.nikoleris@arm.com
268813352Snikos.nikoleris@arm.comvoid
268913352Snikos.nikoleris@arm.comWriteAllocator::updateMode(Addr write_addr, unsigned write_size,
269013352Snikos.nikoleris@arm.com                           Addr blk_addr)
269113352Snikos.nikoleris@arm.com{
269213352Snikos.nikoleris@arm.com    // check if we are continuing where the last write ended
269313352Snikos.nikoleris@arm.com    if (nextAddr == write_addr) {
269413352Snikos.nikoleris@arm.com        delayCtr[blk_addr] = delayThreshold;
269513352Snikos.nikoleris@arm.com        // stop if we have already saturated
269613352Snikos.nikoleris@arm.com        if (mode != WriteMode::NO_ALLOCATE) {
269713352Snikos.nikoleris@arm.com            byteCount += write_size;
269813352Snikos.nikoleris@arm.com            // 
switch to streaming mode if we have passed the lower 269913352Snikos.nikoleris@arm.com // threshold 270013352Snikos.nikoleris@arm.com if (mode == WriteMode::ALLOCATE && 270113352Snikos.nikoleris@arm.com byteCount > coalesceLimit) { 270213352Snikos.nikoleris@arm.com mode = WriteMode::COALESCE; 270313352Snikos.nikoleris@arm.com DPRINTF(Cache, "Switched to write coalescing\n"); 270413352Snikos.nikoleris@arm.com } else if (mode == WriteMode::COALESCE && 270513352Snikos.nikoleris@arm.com byteCount > noAllocateLimit) { 270613352Snikos.nikoleris@arm.com // and continue and switch to non-allocating mode if we 270713352Snikos.nikoleris@arm.com // pass the upper threshold 270813352Snikos.nikoleris@arm.com mode = WriteMode::NO_ALLOCATE; 270913352Snikos.nikoleris@arm.com DPRINTF(Cache, "Switched to write-no-allocate\n"); 271013352Snikos.nikoleris@arm.com } 271113352Snikos.nikoleris@arm.com } 271213352Snikos.nikoleris@arm.com } else { 271313352Snikos.nikoleris@arm.com // we did not see a write matching the previous one, start 271413352Snikos.nikoleris@arm.com // over again 271513352Snikos.nikoleris@arm.com byteCount = write_size; 271613352Snikos.nikoleris@arm.com mode = WriteMode::ALLOCATE; 271713352Snikos.nikoleris@arm.com resetDelay(blk_addr); 271813352Snikos.nikoleris@arm.com } 271913352Snikos.nikoleris@arm.com nextAddr = write_addr + write_size; 272013352Snikos.nikoleris@arm.com} 272113352Snikos.nikoleris@arm.com 272213352Snikos.nikoleris@arm.comWriteAllocator* 272313352Snikos.nikoleris@arm.comWriteAllocatorParams::create() 272413352Snikos.nikoleris@arm.com{ 272513352Snikos.nikoleris@arm.com return new WriteAllocator(this); 272613352Snikos.nikoleris@arm.com} 2727
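
// WriteAllocator::updateMode above tracks whether writes keep landing
// back-to-back (nextAddr == write_addr) and, as the accumulated byte count
// passes coalesceLimit and then noAllocateLimit, moves from ALLOCATE to
// COALESCE to NO_ALLOCATE; any gap in the stream resets the count and the
// mode. The disabled, stand-alone sketch below replays that threshold
// logic on a synthetic write stream so the transitions are easy to see.
// The names (ToyWriteMode, coalesce_limit, no_allocate_limit) and the
// limit values are illustrative assumptions, not the configured
// parameters of the write allocator.
#if 0
#include <cstdint>
#include <cstdio>

enum class ToyWriteMode { ALLOCATE, COALESCE, NO_ALLOCATE };

int
main()
{
    const unsigned coalesce_limit = 128;    // example lower threshold
    const unsigned no_allocate_limit = 256; // example upper threshold

    ToyWriteMode mode = ToyWriteMode::ALLOCATE;
    unsigned byte_count = 0;
    uint64_t next_addr = 0;

    // 64 sequential 16-byte writes, then one write that breaks the streak
    for (int i = 0; i <= 64; i++) {
        const unsigned size = 16;
        const uint64_t addr = (i < 64) ? i * size : 0x10000;

        if (addr == next_addr) {
            // sequential: accumulate and possibly escalate the mode
            if (mode != ToyWriteMode::NO_ALLOCATE) {
                byte_count += size;
                if (mode == ToyWriteMode::ALLOCATE &&
                    byte_count > coalesce_limit) {
                    mode = ToyWriteMode::COALESCE;
                    std::printf("write %d: switch to COALESCE\n", i);
                } else if (mode == ToyWriteMode::COALESCE &&
                           byte_count > no_allocate_limit) {
                    mode = ToyWriteMode::NO_ALLOCATE;
                    std::printf("write %d: switch to NO_ALLOCATE\n", i);
                }
            }
        } else {
            // streak broken: start over in allocating mode
            byte_count = size;
            mode = ToyWriteMode::ALLOCATE;
            std::printf("write %d: streak broken, back to ALLOCATE\n", i);
        }
        next_addr = addr + size;
    }
    return 0;
}
#endif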