/*
 * Copyright (c) 2012-2013, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Definition of BaseCache functions.
 */

#include "mem/cache/base.hh"

#include "base/compiler.hh"
#include "base/logging.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheRepl.hh"
#include "debug/CacheVerbose.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "mem/cache/queue_entry.hh"
#include "params/BaseCache.hh"
#include "params/WriteAllocator.hh"
#include "sim/core.hh"

class BaseMasterPort;
class BaseSlavePort;

using namespace std;

BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
                                          BaseCache *_cache,
                                          const std::string &_label)
    : QueuedSlavePort(_name, _cache, queue), queue(*_cache, *this, _label),
      blocked(false), mustSendRetry(false),
      sendRetryEvent([this]{ processSendRetry(); }, _name)
{
}

BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
    : MemObject(p),
      cpuSidePort(p->name + ".cpu_side", this, "CpuSidePort"),
      memSidePort(p->name + ".mem_side", this, "MemSidePort"),
      mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
      writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
      tags(p->tags),
      prefetcher(p->prefetcher),
      prefetchOnAccess(p->prefetch_on_access),
      writeAllocator(p->write_allocator),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
                                    name(), false,
                                    EventBase::Delayed_Writeback_Pri),
      blkSize(blk_size),
      lookupLatency(p->tag_latency),
      dataLatency(p->data_latency),
      forwardLatency(p->tag_latency),
      fillLatency(p->data_latency),
      responseLatency(p->response_latency),
      numTarget(p->tgts_per_mshr),
      forwardSnoops(true),
      clusivity(p->clusivity),
      isReadOnly(p->is_read_only),
      blocked(0),
      order(0),
      noTargetMSHR(nullptr),
      missCount(p->max_miss_count),
      addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()),
      system(p->system)
{
    // the MSHR queue has no reserve entries as we check the MSHR
    // queue on every single allocation, whereas the write queue has
    // as many reserve entries as we have MSHRs, since every MSHR may
    // eventually require a writeback, and we do not check the write
    // buffer before committing to an MSHR

    // forward snoops is overridden in init() once we can query
    // whether the connected master is actually snooping or not

    tempBlock = new TempCacheBlk(blkSize);

    tags->init(this);
    if (prefetcher)
        prefetcher->setCache(this);
}

BaseCache::~BaseCache()
{
    delete tempBlock;
}

void
BaseCache::CacheSlavePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(CachePort, "Port is blocking new requests\n");
    blocked = true;
    // if we already scheduled a retry in this cycle, but it has not yet
    // happened, cancel it
    if (sendRetryEvent.scheduled()) {
        owner.deschedule(sendRetryEvent);
        DPRINTF(CachePort, "Port descheduled retry\n");
        mustSendRetry = true;
    }
}

void
BaseCache::CacheSlavePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(CachePort, "Port is accepting new requests\n");
    blocked = false;
    if (mustSendRetry) {
        // @TODO: need to find a better time (next cycle?)
        owner.schedule(sendRetryEvent, curTick() + 1);
    }
}

void
BaseCache::CacheSlavePort::processSendRetry()
{
    DPRINTF(CachePort, "Port is sending retry\n");

    // reset the flag and call retry
    mustSendRetry = false;
    sendRetryReq();
}

Addr
BaseCache::regenerateBlkAddr(CacheBlk* blk)
{
    if (blk != tempBlock) {
        return tags->regenerateBlkAddr(blk);
    } else {
        return tempBlock->getAddr();
    }
}

void
BaseCache::init()
{
    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
        fatal("Cache ports on %s are not connected\n", name());
    cpuSidePort.sendRangeChange();
    forwardSnoops = cpuSidePort.isSnooping();
}

BaseMasterPort &
BaseCache::getMasterPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_side") {
        return memSidePort;
    } else {
        return MemObject::getMasterPort(if_name, idx);
    }
}

BaseSlavePort &
BaseCache::getSlavePort(const std::string &if_name, PortID idx)
{
    if (if_name == "cpu_side") {
        return cpuSidePort;
    } else {
        return MemObject::getSlavePort(if_name, idx);
    }
}

bool
BaseCache::inRange(Addr addr) const
{
    for (const auto& r : addrRanges) {
        if (r.contains(addr)) {
            return true;
        }
    }
    return false;
}

void
BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    if (pkt->needsResponse()) {
        pkt->makeTimingResponse();
        // @todo: Make someone pay for this
        pkt->headerDelay = pkt->payloadDelay = 0;

        // In this case we are considering request_time that takes
        // into account the delay of the xbar, if any, and just
        // lat, neglecting responseLatency, modelling hit latency
        // as just lookupLatency or the value of lat overridden
        // by access(), which calls accessBlock().
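        // As a sketch with assumed numbers: if lat is still
        // lookupLatency = 4 cycles and the incoming headerDelay is
        // 1000 ticks, recvTimingReq() computed request_time =
        // clockEdge(Cycles(4)) + 1000, i.e. the response is queued
        // four cycles plus the xbar delay after the request arrived.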
        cpuSidePort.schedTimingResp(pkt, request_time, true);
    } else {
        DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
                pkt->print());

        // queue the packet for deletion, as the sending cache is
        // still relying on it; if the block is found in access(),
        // CleanEvict and Writeback messages will be deleted
        // here as well
        pendingDelete.reset(pkt);
    }
}

void
BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                               Tick forward_time, Tick request_time)
{
    if (writeAllocator &&
        pkt && pkt->isWrite() && !pkt->req->isUncacheable()) {
        writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
                                   pkt->getBlockAddr(blkSize));
    }

    if (mshr) {
        /// MSHR hit
        /// @note writebacks will be checked in getNextMSHR()
        /// for any conflicting requests to the same block

        //@todo remove hw_pf here

        // Coalesce unless it was a software prefetch (see above).
        if (pkt) {
            assert(!pkt->isWriteback());
            // CleanEvicts corresponding to blocks which have
            // outstanding requests in MSHRs are simply sunk here
            if (pkt->cmd == MemCmd::CleanEvict) {
                pendingDelete.reset(pkt);
            } else if (pkt->cmd == MemCmd::WriteClean) {
                // A WriteClean should never coalesce with any
                // outstanding cache maintenance requests.

                // We use forward_time here because there is an
                // uncached memory write, forwarded to the WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
                        pkt->print());

                assert(pkt->req->masterId() < system->maxMasters());
                mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // We use forward_time here because it is the same
                // considering new targets. We have multiple
                // requests for the same address here. It
                // specifies the latency to allocate an internal
                // buffer and to schedule an event to the queued
                // port and also takes into account the additional
                // delay of the xbar.
                mshr->allocateTarget(pkt, forward_time, order++,
                                     allocOnFill(pkt->cmd));
                if (mshr->getNumTargets() == numTarget) {
                    noTargetMSHR = mshr;
                    setBlocked(Blocked_NoTargets);
                    // need to be careful with this... if this mshr isn't
                    // ready yet (i.e. time > curTick()), we don't want to
                    // move it ahead of mshrs that are ready
                    // mshrQueue.moveToFront(mshr);
                }
            }
        }
    } else {
        // no MSHR
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

        if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
            // We use forward_time here because there is a
            // writeback or writeclean, forwarded to the WriteBuffer.
            allocateWriteBuffer(pkt, forward_time);
        } else {
            if (blk && blk->isValid()) {
                // If we have a write miss to a valid block, we
                // need to mark the block non-readable. Otherwise
                // if we allow reads while there's an outstanding
                // write miss, the read could return stale data
                // out of the cache block... a more aggressive
                // system could detect the overlap (if any) and
                // forward data out of the MSHRs, but we don't do
                // that yet. Note that we do need to leave the
                // block valid so that it stays in the cache, in
                // case we get an upgrade response (and hence no
                // new data) when the write miss completes.
                // As long as CPUs do proper store/load forwarding
                // internally, and have a sufficiently weak memory
                // model, this is probably unnecessary, but at some
                // point it must have seemed like we needed it...
                assert((pkt->needsWritable() && !blk->isWritable()) ||
                       pkt->req->isCacheMaintenance());
                blk->status &= ~BlkReadable;
            }
            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }
    }
}

void
BaseCache::recvTimingReq(PacketPtr pkt)
{
    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    // We use lookupLatency here because it is used to specify the latency
    // of the tag lookup on an access.
    Cycles lat = lookupLatency;
    CacheBlk *blk = nullptr;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() calls accessBlock() which can modify lat value.
        satisfied = access(pkt, blk, lat, writebacks);

        // copy writebacks to write buffer here to ensure they logically
        // precede anything happening below
        doWritebacks(writebacks, forward_time);
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is simply lat, i.e. lookupLatency as possibly
    // modified by the access() function, or otherwise just lookupLatency.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    // track time of availability of next prefetch, if any
    Tick next_pf_time = MaxTick;

    if (satisfied) {
        // if we need to notify the prefetcher we have to do it before
        // anything else as later handleTimingReqHit might turn the
        // packet into a response
        if (prefetcher &&
            (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
            if (blk)
                blk->status &= ~BlkHWPrefetched;

            // Don't notify on SWPrefetch
            if (!pkt->cmd.isSWPrefetch()) {
                assert(!pkt->req->isCacheMaintenance());
                next_pf_time = prefetcher->notify(pkt);
            }
        }

        handleTimingReqHit(pkt, blk, request_time);
    } else {
        handleTimingReqMiss(pkt, blk, forward_time, request_time);

        // We should call the prefetcher regardless of whether the
        // request is satisfied, and regardless of whether it is in
        // the MSHR or not. The request could be a ReadReq hit, but
        // still not satisfied (potentially because of a prior write
        // to the same cache line). So, even when the request is not
        // satisfied and an MSHR is already allocated for it, we need
        // to let the prefetcher know about the request.

        // Don't notify the prefetcher on SWPrefetch, cache maintenance
        // operations or for writes that we are coalescing.
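        // (For example, a burst of streaming writes that has put the
        // write allocator into coalesce mode would otherwise train the
        // prefetcher on misses that are artefacts of the allocation
        // policy rather than of the demand access stream.)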
        if (prefetcher && pkt &&
            !pkt->cmd.isSWPrefetch() &&
            !pkt->req->isCacheMaintenance() &&
            !(writeAllocator && writeAllocator->coalesce() &&
              pkt->isWrite())) {
            next_pf_time = prefetcher->notify(pkt);
        }
    }

    if (next_pf_time != MaxTick) {
        schedMemSideSendEvent(next_pf_time);
    }
}

void
BaseCache::handleUncacheableWriteResp(PacketPtr pkt)
{
    Tick completion_time = clockEdge(responseLatency) +
        pkt->headerDelay + pkt->payloadDelay;

    // Reset the bus additional time as it is now accounted for
    pkt->headerDelay = pkt->payloadDelay = 0;

    cpuSidePort.schedTimingResp(pkt, completion_time, true);
}

void
BaseCache::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    // all header delay should be paid for by the crossbar, unless
    // this is a prefetch response from above
    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
             "%s saw a non-zero packet delay\n", name());

    const bool is_error = pkt->isError();

    if (is_error) {
        DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
                pkt->print());
    }

    DPRINTF(Cache, "%s: Handling response %s\n", __func__,
            pkt->print());

    // if this is a write, we should be looking at an uncacheable
    // write
    if (pkt->isWrite()) {
        assert(pkt->req->isUncacheable());
        handleUncacheableWriteResp(pkt);
        return;
    }

    // we have dealt with any (uncacheable) writes above, from here on
    // we know we are dealing with an MSHR due to a miss or a prefetch
    MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
    assert(mshr);

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = nullptr;
    }

    // Initial target is used just for stats
    MSHR::Target *initial_tgt = mshr->getTarget();
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick() - initial_tgt->recvTime;

    if (pkt->req->isUncacheable()) {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    } else {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    }

    PacketList writebacks;

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp ||
         mshr->wasWholeLineWrite);

    // make sure that if the mshr was due to a whole line write then
    // the response is an invalidation
    assert(!mshr->wasWholeLineWrite || pkt->isInvalidate());

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                pkt->getAddr());

        const bool allocate = (writeAllocator && mshr->wasWholeLineWrite) ?
            writeAllocator->allocate() : mshr->allocOnFill();
        blk = handleFill(pkt, blk, writebacks, allocate);
        assert(blk != nullptr);
    }

    if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
        // The block was marked not readable while there was a pending
        // cache maintenance operation, restore its flag.
        blk->status |= BlkReadable;

        // This was a cache clean operation (without invalidate)
        // and we have a copy of the block already. Since there
        // is no invalidation, we can promote targets that don't
        // require a writable copy
        mshr->promoteReadable();
    }

    if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) {
        // If at this point the referenced block is writable and the
        // response is not a cache invalidate, we promote targets that
        // were deferred as we couldn't guarantee a writable copy
        mshr->promoteWritable();
    }

    serviceMSHRTargets(mshr, pkt, blk, writebacks);

    if (mshr->promoteDeferredTargets()) {
        // avoid later read getting stale data while write miss is
        // outstanding... see comment in timingAccess()
        if (blk) {
            blk->status &= ~BlkReadable;
        }
        mshrQueue.markPending(mshr);
        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
    } else {
        // while we deallocate an mshr from the queue we still have to
        // check the isFull condition before and after as we might
        // have been using the reserved entries already
        const bool was_full = mshrQueue.isFull();
        mshrQueue.deallocate(mshr);
        if (was_full && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }

        // Request the bus for a prefetch if this deallocation freed enough
        // MSHRs for a prefetch to take place
        if (prefetcher && mshrQueue.canPrefetch()) {
            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
                                         clockEdge());
            if (next_pf_time != MaxTick)
                schedMemSideSendEvent(next_pf_time);
        }
    }

    // if we used temp block, check to see if it's valid and then clear it out
    if (blk == tempBlock && tempBlock->isValid()) {
        evictBlock(blk, writebacks);
    }

    const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // copy writebacks to write buffer
    doWritebacks(writebacks, forward_time);

    DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
    delete pkt;
}


Tick
BaseCache::recvAtomic(PacketPtr pkt)
{
    // We are in atomic mode so we pay just for lookupLatency here.
    Cycles lat = lookupLatency;

    // follow the same flow as in recvTimingReq, and check if a cache
    // above us is responding
    if (pkt->cacheResponding() && !pkt->isClean()) {
        assert(!pkt->req->isCacheInvalidate());
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if a cache is responding, and it had the line in Owned
        // rather than Modified state, we need to invalidate any
        // copies that are not on the same path to memory
        assert(pkt->needsWritable() && !pkt->responderHadWritable());
        lat += ticksToCycles(memSidePort.sendAtomic(pkt));

        return lat * clockPeriod();
    }

    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    CacheBlk *blk = nullptr;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty
        // block. If a dirty block is encountered a WriteClean
        // will update any copies on the path to memory
        // until the point of reference.
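        // (writecleanBlk() below builds the WriteClean carrying the
        // dirty data; with a non-zero destination flag it is marked
        // write-through, so it keeps propagating towards the point
        // of reference rather than stopping at the first cache.)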
        DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                __func__, pkt->print(), blk->print());
        PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
        writebacks.push_back(wb_pkt);
        pkt->setSatisfied();
    }

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);
    assert(writebacks.empty());

    if (!satisfied) {
        lat += handleAtomicReqMiss(pkt, blk, writebacks);
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention. If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used temp block, check to see if it's valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter), first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = evictBlock(blk);
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat * clockPeriod();
}

void
BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
{
    Addr blk_addr = pkt->getBlockAddr(blkSize);
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found. As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.
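    // (For instance, a functional read that hits clean data here is
    // filled in from this copy but not marked done, so it continues
    // to be forwarded until an owner, or memory itself, provides the
    // authoritative value.)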

    // see if we have data at all (owned or otherwise)
    bool have_data = blk && blk->isValid()
        && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize,
                                     blk->data);

    // data we have is dirty if marked as such or if we have an
    // in-service MSHR that is pending a modified line
    bool have_dirty =
        have_data && (blk->isDirty() ||
                      (mshr && mshr->inService && mshr->isPendingModified()));

    bool done = have_dirty ||
        cpuSidePort.trySatisfyFunctional(pkt) ||
        mshrQueue.trySatisfyFunctional(pkt, blk_addr) ||
        writeBuffer.trySatisfyFunctional(pkt, blk_addr) ||
        memSidePort.trySatisfyFunctional(pkt);

    DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
            (blk && blk->isValid()) ? "valid " : "",
            have_data ? "data " : "", done ? "done " : "");

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (done) {
        pkt->makeResponse();
    } else {
        // if it came as a request from the CPU side then make sure it
        // continues towards the memory side
        if (from_cpu_side) {
            memSidePort.sendFunctional(pkt);
        } else if (cpuSidePort.isSnooping()) {
            // if it came from the memory side, it must be a snoop request
            // and we should only forward it if we are forwarding snoops
            cpuSidePort.sendFunctionalSnoop(pkt);
        }
    }
}


void
BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = pkt->getOffset(blkSize);
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}

QueueEntry*
BaseCache::getNextQueueEntry()
{
    // Check both MSHR queue and write buffer for potential requests,
    // note that null does not mean there is no request, it could
    // simply be that it is not ready
    MSHR *miss_mshr = mshrQueue.getNext();
    WriteQueueEntry *wq_entry = writeBuffer.getNext();

    // If we got a write buffer request ready, first priority is a
    // full write buffer, otherwise we favour the miss requests
    if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
        // need to search MSHR queue for conflicting earlier miss.
        MSHR *conflict_mshr =
            mshrQueue.findPending(wq_entry->blkAddr,
                                  wq_entry->isSecure);

        if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
            // Service misses in order until conflict is cleared.
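            // (For example, with hypothetical order numbers: a
            // writeback queued at order 12 that conflicts with an
            // MSHR allocated at order 9 yields to the miss; the
            // writeback is picked up again on a later call.)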
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue write
        return wq_entry;
    } else if (miss_mshr) {
        // need to check for conflicting earlier writeback
        WriteQueueEntry *conflict_mshr =
            writeBuffer.findPending(miss_mshr->blkAddr,
                                    miss_mshr->isSecure);
        if (conflict_mshr) {
            // not sure why we don't check order here... it was in the
            // original code but commented out.

            // The only way this happens is if we are
            // doing a write and we didn't have permissions
            // then subsequently saw a writeback (owned got evicted)
            // We need to make sure to perform the writeback first
            // to preserve the dirty data, then we can issue the write

            // should we return wq_entry here instead? I.e. do we
            // have to flush writes in order? I don't think so... not
            // for Alpha anyway. Maybe for x86?
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue read
        return miss_mshr;
    }

    // fall through... no pending requests. Try a prefetch.
    assert(!miss_mshr && !wq_entry);
    if (prefetcher && mshrQueue.canPrefetch()) {
        // If we have a miss queue slot, we can try a prefetch
        PacketPtr pkt = prefetcher->getPacket();
        if (pkt) {
            Addr pf_addr = pkt->getBlockAddr(blkSize);
            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
                // Update statistic on number of prefetches issued
                // (hwpf_mshr_misses)
                assert(pkt->req->masterId() < system->maxMasters());
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // allocate an MSHR and return it, note
                // that we send the packet straight away, so do not
                // schedule the send
                return allocateMissBuffer(pkt, curTick(), false);
            } else {
                // free the request and packet
                delete pkt;
            }
        }
    }

    return nullptr;
}

void
BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsWritable() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        if (pkt->isAtomicOp()) {
            // extract data from cache and save it into the data field in
            // the packet as a return value from this atomic op

            int offset = tags->extractBlkOffset(pkt->getAddr());
            uint8_t *blk_data = blk->data + offset;
            std::memcpy(pkt->getPtr<uint8_t>(), blk_data, pkt->getSize());

            // execute AMO operation
            (*(pkt->getAtomicOp()))(blk_data);

            // set block status to dirty
            blk->status |= BlkDirty;
        } else {
            cmpAndSwap(blk, pkt);
        }
    } else if (pkt->isWrite()) {
        // we have the block in a writable state and can go ahead,
        // note that the line may also be considered writable in
        // downstream caches along the path to memory, but always
        // Exclusive, and never Modified
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in writable state
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty (and thus transition to the
        // Modified state) even if we are a failed StoreCond so we
        // supply data to any snoops that have appended themselves to
        // this cache before knowing the store will fail.
        blk->status |= BlkDirty;
        DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }

        // all read responses have a data payload
        assert(pkt->hasRespData());
        pkt->setDataFromBlock(blk->data, blkSize);
    } else if (pkt->isUpgrade()) {
        // sanity check
        assert(!pkt->hasSharers());

        if (blk->isDirty()) {
            // we were in the Owned state, and a cache above us that
            // has the line in Shared state needs to be made aware
            // that the data it already has is in fact dirty
            pkt->setCacheResponding();
            blk->status &= ~BlkDirty;
        }
    } else if (pkt->isClean()) {
        blk->status &= ~BlkDirty;
    } else {
        assert(pkt->isInvalidate());
        invalidateBlock(blk);
        DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
                pkt->print());
    }
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

bool
BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
                  PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());

    // Here lat is the value passed as a parameter to accessBlock(),
    // which can modify it.
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat);

    DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(),
            blk ? "hit " + blk->print() : "miss");

    if (pkt->req->isCacheMaintenance()) {
        // A cache maintenance operation is always forwarded to the
        // memory below even if the block is found in dirty state.

        // We defer any changes to the state of the block until we
        // create and mark as in service the mshr for the downstream
        // packet.
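        // (The state changes happen once the response returns:
        // recvTimingResp() restores BlkReadable and promotes any
        // deferred targets for a clean operation without invalidate.)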
        return false;
    }

    if (pkt->isEviction()) {
        // We check for presence of block in above caches before issuing
        // Writeback or CleanEvict to write buffer. Therefore the only
        // possible case is a CleanEvict packet coming from above and
        // encountering a Writeback, generated by an upper-level peer
        // cache, that is waiting in this cache's write buffer. Cases of
        // upper level peer caches generating CleanEvict and Writeback or
        // simply CleanEvict and CleanEvict almost simultaneously will be
        // caught by snoops sent out by the crossbar.
        WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
                                                          pkt->isSecure());
        if (wb_entry) {
            assert(wb_entry->getNumTargets() == 1);
            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
            assert(wbPkt->isWriteback());

            if (pkt->isCleanEviction()) {
                // The CleanEvict and WritebackClean snoop into other
                // peer caches of the same level while traversing the
                // crossbar. If a copy of the block is found, the
                // packet is deleted in the crossbar. Hence, none of
                // the other upper level caches connected to this
                // cache have the block, so we can clear the
                // BLOCK_CACHED flag in the Writeback if set and
                // discard the CleanEvict by returning true.
                wbPkt->clearBlockCached();
                return true;
            } else {
                assert(pkt->cmd == MemCmd::WritebackDirty);
                // Dirty writeback from above trumps our clean
                // writeback... discard here
                // Note: markInService will remove entry from writeback buffer.
                markInService(wb_entry);
                delete wbPkt;
            }
        }
    }

    // Writeback handling is special case. We can write the block into
    // the cache without having a writeable copy (or any copy at all).
    if (pkt->isWriteback()) {
        assert(blkSize == pkt->getSize());

        // we could get a clean writeback while we are having
        // outstanding accesses to a block, do the simple thing for
        // now and drop the clean writeback so that we do not upset
        // any ordering/decisions about ownership already taken
        if (pkt->cmd == MemCmd::WritebackClean &&
            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
                    "dropping\n", pkt->getAddr());
            return true;
        }

        if (!blk) {
            // need to do a replacement
            blk = allocateBlock(pkt, writebacks);
            if (!blk) {
                // no replaceable block available: give up, fwd to next level.
                incMissCount(pkt);
                return false;
            }

            blk->status |= (BlkValid | BlkReadable);
        }
        // only mark the block dirty if we got a writeback command,
        // and leave it as is for a clean writeback
        if (pkt->cmd == MemCmd::WritebackDirty) {
            // TODO: the coherent cache can assert(!blk->isDirty());
            blk->status |= BlkDirty;
        }
        // if the packet does not have sharers, it is passing
        // writable, and we got the writeback in Modified or Exclusive
        // state, if not we are in the Owned or Shared state
        if (!pkt->hasSharers()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
        incHitCount(pkt);
        // populate the time when the block will be ready to access.
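        // (Sketch with assumed numbers: fillLatency = 2 cycles,
        // headerDelay = 700 ticks and payloadDelay = 1000 ticks give
        // whenReady = clockEdge(Cycles(2)) + 1700 ticks.)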
        blk->whenReady = clockEdge(fillLatency) + pkt->headerDelay +
            pkt->payloadDelay;
        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        if (blk) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and delete
            // it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the CleanEvict
        // like a Writeback which could not find a replaceable block so has to
        // go to the next level.
        return false;
    } else if (pkt->cmd == MemCmd::WriteClean) {
        // WriteClean handling is a special case. We can allocate a
        // block directly if it doesn't exist and we can update the
        // block immediately. The WriteClean transfers the ownership
        // of the block as well.
        assert(blkSize == pkt->getSize());

        if (!blk) {
            if (pkt->writeThrough()) {
                // if this is a write through packet, we don't try to
                // allocate if the block is not present
                return false;
            } else {
                // a writeback that misses needs to allocate a new block
                blk = allocateBlock(pkt, writebacks);
                if (!blk) {
                    // no replaceable block available: give up, fwd to
                    // next level.
                    incMissCount(pkt);
                    return false;
                }

                blk->status |= (BlkValid | BlkReadable);
            }
        }

        // at this point either this is a writeback or a write-through
        // write clean operation and the block is already in this
        // cache, we need to update the data and the block flags
        assert(blk);
        // TODO: the coherent cache can assert(!blk->isDirty());
        if (!pkt->writeThrough()) {
            blk->status |= BlkDirty;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());

        incHitCount(pkt);
        // populate the time when the block will be ready to access.
        blk->whenReady = clockEdge(fillLatency) + pkt->headerDelay +
            pkt->payloadDelay;
        // if this is a write-through packet it will be sent to the cache
        // below
        return !pkt->writeThrough();
    } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
                       blk->isReadable())) {
        // OK to satisfy access
        incHitCount(pkt);
        satisfyRequest(pkt, blk);
        maintainClusivity(pkt->fromCache(), blk);

        return true;
    }

    // Can't satisfy access normally... either no block (blk == nullptr)
    // or have block but need writable

    incMissCount(pkt);

    if (!blk && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}

void
BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
{
    if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
        clusivity == Enums::mostly_excl) {
        // if we have responded to a cache, and our block is still
        // valid, but not dirty, and this cache is mostly exclusive
        // with respect to the cache above, drop the block
        invalidateBlock(blk);
    }
}

CacheBlk*
BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
                      bool allocate)
{
    assert(pkt->isResponse());
    Addr addr = pkt->getAddr();
    bool is_secure = pkt->isSecure();
#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    // When handling a fill, we should have no writes to this line.
    assert(addr == pkt->getBlockAddr(blkSize));
    assert(!writeBuffer.findMatch(addr, is_secure));

    if (!blk) {
        // better have read new data...
        assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp);

        // need to do a replacement if allocating, otherwise we stick
        // with the temporary storage
        blk = allocate ? allocateBlock(pkt, writebacks) : nullptr;

        if (!blk) {
            // No replaceable block or a mostly exclusive
            // cache... just use temporary storage to complete the
            // current request and then get rid of it
            assert(!tempBlock->isValid());
            blk = tempBlock;
            tempBlock->insert(addr, is_secure);
            DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
                    is_secure ? "s" : "ns");
        }

        // we should never be overwriting a valid block
        assert(!blk->isValid());
    } else {
        // existing block... probably an upgrade
        assert(regenerateBlkAddr(blk) == addr);
        assert(blk->isSecure() == is_secure);
        // either we're getting new data or the block should already be valid
        assert(pkt->hasData() || blk->isValid());
        // don't clear block status... if block is already dirty we
        // don't want to lose that
    }

    blk->status |= BlkValid | BlkReadable;

    // sanity check for whole-line writes, which should always be
    // marked as writable as part of the fill, and then later marked
    // dirty as part of satisfyRequest
    if (pkt->cmd == MemCmd::InvalidateResp) {
        assert(!pkt->hasSharers());
    }

    // here we deal with setting the appropriate state of the line,
    // and we start by looking at the hasSharers flag, and ignore the
    // cacheResponding flag (normally signalling dirty data) if the
    // packet has sharers, thus the line is never allocated as Owned
    // (dirty but not writable), and always ends up being either
    // Shared, Exclusive or Modified, see Packet::setCacheResponding
    // for more details
    if (!pkt->hasSharers()) {
        // we could get a writable line from memory (rather than a
        // cache) even in a read-only cache, note that we set this bit
        // even for a read-only cache, possibly revisit this decision
        blk->status |= BlkWritable;

        // check if we got this via cache-to-cache transfer (i.e., from a
        // cache that had the block in Modified or Owned state)
        if (pkt->cacheResponding()) {
            // we got the block in Modified state, and invalidated the
            // owner's copy
            blk->status |= BlkDirty;

            chatty_assert(!isReadOnly, "Should never see dirty snoop response "
                          "in read-only cache %s\n", name());
        }
    }

    DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
            addr, is_secure ? "s" : "ns", old_state, blk->print());

    // if we got new data, copy it in (checking for a read response
    // and a response that has data is the same in the end)
    if (pkt->isRead()) {
        // sanity checks
        assert(pkt->hasData());
        assert(pkt->getSize() == blkSize);

        pkt->writeDataToBlock(blk->data, blkSize);
    }
    // We pay for fillLatency here.
    blk->whenReady = clockEdge() + fillLatency * clockPeriod() +
        pkt->payloadDelay;

    return blk;
}

CacheBlk*
BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks)
{
    // Get address
    const Addr addr = pkt->getAddr();

    // Get secure bit
    const bool is_secure = pkt->isSecure();

    // Find replacement victim
    std::vector<CacheBlk*> evict_blks;
    CacheBlk *victim = tags->findVictim(addr, is_secure, evict_blks);

    // It is valid to return nullptr if there is no victim
    if (!victim)
        return nullptr;

    // Print victim block's information
    DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print());

    // Check for transient state allocations. If any of the entries listed
    // for eviction has a transient state, the allocation fails
    for (const auto& blk : evict_blks) {
        if (blk->isValid()) {
            Addr repl_addr = regenerateBlkAddr(blk);
            MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
            if (repl_mshr) {
                // must be an outstanding upgrade or clean request
                // on a block we're about to replace...
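                // (Both legs of the assert below are the awkward
                // cases: an upgrade still in flight for the victim,
                // or a pending clean that must still observe this
                // copy; either way the replacement is abandoned.)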
                assert((!blk->isWritable() && repl_mshr->needsWritable()) ||
                       repl_mshr->isCleaning());

                // too hard to replace block with transient state
                // allocation failed, block not inserted
                return nullptr;
            }
        }
    }

    // The victim will be replaced by a new entry, so increase the replacement
    // counter if a valid block is being replaced
    if (victim->isValid()) {
        DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx "
                "(%s): %s\n", regenerateBlkAddr(victim),
                victim->isSecure() ? "s" : "ns",
                addr, is_secure ? "s" : "ns",
                victim->isDirty() ? "writeback" : "clean");

        replacements++;
    }

    // Evict valid blocks associated to this victim block
    for (const auto& blk : evict_blks) {
        if (blk->isValid()) {
            if (blk->wasPrefetched()) {
                unusedPrefetches++;
            }

            evictBlock(blk, writebacks);
        }
    }

    // Insert new block at victimized entry
    tags->insertBlock(addr, is_secure, pkt->req->masterId(),
                      pkt->req->taskId(), victim);

    return victim;
}

void
BaseCache::invalidateBlock(CacheBlk *blk)
{
    if (blk != tempBlock)
        tags->invalidate(blk);
    blk->invalidate();
}

void
BaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks)
{
    PacketPtr pkt = evictBlock(blk);
    if (pkt) {
        writebacks.push_back(pkt);
    }
}

PacketPtr
BaseCache::writebackBlk(CacheBlk *blk)
{
    chatty_assert(!isReadOnly || writebackClean,
                  "Writeback from read-only cache");
    assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));

    writebacks[Request::wbMasterId]++;

    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);

    PacketPtr pkt =
        new Packet(req, blk->isDirty() ?
                   MemCmd::WritebackDirty : MemCmd::WritebackClean);

    DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n",
            pkt->print(), blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting shared means we pass the block in modified
        // state, mark our own block non-writeable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    pkt->setDataFromBlock(blk->data, blkSize);

    return pkt;
}

PacketPtr
BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
{
    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure()) {
        req->setFlags(Request::SECURE);
    }
    req->taskId(blk->task_id);

    PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id);

    if (dest) {
        req->setFlags(dest);
        pkt->setWriteThrough();
    }

    DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(),
            blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting shared means we pass the block in modified
        // state, mark our own block non-writeable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    pkt->setDataFromBlock(blk->data, blkSize);

    return pkt;
}


void
BaseCache::memWriteback()
{
    tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
}

void
BaseCache::memInvalidate()
{
    tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
}

bool
BaseCache::isDirty() const
{
    return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); });
}

void
BaseCache::writebackVisitor(CacheBlk &blk)
{
    if (blk.isDirty()) {
        assert(blk.isValid());

        RequestPtr request = std::make_shared<Request>(
            regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId);

        request->taskId(blk.task_id);
        if (blk.isSecure()) {
            request->setFlags(Request::SECURE);
        }

        Packet packet(request, MemCmd::WriteReq);
        packet.dataStatic(blk.data);

        memSidePort.sendFunctional(&packet);

        blk.status &= ~BlkDirty;
    }
}

void
BaseCache::invalidateVisitor(CacheBlk &blk)
{
    if (blk.isDirty())
        warn_once("Invalidating dirty cache lines. "
" \ 1450 "Expect things to break.\n"); 1451 1452 if (blk.isValid()) { 1453 assert(!blk.isDirty()); 1454 invalidateBlock(&blk); 1455 } 1456} 1457 1458Tick 1459BaseCache::nextQueueReadyTime() const 1460{ 1461 Tick nextReady = std::min(mshrQueue.nextReadyTime(), 1462 writeBuffer.nextReadyTime()); 1463 1464 // Don't signal prefetch ready time if no MSHRs available 1465 // Will signal once enoguh MSHRs are deallocated 1466 if (prefetcher && mshrQueue.canPrefetch()) { 1467 nextReady = std::min(nextReady, 1468 prefetcher->nextPrefetchReadyTime()); 1469 } 1470 1471 return nextReady; 1472} 1473 1474 1475bool 1476BaseCache::sendMSHRQueuePacket(MSHR* mshr) 1477{ 1478 assert(mshr); 1479 1480 // use request from 1st target 1481 PacketPtr tgt_pkt = mshr->getTarget()->pkt; 1482 1483 DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print()); 1484 1485 // if the cache is in write coalescing mode or (additionally) in 1486 // no allocation mode, and we have a write packet with an MSHR 1487 // that is not a whole-line write (due to incompatible flags etc), 1488 // then reset the write mode 1489 if (writeAllocator && writeAllocator->coalesce() && tgt_pkt->isWrite()) { 1490 if (!mshr->isWholeLineWrite()) { 1491 // if we are currently write coalescing, hold on the 1492 // MSHR as many cycles extra as we need to completely 1493 // write a cache line 1494 if (writeAllocator->delay(mshr->blkAddr)) { 1495 Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod(); 1496 DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow " 1497 "for write coalescing\n", tgt_pkt->print(), delay); 1498 mshrQueue.delay(mshr, delay); 1499 return false; 1500 } else { 1501 writeAllocator->reset(); 1502 } 1503 } else { 1504 writeAllocator->resetDelay(mshr->blkAddr); 1505 } 1506 } 1507 1508 CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure); 1509 1510 // either a prefetch that is not present upstream, or a normal 1511 // MSHR request, proceed to get the packet to send downstream 1512 PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable(), 1513 mshr->isWholeLineWrite()); 1514 1515 mshr->isForward = (pkt == nullptr); 1516 1517 if (mshr->isForward) { 1518 // not a cache block request, but a response is expected 1519 // make copy of current packet to forward, keep current 1520 // copy for response handling 1521 pkt = new Packet(tgt_pkt, false, true); 1522 assert(!pkt->isWrite()); 1523 } 1524 1525 // play it safe and append (rather than set) the sender state, 1526 // as forwarded packets may already have existing state 1527 pkt->pushSenderState(mshr); 1528 1529 if (pkt->isClean() && blk && blk->isDirty()) { 1530 // A cache clean opearation is looking for a dirty block. Mark 1531 // the packet so that the destination xbar can determine that 1532 // there will be a follow-up write packet as well. 1533 pkt->setSatisfied(); 1534 } 1535 1536 if (!memSidePort.sendTimingReq(pkt)) { 1537 // we are awaiting a retry, but we 1538 // delete the packet and will be creating a new packet 1539 // when we get the opportunity 1540 delete pkt; 1541 1542 // note that we have now masked any requestBus and 1543 // schedSendEvent (we will wait for a retry before 1544 // doing anything), and this is so even if we do not 1545 // care about this packet and might override it before 1546 // it gets retried 1547 return true; 1548 } else { 1549 // As part of the call to sendTimingReq the packet is 1550 // forwarded to all neighbouring caches (and any caches 1551 // above them) as a snoop. 
        // any of the neighbouring caches are responding, and if
        // so, we know it is dirty, and we can determine if it is
        // being passed as Modified, making our MSHR the ordering
        // point
        bool pending_modified_resp = !pkt->hasSharers() &&
            pkt->cacheResponding();
        markInService(mshr, pending_modified_resp);

        if (pkt->isClean() && blk && blk->isDirty()) {
            // A cache clean operation is looking for a dirty
            // block. If a dirty block is encountered a WriteClean
            // will update any copies on the path to memory
            // until the point of reference.
            DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
                                             pkt->id);
            PacketList writebacks;
            writebacks.push_back(wb_pkt);
            doWritebacks(writebacks, 0);
        }

        return false;
    }
}

bool
BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
{
    assert(wq_entry);

    // always a single target for write queue entries
    PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;

    DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());

    // forward as is, both for evictions and uncacheable writes
    if (!memSidePort.sendTimingReq(tgt_pkt)) {
        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        markInService(wq_entry);
        return false;
    }
}

void
BaseCache::serialize(CheckpointOut &cp) const
{
    bool dirty(isDirty());

    if (dirty) {
        warn("*** The cache still contains dirty data. ***\n");
        warn("    Make sure to drain the system using the correct flags.\n");
        warn("    This checkpoint will not restore correctly "
             "and dirty data in the cache will be lost!\n");
    }

    // Since we don't checkpoint the data in the cache, any dirty data
    // will be lost when restoring from a checkpoint of a system that
    // wasn't drained properly. Flag the checkpoint as invalid if the
    // cache contains dirty data.
    bool bad_checkpoint(dirty);
    SERIALIZE_SCALAR(bad_checkpoint);
}

void
BaseCache::unserialize(CheckpointIn &cp)
{
    bool bad_checkpoint;
    UNSERIALIZE_SCALAR(bad_checkpoint);
    if (bad_checkpoint) {
        fatal("Restoring from checkpoints with dirty caches is not "
              "supported in the classic memory system. Please remove any "
              "caches or drain them properly before taking checkpoints.\n");
    }
}

void
BaseCache::regStats()
{
    MemObject::regStats();

    using namespace Stats;

    // Hit statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        hits[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_hits")
            .desc("number of " + cstr + " hits")
            .flags(total | nozero | nonan)
            ;
    demandHits
        .name(name() + ".demand_hits")
        .desc("number of demand (read+write) hits")
        .flags(total | nozero | nonan)
        ;
    demandHits = SUM_DEMAND(hits);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandHits.subname(i, system->getMasterName(i));
    }

    overallHits
        .name(name() + ".overall_hits")
        .desc("number of overall hits")
        .flags(total | nozero | nonan)
        ;
    overallHits = demandHits + SUM_NON_DEMAND(hits);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallHits.subname(i, system->getMasterName(i));
    }

    // Miss statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        misses[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_misses")
            .desc("number of " + cstr + " misses")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            misses[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMisses
        .name(name() + ".demand_misses")
        .desc("number of demand (read+write) misses")
        .flags(total | nozero | nonan)
        ;
    demandMisses = SUM_DEMAND(misses);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMisses.subname(i, system->getMasterName(i));
    }

    overallMisses
        .name(name() + ".overall_misses")
        .desc("number of overall misses")
        .flags(total | nozero | nonan)
        ;
    overallMisses = demandMisses + SUM_NON_DEMAND(misses);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMisses.subname(i, system->getMasterName(i));
    }

    // Miss latency statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        missLatency[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_miss_latency")
            .desc("number of " + cstr + " miss cycles")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            missLatency[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMissLatency
        .name(name() + ".demand_miss_latency")
        .desc("number of demand (read+write) miss cycles")
        .flags(total | nozero | nonan)
        ;
    demandMissLatency = SUM_DEMAND(missLatency);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMissLatency.subname(i, system->getMasterName(i));
    }

    overallMissLatency
        .name(name() + ".overall_miss_latency")
        .desc("number of overall miss cycles")
        .flags(total | nozero | nonan)
        ;
    overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMissLatency.subname(i, system->getMasterName(i));
    }
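    // The stats that follow are formulas, evaluated at dump time from
    // the vectors registered above; accesses is simply hits + misses
    // per command, and the per-master subnames carry through.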
    // access formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        accesses[access_idx]
            .name(name() + "." + cstr + "_accesses")
            .desc("number of " + cstr + " accesses (hits+misses)")
            .flags(total | nozero | nonan)
            ;
        accesses[access_idx] = hits[access_idx] + misses[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            accesses[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandAccesses
        .name(name() + ".demand_accesses")
        .desc("number of demand (read+write) accesses")
        .flags(total | nozero | nonan)
        ;
    demandAccesses = demandHits + demandMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        demandAccesses.subname(i, system->getMasterName(i));
    }

    overallAccesses
        .name(name() + ".overall_accesses")
        .desc("number of overall (read+write) accesses")
        .flags(total | nozero | nonan)
        ;
    overallAccesses = overallHits + overallMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallAccesses.subname(i, system->getMasterName(i));
    }
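    // Worked example: 25 demand misses out of 100 demand accesses
    // give demand_miss_rate = 0.25; the nonan flag keeps a cache
    // that saw no accesses from reporting NaN for these ratios.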
    // miss rate formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        missRate[access_idx]
            .name(name() + "." + cstr + "_miss_rate")
            .desc("miss rate for " + cstr + " accesses")
            .flags(total | nozero | nonan)
            ;
        missRate[access_idx] = misses[access_idx] / accesses[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            missRate[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMissRate
        .name(name() + ".demand_miss_rate")
        .desc("miss rate for demand accesses")
        .flags(total | nozero | nonan)
        ;
    demandMissRate = demandMisses / demandAccesses;
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMissRate.subname(i, system->getMasterName(i));
    }

    overallMissRate
        .name(name() + ".overall_miss_rate")
        .desc("miss rate for overall accesses")
        .flags(total | nozero | nonan)
        ;
    overallMissRate = overallMisses / overallAccesses;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMissRate.subname(i, system->getMasterName(i));
    }

    // miss latency formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        avgMissLatency[access_idx]
            .name(name() + "." + cstr + "_avg_miss_latency")
            .desc("average " + cstr + " miss latency")
            .flags(total | nozero | nonan)
            ;
        avgMissLatency[access_idx] =
            missLatency[access_idx] / misses[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            avgMissLatency[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandAvgMissLatency
        .name(name() + ".demand_avg_miss_latency")
        .desc("average demand miss latency")
        .flags(total | nozero | nonan)
        ;
    demandAvgMissLatency = demandMissLatency / demandMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        demandAvgMissLatency.subname(i, system->getMasterName(i));
    }

    overallAvgMissLatency
        .name(name() + ".overall_avg_miss_latency")
        .desc("average overall miss latency")
        .flags(total | nozero | nonan)
        ;
    overallAvgMissLatency = overallMissLatency / overallMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallAvgMissLatency.subname(i, system->getMasterName(i));
    }

    blocked_cycles.init(NUM_BLOCKED_CAUSES);
    blocked_cycles
        .name(name() + ".blocked_cycles")
        .desc("number of cycles access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    blocked_causes.init(NUM_BLOCKED_CAUSES);
    blocked_causes
        .name(name() + ".blocked")
        .desc("number of times access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked
        .name(name() + ".avg_blocked_cycles")
        .desc("average number of cycles each access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked = blocked_cycles / blocked_causes;
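    // e.g., blocking four times for a total of 200 cycles, all due
    // to exhausted MSHRs, reports avg_blocked_cycles::no_mshrs = 50.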
    unusedPrefetches
        .name(name() + ".unused_prefetches")
        .desc("number of HardPF blocks evicted w/o reference")
        .flags(nozero)
        ;

    writebacks
        .init(system->maxMasters())
        .name(name() + ".writebacks")
        .desc("number of writebacks")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system->maxMasters(); i++) {
        writebacks.subname(i, system->getMasterName(i));
    }

    // MSHR statistics
    // MSHR hit statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_hits[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_mshr_hits")
            .desc("number of " + cstr + " MSHR hits")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            mshr_hits[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMshrHits
        .name(name() + ".demand_mshr_hits")
        .desc("number of demand (read+write) MSHR hits")
        .flags(total | nozero | nonan)
        ;
    demandMshrHits = SUM_DEMAND(mshr_hits);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMshrHits.subname(i, system->getMasterName(i));
    }

    overallMshrHits
        .name(name() + ".overall_mshr_hits")
        .desc("number of overall MSHR hits")
        .flags(total | nozero | nonan)
        ;
    overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrHits.subname(i, system->getMasterName(i));
    }

    // MSHR miss statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_misses[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_mshr_misses")
            .desc("number of " + cstr + " MSHR misses")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            mshr_misses[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMshrMisses
        .name(name() + ".demand_mshr_misses")
        .desc("number of demand (read+write) MSHR misses")
        .flags(total | nozero | nonan)
        ;
    demandMshrMisses = SUM_DEMAND(mshr_misses);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMshrMisses.subname(i, system->getMasterName(i));
    }

    overallMshrMisses
        .name(name() + ".overall_mshr_misses")
        .desc("number of overall MSHR misses")
        .flags(total | nozero | nonan)
        ;
    overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrMisses.subname(i, system->getMasterName(i));
    }
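    // Note: an MSHR "hit" is an access that merged into an already
    // outstanding MSHR for the same block, while an MSHR "miss"
    // allocated a fresh entry, so the two counts are disjoint.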
    // MSHR miss latency statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_miss_latency[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_mshr_miss_latency")
            .desc("number of " + cstr + " MSHR miss cycles")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            mshr_miss_latency[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMshrMissLatency
        .name(name() + ".demand_mshr_miss_latency")
        .desc("number of demand (read+write) MSHR miss cycles")
        .flags(total | nozero | nonan)
        ;
    demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMshrMissLatency.subname(i, system->getMasterName(i));
    }

    overallMshrMissLatency
        .name(name() + ".overall_mshr_miss_latency")
        .desc("number of overall MSHR miss cycles")
        .flags(total | nozero | nonan)
        ;
    overallMshrMissLatency =
        demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrMissLatency.subname(i, system->getMasterName(i));
    }

    // MSHR uncacheable statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_uncacheable[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_mshr_uncacheable")
            .desc("number of " + cstr + " MSHR uncacheable")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            mshr_uncacheable[access_idx].subname(i, system->getMasterName(i));
        }
    }

    overallMshrUncacheable
        .name(name() + ".overall_mshr_uncacheable_misses")
        .desc("number of overall MSHR uncacheable misses")
        .flags(total | nozero | nonan)
        ;
    overallMshrUncacheable =
        SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrUncacheable.subname(i, system->getMasterName(i));
    }

    // MSHR uncacheable latency statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_uncacheable_lat[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_mshr_uncacheable_latency")
            .desc("number of " + cstr + " MSHR uncacheable cycles")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            mshr_uncacheable_lat[access_idx].subname(
                i, system->getMasterName(i));
        }
    }

    overallMshrUncacheableLatency
        .name(name() + ".overall_mshr_uncacheable_latency")
        .desc("number of overall MSHR uncacheable cycles")
        .flags(total | nozero | nonan)
        ;
    overallMshrUncacheableLatency =
        SUM_DEMAND(mshr_uncacheable_lat) +
        SUM_NON_DEMAND(mshr_uncacheable_lat);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrUncacheableLatency.subname(i, system->getMasterName(i));
    }
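    // The MSHR access formulas below are compiled out (#if 0); they
    // are retained only as a reference for how combined access counts
    // would be derived from the hit, miss and uncacheable vectors.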
#if 0
    // MSHR access formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshrAccesses[access_idx]
            .name(name() + "." + cstr + "_mshr_accesses")
            .desc("number of " + cstr + " mshr accesses (hits+misses)")
            .flags(total | nozero | nonan)
            ;
        mshrAccesses[access_idx] =
            mshr_hits[access_idx] + mshr_misses[access_idx]
            + mshr_uncacheable[access_idx];
    }

    demandMshrAccesses
        .name(name() + ".demand_mshr_accesses")
        .desc("number of demand (read+write) mshr accesses")
        .flags(total | nozero | nonan)
        ;
    demandMshrAccesses = demandMshrHits + demandMshrMisses;

    overallMshrAccesses
        .name(name() + ".overall_mshr_accesses")
        .desc("number of overall (read+write) mshr accesses")
        .flags(total | nozero | nonan)
        ;
    overallMshrAccesses = overallMshrHits + overallMshrMisses
        + overallMshrUncacheable;
#endif

    // MSHR miss rate formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshrMissRate[access_idx]
            .name(name() + "." + cstr + "_mshr_miss_rate")
            .desc("mshr miss rate for " + cstr + " accesses")
            .flags(total | nozero | nonan)
            ;
        mshrMissRate[access_idx] =
            mshr_misses[access_idx] / accesses[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            mshrMissRate[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMshrMissRate
        .name(name() + ".demand_mshr_miss_rate")
        .desc("mshr miss rate for demand accesses")
        .flags(total | nozero | nonan)
        ;
    demandMshrMissRate = demandMshrMisses / demandAccesses;
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMshrMissRate.subname(i, system->getMasterName(i));
    }

    overallMshrMissRate
        .name(name() + ".overall_mshr_miss_rate")
        .desc("mshr miss rate for overall accesses")
        .flags(total | nozero | nonan)
        ;
    overallMshrMissRate = overallMshrMisses / overallAccesses;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrMissRate.subname(i, system->getMasterName(i));
    }
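    // Worked example: 10 MSHR misses that accumulate a total miss
    // latency of 1500 average out to 150 per miss below.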
    // mshrMiss latency formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        avgMshrMissLatency[access_idx]
            .name(name() + "." + cstr + "_avg_mshr_miss_latency")
            .desc("average " + cstr + " mshr miss latency")
            .flags(total | nozero | nonan)
            ;
        avgMshrMissLatency[access_idx] =
            mshr_miss_latency[access_idx] / mshr_misses[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            avgMshrMissLatency[access_idx].subname(
                i, system->getMasterName(i));
        }
    }

    demandAvgMshrMissLatency
        .name(name() + ".demand_avg_mshr_miss_latency")
        .desc("average demand mshr miss latency")
        .flags(total | nozero | nonan)
        ;
    demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        demandAvgMshrMissLatency.subname(i, system->getMasterName(i));
    }

    overallAvgMshrMissLatency
        .name(name() + ".overall_avg_mshr_miss_latency")
        .desc("average overall mshr miss latency")
        .flags(total | nozero | nonan)
        ;
    overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallAvgMshrMissLatency.subname(i, system->getMasterName(i));
    }

    // mshrUncacheable latency formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        avgMshrUncacheableLatency[access_idx]
            .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency")
            .desc("average " + cstr + " mshr uncacheable latency")
            .flags(total | nozero | nonan)
            ;
        avgMshrUncacheableLatency[access_idx] =
            mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            avgMshrUncacheableLatency[access_idx].subname(
                i, system->getMasterName(i));
        }
    }

    overallAvgMshrUncacheableLatency
        .name(name() + ".overall_avg_mshr_uncacheable_latency")
        .desc("average overall mshr uncacheable latency")
        .flags(total | nozero | nonan)
        ;
    overallAvgMshrUncacheableLatency =
        overallMshrUncacheableLatency / overallMshrUncacheable;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i));
    }

    replacements
        .name(name() + ".replacements")
        .desc("number of replacements")
        ;
}

///////////////
//
// CpuSidePort
//
///////////////
bool
BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    assert(pkt->isResponse());

    // Express snoop responses from master to slave, e.g., from L1 to L2
    cache->recvTimingSnoopResp(pkt);
    return true;
}


bool
BaseCache::CpuSidePort::tryTiming(PacketPtr pkt)
{
    if (cache->system->bypassCaches() || pkt->isExpressSnoop()) {
        // always let express snoop packets through, even if blocked
        return true;
    } else if (blocked || mustSendRetry) {
        // either already committed to send a retry, or blocked
        mustSendRetry = true;
        return false;
    }
    mustSendRetry = false;
    return true;
}
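// Once tryTiming returns false, mustSendRetry records the obligation
// to send a retry to the peer when the cache unblocks; until then
// only express snoops are let through.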
bool
BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt)
{
    assert(pkt->isRequest());

    if (cache->system->bypassCaches()) {
        // Just forward the packet if caches are disabled.
        // @todo This should really enqueue the packet rather than
        // forwarding it immediately.
        bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt);
        assert(success);
        return true;
    } else if (tryTiming(pkt)) {
        cache->recvTimingReq(pkt);
        return true;
    }
    return false;
}

Tick
BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt)
{
    if (cache->system->bypassCaches()) {
        // Forward the request if the system is in cache bypass mode.
        return cache->memSidePort.sendAtomic(pkt);
    } else {
        return cache->recvAtomic(pkt);
    }
}

void
BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt)
{
    if (cache->system->bypassCaches()) {
        // The cache should be flushed if we are in cache bypass mode,
        // so we don't need to check if we need to update anything.
        cache->memSidePort.sendFunctional(pkt);
        return;
    }

    // functional request
    cache->functionalAccess(pkt, true);
}

AddrRangeList
BaseCache::CpuSidePort::getAddrRanges() const
{
    return cache->getAddrRanges();
}


BaseCache::
CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache,
                         const std::string &_label)
    : CacheSlavePort(_name, _cache, _label), cache(_cache)
{
}

///////////////
//
// MemSidePort
//
///////////////
bool
BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt)
{
    cache->recvTimingResp(pkt);
    return true;
}

// Express snooping requests to memside port
void
BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    // handle snooping requests
    cache->recvTimingSnoopReq(pkt);
}

Tick
BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    return cache->recvAtomicSnoop(pkt);
}

void
BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    // functional snoop (note that in contrast to atomic we don't have
    // a specific functionalSnoop method, as they have the same
    // behaviour regardless)
    cache->functionalAccess(pkt, false);
}

void
BaseCache::CacheReqPacketQueue::sendDeferredPacket()
{
    // sanity check
    assert(!waitingOnRetry);

    // there should never be any deferred request packets in the
    // queue; instead we rely on the cache to provide the packets
    // from the MSHR queue or write queue
    assert(deferredPacketReadyTime() == MaxTick);

    // check for request packets (requests & writebacks)
    QueueEntry* entry = cache.getNextQueueEntry();

    if (!entry) {
        // can happen if e.g. we attempt a writeback and fail, but
        // before the retry, the writeback is eliminated because
        // we snoop another cache's ReadEx.
    } else {
        // let our snoop responses go first if there are responses to
        // the same addresses
        if (checkConflictingSnoop(entry->blkAddr)) {
            return;
        }
        waitingOnRetry = entry->sendPacket(cache);
    }

    // if we succeeded and are not waiting for a retry, schedule the
    // next send considering when the next queue is ready; note that
    // snoop responses have their own packet queue and thus schedule
    // their own events
    if (!waitingOnRetry) {
        schedSendEvent(cache.nextQueueReadyTime());
    }
}

BaseCache::MemSidePort::MemSidePort(const std::string &_name,
                                    BaseCache *_cache,
                                    const std::string &_label)
    : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
      _reqQueue(*_cache, *this, _snoopRespQueue, _label),
      _snoopRespQueue(*_cache, *this, _label), cache(_cache)
{
}
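// Sketch of the resulting mode progression for a strictly sequential
// write stream, assuming hypothetical limits of coalesceLimit = 2 and
// noAllocateLimit = 4 cache lines worth of bytes: the allocator starts
// in ALLOCATE, moves to COALESCE once more than two lines of
// contiguous data have been seen, and to NO_ALLOCATE past four; any
// break in the sequence resets byteCount and returns to ALLOCATE.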
void
WriteAllocator::updateMode(Addr write_addr, unsigned write_size,
                           Addr blk_addr)
{
    // check if we are continuing where the last write ended
    if (nextAddr == write_addr) {
        delayCtr[blk_addr] = delayThreshold;
        // stop if we have already saturated
        if (mode != WriteMode::NO_ALLOCATE) {
            byteCount += write_size;
            // switch to streaming mode if we have passed the lower
            // threshold
            if (mode == WriteMode::ALLOCATE &&
                byteCount > coalesceLimit) {
                mode = WriteMode::COALESCE;
                DPRINTF(Cache, "Switched to write coalescing\n");
            } else if (mode == WriteMode::COALESCE &&
                       byteCount > noAllocateLimit) {
                // and continue and switch to non-allocating mode if we
                // pass the upper threshold
                mode = WriteMode::NO_ALLOCATE;
                DPRINTF(Cache, "Switched to write-no-allocate\n");
            }
        }
    } else {
        // we did not see a write matching the previous one, start
        // over again
        byteCount = write_size;
        mode = WriteMode::ALLOCATE;
        resetDelay(blk_addr);
    }
    nextAddr = write_addr + write_size;
}

WriteAllocator*
WriteAllocatorParams::create()
{
    return new WriteAllocator(this);
}