base.cc revision 13717
/*
 * Copyright (c) 2012-2013, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Definition of BaseCache functions.
 */

#include "mem/cache/base.hh"

#include "base/compiler.hh"
#include "base/logging.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheRepl.hh"
#include "debug/CacheVerbose.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "mem/cache/queue_entry.hh"
#include "params/BaseCache.hh"
#include "params/WriteAllocator.hh"
#include "sim/core.hh"

class BaseMasterPort;
class BaseSlavePort;

using namespace std;

BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
                                          BaseCache *_cache,
                                          const std::string &_label)
    : QueuedSlavePort(_name, _cache, queue),
      queue(*_cache, *this, true, _label),
      blocked(false), mustSendRetry(false),
      sendRetryEvent([this]{ processSendRetry(); }, _name)
{
}

BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
    : MemObject(p),
      cpuSidePort(p->name + ".cpu_side", this, "CpuSidePort"),
      memSidePort(p->name + ".mem_side", this, "MemSidePort"),
      mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
      writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
      tags(p->tags),
      prefetcher(p->prefetcher),
      writeAllocator(p->write_allocator),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
                                    name(), false,
                                    EventBase::Delayed_Writeback_Pri),
      blkSize(blk_size),
      lookupLatency(p->tag_latency),
      dataLatency(p->data_latency),
      forwardLatency(p->tag_latency),
      fillLatency(p->data_latency),
      responseLatency(p->response_latency),
      sequentialAccess(p->sequential_access),
      numTarget(p->tgts_per_mshr),
      forwardSnoops(true),
      clusivity(p->clusivity),
      isReadOnly(p->is_read_only),
      blocked(0),
      order(0),
      noTargetMSHR(nullptr),
      missCount(p->max_miss_count),
      addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()),
      system(p->system)
{
    // the MSHR queue has no reserve entries as we check the MSHR
    // queue on every single allocation, whereas the write queue has
    // as many reserve entries as we have MSHRs, since every MSHR may
    // eventually require a writeback, and we do not check the write
    // buffer before committing to an MSHR

    // forward snoops is overridden in init() once we can query
    // whether the connected master is actually snooping or not

    tempBlock = new TempCacheBlk(blkSize);

    tags->tagsInit();
    if (prefetcher)
        prefetcher->setCache(this);
}

BaseCache::~BaseCache()
{
    delete tempBlock;
}

void
BaseCache::CacheSlavePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(CachePort, "Port is blocking new requests\n");
    blocked = true;
    // if we already scheduled a retry in this cycle, but it has not yet
    // happened, cancel it
    if (sendRetryEvent.scheduled()) {
        owner.deschedule(sendRetryEvent);
        DPRINTF(CachePort, "Port descheduled retry\n");
        mustSendRetry = true;
    }
}

void
BaseCache::CacheSlavePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(CachePort, "Port is accepting new requests\n");
    blocked = false;
    if (mustSendRetry) {
        // @TODO: need to find a better time (next cycle?)
        owner.schedule(sendRetryEvent, curTick() + 1);
    }
}

void
BaseCache::CacheSlavePort::processSendRetry()
{
    DPRINTF(CachePort, "Port is sending retry\n");

    // reset the flag and call retry
    mustSendRetry = false;
    sendRetryReq();
}

Addr
BaseCache::regenerateBlkAddr(CacheBlk* blk)
{
    if (blk != tempBlock) {
        return tags->regenerateBlkAddr(blk);
    } else {
        return tempBlock->getAddr();
    }
}

void
BaseCache::init()
{
    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
        fatal("Cache ports on %s are not connected\n", name());
    cpuSidePort.sendRangeChange();
    forwardSnoops = cpuSidePort.isSnooping();
}

BaseMasterPort &
BaseCache::getMasterPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_side") {
        return memSidePort;
    } else {
        return MemObject::getMasterPort(if_name, idx);
    }
}

BaseSlavePort &
BaseCache::getSlavePort(const std::string &if_name, PortID idx)
{
    if (if_name == "cpu_side") {
        return cpuSidePort;
    } else {
        return MemObject::getSlavePort(if_name, idx);
    }
}

bool
BaseCache::inRange(Addr addr) const
{
    for (const auto& r : addrRanges) {
        if (r.contains(addr)) {
            return true;
        }
    }
    return false;
}

void
BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    if (pkt->needsResponse()) {
        pkt->makeTimingResponse();
        // @todo: Make someone pay for this
        pkt->headerDelay = pkt->payloadDelay = 0;

        // In this case we are considering request_time that takes
        // into account the delay of the xbar, if any, and just
        // lat, neglecting responseLatency, modelling hit latency
        // just as the value of lat overridden by access(), which calls
        // the calculateAccessLatency() function.
        cpuSidePort.schedTimingResp(pkt, request_time);
    } else {
        DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
                pkt->print());

        // queue the packet for deletion, as the sending cache is
        // still relying on it; if the block is found in access(),
        // CleanEvict and Writeback messages will be deleted
        // here as well
        pendingDelete.reset(pkt);
    }
}

void
BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                               Tick forward_time, Tick request_time)
{
    if (writeAllocator &&
        pkt && pkt->isWrite() && !pkt->req->isUncacheable()) {
        writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
                                   pkt->getBlockAddr(blkSize));
    }

    if (mshr) {
        /// MSHR hit
        /// @note writebacks will be checked in getNextMSHR()
        /// for any conflicting requests to the same block

        //@todo remove hw_pf here

        // Coalesce unless it was a software prefetch (see above).
        if (pkt) {
            assert(!pkt->isWriteback());
            // CleanEvicts corresponding to blocks which have
            // outstanding requests in MSHRs are simply sunk here
            if (pkt->cmd == MemCmd::CleanEvict) {
                pendingDelete.reset(pkt);
            } else if (pkt->cmd == MemCmd::WriteClean) {
                // A WriteClean should never coalesce with any
                // outstanding cache maintenance requests.

                // We use forward_time here because there is an
                // uncached memory write, forwarded to WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
                        pkt->print());

                assert(pkt->req->masterId() < system->maxMasters());
                mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // We use forward_time here because it is the same
                // considering new targets. We have multiple
                // requests for the same address here. It
                // specifies the latency to allocate an internal
                // buffer and to schedule an event to the queued
                // port and also takes into account the additional
                // delay of the xbar.
                mshr->allocateTarget(pkt, forward_time, order++,
                                     allocOnFill(pkt->cmd));
                if (mshr->getNumTargets() == numTarget) {
                    noTargetMSHR = mshr;
                    setBlocked(Blocked_NoTargets);
                    // need to be careful with this... if this mshr isn't
                    // ready yet (i.e. time > curTick()), we don't want to
                    // move it ahead of mshrs that are ready
                    // mshrQueue.moveToFront(mshr);
                }
            }
        }
    } else {
        // no MSHR
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

        if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
            // We use forward_time here because there is a
            // writeback or writeclean, forwarded to WriteBuffer.
            allocateWriteBuffer(pkt, forward_time);
        } else {
            if (blk && blk->isValid()) {
                // If we have a write miss to a valid block, we
                // need to mark the block non-readable. Otherwise
                // if we allow reads while there's an outstanding
                // write miss, the read could return stale data
                // out of the cache block... a more aggressive
                // system could detect the overlap (if any) and
                // forward data out of the MSHRs, but we don't do
                // that yet. Note that we do need to leave the
                // block valid so that it stays in the cache, in
                // case we get an upgrade response (and hence no
                // new data) when the write miss completes.
                // As long as CPUs do proper store/load forwarding
                // internally, and have a sufficiently weak memory
                // model, this is probably unnecessary, but at some
                // point it must have seemed like we needed it...
                assert((pkt->needsWritable() && !blk->isWritable()) ||
                       pkt->req->isCacheMaintenance());
                blk->status &= ~BlkReadable;
            }
            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }
    }
}

void
BaseCache::recvTimingReq(PacketPtr pkt)
{
    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    Cycles lat;
    CacheBlk *blk = nullptr;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() will set the lat value.
        satisfied = access(pkt, blk, lat, writebacks);

        // copy writebacks to write buffer here to ensure they logically
        // precede anything happening below
        doWritebacks(writebacks, forward_time);
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is just the value set by the access() function.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;

    if (satisfied) {
        // notify before anything else as later handleTimingReqHit might turn
        // the packet into a response
        ppHit->notify(pkt);

        if (prefetcher && blk && blk->wasPrefetched()) {
            blk->status &= ~BlkHWPrefetched;
        }

        handleTimingReqHit(pkt, blk, request_time);
    } else {
        handleTimingReqMiss(pkt, blk, forward_time, request_time);

        ppMiss->notify(pkt);
    }

    if (prefetcher) {
        // track time of availability of next prefetch, if any
        Tick next_pf_time = prefetcher->nextPrefetchReadyTime();
        if (next_pf_time != MaxTick) {
            schedMemSideSendEvent(next_pf_time);
        }
    }
}

void
BaseCache::handleUncacheableWriteResp(PacketPtr pkt)
{
    Tick completion_time = clockEdge(responseLatency) +
        pkt->headerDelay + pkt->payloadDelay;

    // Reset the bus additional time as it is now accounted for
    pkt->headerDelay = pkt->payloadDelay = 0;

    cpuSidePort.schedTimingResp(pkt, completion_time);
}

void
BaseCache::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    // all header delay should be paid for by the crossbar, unless
    // this is a prefetch response from above
    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
             "%s saw a non-zero packet delay\n", name());

    const bool is_error = pkt->isError();

    if (is_error) {
        DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
                pkt->print());
    }

    DPRINTF(Cache, "%s: Handling response %s\n", __func__,
            pkt->print());

    // if this is a write, we should be looking at an uncacheable
    // write
    if (pkt->isWrite()) {
        assert(pkt->req->isUncacheable());
        handleUncacheableWriteResp(pkt);
        return;
    }

    // we have dealt with any (uncacheable) writes above, from here on
    // we know we are dealing with an MSHR due to a miss or a prefetch
    MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
    assert(mshr);

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = nullptr;
    }

    // Initial target is used just for stats
    MSHR::Target *initial_tgt = mshr->getTarget();
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick() - initial_tgt->recvTime;

    if (pkt->req->isUncacheable()) {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    } else {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    }

    PacketList writebacks;

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp ||
         mshr->wasWholeLineWrite);

    // make sure that if the mshr was due to a whole line write then
    // the response is an invalidation
    assert(!mshr->wasWholeLineWrite || pkt->isInvalidate());

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                pkt->getAddr());

        const bool allocate = (writeAllocator && mshr->wasWholeLineWrite) ?
            writeAllocator->allocate() : mshr->allocOnFill();
        blk = handleFill(pkt, blk, writebacks, allocate);
        assert(blk != nullptr);
        ppFill->notify(pkt);
    }

    if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
        // The block was marked not readable while there was a pending
        // cache maintenance operation, restore its flag.
        blk->status |= BlkReadable;

        // This was a cache clean operation (without invalidate)
        // and we have a copy of the block already. Since there
        // is no invalidation, we can promote targets that don't
        // require a writable copy
        mshr->promoteReadable();
    }

    if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) {
        // If at this point the referenced block is writable and the
        // response is not a cache invalidate, we promote targets that
        // were deferred as we couldn't guarantee a writable copy
        mshr->promoteWritable();
    }

    serviceMSHRTargets(mshr, pkt, blk);

    if (mshr->promoteDeferredTargets()) {
        // avoid later read getting stale data while write miss is
        // outstanding.. see comment in timingAccess()
        if (blk) {
            blk->status &= ~BlkReadable;
        }
        mshrQueue.markPending(mshr);
        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
    } else {
        // while we deallocate an mshr from the queue we still have to
        // check the isFull condition before and after as we might
        // have been using the reserved entries already
        const bool was_full = mshrQueue.isFull();
        mshrQueue.deallocate(mshr);
        if (was_full && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }

        // Request the bus for a prefetch if this deallocation freed enough
        // MSHRs for a prefetch to take place
        if (prefetcher && mshrQueue.canPrefetch()) {
            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
                                         clockEdge());
            if (next_pf_time != MaxTick)
                schedMemSideSendEvent(next_pf_time);
        }
    }

    // if we used temp block, check to see if it's valid and then clear it out
    if (blk == tempBlock && tempBlock->isValid()) {
        evictBlock(blk, writebacks);
    }

    const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // copy writebacks to write buffer
    doWritebacks(writebacks, forward_time);

    DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
    delete pkt;
}


Tick
BaseCache::recvAtomic(PacketPtr pkt)
{
    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    // We use lookupLatency here because it is used to specify the latency
    // to access.
    Cycles lat = lookupLatency;

    CacheBlk *blk = nullptr;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty
        // block. If a dirty block is encountered a WriteClean
        // will update any copies along the path to memory
        // until the point of reference.
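        // (the WriteClean created below is queued on the local
        // writebacks list and pushed out by the doWritebacksAtomic()
        // call that follows this block)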
        DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                __func__, pkt->print(), blk->print());
        PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
        writebacks.push_back(wb_pkt);
        pkt->setSatisfied();
    }

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);
    assert(writebacks.empty());

    if (!satisfied) {
        lat += handleAtomicReqMiss(pkt, blk, writebacks);
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention.  If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used temp block, check to see if it's valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter), first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = evictBlock(blk);
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat * clockPeriod();
}

void
BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
{
    Addr blk_addr = pkt->getBlockAddr(blkSize);
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found.  As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.
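    // For example, this cache may hold a valid but stale copy while an
    // L1 above has the line in Modified state: we can still update the
    // packet from our data below (have_data), but must not declare the
    // access done; the functional snoop to the cpu-side port is what
    // finds the up-to-date copy.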

    // see if we have data at all (owned or otherwise)
    bool have_data = blk && blk->isValid()
        && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize,
                                     blk->data);

    // data we have is dirty if marked as such or if we have an
    // in-service MSHR that is pending a modified line
    bool have_dirty =
        have_data && (blk->isDirty() ||
                      (mshr && mshr->inService && mshr->isPendingModified()));

    bool done = have_dirty ||
        cpuSidePort.trySatisfyFunctional(pkt) ||
        mshrQueue.trySatisfyFunctional(pkt, blk_addr) ||
        writeBuffer.trySatisfyFunctional(pkt, blk_addr) ||
        memSidePort.trySatisfyFunctional(pkt);

    DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
            (blk && blk->isValid()) ? "valid " : "",
            have_data ? "data " : "", done ? "done " : "");

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (done) {
        pkt->makeResponse();
    } else {
        // if it came as a request from the CPU side then make sure it
        // continues towards the memory side
        if (from_cpu_side) {
            memSidePort.sendFunctional(pkt);
        } else if (cpuSidePort.isSnooping()) {
            // if it came from the memory side, it must be a snoop request
            // and we should only forward it if we are forwarding snoops
            cpuSidePort.sendFunctionalSnoop(pkt);
        }
    }
}


void
BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = pkt->getOffset(blkSize);
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}

QueueEntry*
BaseCache::getNextQueueEntry()
{
    // Check both MSHR queue and write buffer for potential requests,
    // note that null does not mean there is no request, it could
    // simply be that it is not ready
    MSHR *miss_mshr = mshrQueue.getNext();
    WriteQueueEntry *wq_entry = writeBuffer.getNext();

    // If we got a write buffer request ready, first priority is a
    // full write buffer, otherwise we favour the miss requests
    if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
        // need to search MSHR queue for conflicting earlier miss.
        MSHR *conflict_mshr =
            mshrQueue.findPending(wq_entry->blkAddr,
                                  wq_entry->isSecure);

        if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
            // Service misses in order until conflict is cleared.
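            // e.g. a miss to this block allocated at order 7 must be
            // serviced before a write-buffer entry to the same block
            // that was queued later at order 12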
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue write
        return wq_entry;
    } else if (miss_mshr) {
        // need to check for conflicting earlier writeback
        WriteQueueEntry *conflict_mshr =
            writeBuffer.findPending(miss_mshr->blkAddr,
                                    miss_mshr->isSecure);
        if (conflict_mshr) {
            // not sure why we don't check order here... it was in the
            // original code but commented out.

            // The only way this happens is if we are doing a write and
            // we didn't have permissions, then subsequently saw a
            // writeback (owned got evicted). We need to make sure to
            // perform the writeback first to preserve the dirty data,
            // then we can issue the write

            // should we return wq_entry here instead?  I.e. do we
            // have to flush writes in order?  I don't think so... not
            // for Alpha anyway.  Maybe for x86?
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue read
        return miss_mshr;
    }

    // fall through... no pending requests.  Try a prefetch.
    assert(!miss_mshr && !wq_entry);
    if (prefetcher && mshrQueue.canPrefetch()) {
        // If we have a miss queue slot, we can try a prefetch
        PacketPtr pkt = prefetcher->getPacket();
        if (pkt) {
            Addr pf_addr = pkt->getBlockAddr(blkSize);
            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
                // Update statistic on number of prefetches issued
                // (hwpf_mshr_misses)
                assert(pkt->req->masterId() < system->maxMasters());
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // allocate an MSHR and return it, note
                // that we send the packet straight away, so do not
                // schedule the send
                return allocateMissBuffer(pkt, curTick(), false);
            } else {
                // free the request and packet
                delete pkt;
            }
        }
    }

    return nullptr;
}

void
BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsWritable() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        if (pkt->isAtomicOp()) {
            // extract data from cache and save it into the data field in
            // the packet as a return value from this atomic op
            int offset = tags->extractBlkOffset(pkt->getAddr());
            uint8_t *blk_data = blk->data + offset;
            pkt->setData(blk_data);

            // execute AMO operation
            (*(pkt->getAtomicOp()))(blk_data);

            // set block status to dirty
            blk->status |= BlkDirty;
        } else {
            cmpAndSwap(blk, pkt);
        }
    } else if (pkt->isWrite()) {
        // we have the block in a writable state and can go ahead,
        // note that the line may also be considered writable in
        // downstream caches along the path to memory, but always
        // Exclusive, and never Modified
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in writable state
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty (and thus transition to the
        // Modified state) even if we are a failed StoreCond so we
        // supply data to any snoops that have appended themselves to
        // this cache before knowing the store will fail.
        blk->status |= BlkDirty;
        DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }

        // all read responses have a data payload
        assert(pkt->hasRespData());
        pkt->setDataFromBlock(blk->data, blkSize);
    } else if (pkt->isUpgrade()) {
        // sanity check
        assert(!pkt->hasSharers());

        if (blk->isDirty()) {
            // we were in the Owned state, and a cache above us that
            // has the line in Shared state needs to be made aware
            // that the data it already has is in fact dirty
            pkt->setCacheResponding();
            blk->status &= ~BlkDirty;
        }
    } else if (pkt->isClean()) {
        blk->status &= ~BlkDirty;
    } else {
        assert(pkt->isInvalidate());
        invalidateBlock(blk);
        DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
                pkt->print());
    }
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////
Cycles
BaseCache::calculateAccessLatency(const CacheBlk* blk,
                                  const Cycles lookup_lat) const
{
    Cycles lat(lookup_lat);

    if (blk != nullptr) {
        // First access tags, then data
        if (sequentialAccess) {
            lat += dataLatency;
        // Latency is dictated by the slowest of tag and data latencies
        } else {
            lat = std::max(lookup_lat, dataLatency);
        }

        // Check if the block to be accessed is available. If not, apply the
        // access latency on top of when the block is ready to be accessed.
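        // Illustrative numbers: with lookup_lat = 2 and dataLatency = 4
        // cycles, a sequential access costs 2 + 4 = 6 cycles while a
        // parallel one costs max(2, 4) = 4; if the block only becomes
        // ready more than 'lat' cycles from now, that remaining wait is
        // added on top by the check below.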
        const Tick when_ready = blk->getWhenReady();
        if (when_ready > curTick() &&
            ticksToCycles(when_ready - curTick()) > lat) {
            lat += ticksToCycles(when_ready - curTick());
        }
    }

    return lat;
}

bool
BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
                  PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());

    // Access block in the tags
    Cycles tag_latency(0);
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), tag_latency);

    // Calculate access latency
    lat = calculateAccessLatency(blk, tag_latency);

    DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(),
            blk ? "hit " + blk->print() : "miss");

    if (pkt->req->isCacheMaintenance()) {
        // A cache maintenance operation is always forwarded to the
        // memory below even if the block is found in dirty state.

        // We defer any changes to the state of the block until we
        // create and mark as in service the mshr for the downstream
        // packet.
        return false;
    }

    if (pkt->isEviction()) {
        // We check for presence of block in above caches before issuing
        // Writeback or CleanEvict to write buffer. Therefore the only
        // possible case is a CleanEvict packet coming from above that
        // encounters a Writeback, generated by a peer of the sending
        // cache, waiting in this cache's write buffer. Cases of upper
        // level peer caches generating CleanEvict and Writeback or
        // simply CleanEvict and CleanEvict almost simultaneously will
        // be caught by snoops sent out by the crossbar.
        WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
                                                          pkt->isSecure());
        if (wb_entry) {
            assert(wb_entry->getNumTargets() == 1);
            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
            assert(wbPkt->isWriteback());

            if (pkt->isCleanEviction()) {
                // CleanEvict and WritebackClean packets snoop into
                // other peer caches of the same level while traversing
                // the crossbar. If a copy of the block is found, the
                // packet is deleted in the crossbar. Hence, none of
                // the other upper level caches connected to this
                // cache have the block, so we can clear the
                // BLOCK_CACHED flag in the Writeback if set and
                // discard the CleanEvict by returning true.
                wbPkt->clearBlockCached();
                return true;
            } else {
                assert(pkt->cmd == MemCmd::WritebackDirty);
                // Dirty writeback from above trumps our clean
                // writeback... discard here
                // Note: markInService will remove entry from writeback buffer.
                markInService(wb_entry);
                delete wbPkt;
            }
        }
    }

    // Writeback handling is a special case.  We can write the block into
    // the cache without having a writeable copy (or any copy at all).
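    // For instance, a WritebackDirty arriving from the cache above is
    // absorbed here even on a miss: allocateBlock() below provides a
    // victim entry for it instead of requiring an existing copy.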
    if (pkt->isWriteback()) {
        assert(blkSize == pkt->getSize());

        // we could get a clean writeback while we have outstanding
        // accesses to a block, do the simple thing for
        // now and drop the clean writeback so that we do not upset
        // any ordering/decisions about ownership already taken
        if (pkt->cmd == MemCmd::WritebackClean &&
            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
                    "dropping\n", pkt->getAddr());
            return true;
        }

        if (!blk) {
            // need to do a replacement
            blk = allocateBlock(pkt, writebacks);
            if (!blk) {
                // no replaceable block available: give up, fwd to next level.
                incMissCount(pkt);
                return false;
            }

            blk->status |= BlkReadable;
        }
        // only mark the block dirty if we got a writeback command,
        // and leave it as is for a clean writeback
        if (pkt->cmd == MemCmd::WritebackDirty) {
            // TODO: the coherent cache can assert(!blk->isDirty());
            blk->status |= BlkDirty;
        }
        // if the packet does not have sharers, it is passing
        // writable, and we got the writeback in Modified or Exclusive
        // state, if not we are in the Owned or Shared state
        if (!pkt->hasSharers()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
        incHitCount(pkt);
        // populate the time when the block will be ready to access.
        blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
                          pkt->payloadDelay);
        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        if (blk) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and delete
            // it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the CleanEvict
        // like a Writeback which could not find a replaceable block so has to
        // go to next level.
        return false;
    } else if (pkt->cmd == MemCmd::WriteClean) {
        // WriteClean handling is a special case. We can allocate a
        // block directly if it doesn't exist and we can update the
        // block immediately. The WriteClean transfers the ownership
        // of the block as well.
        assert(blkSize == pkt->getSize());

        if (!blk) {
            if (pkt->writeThrough()) {
                // if this is a write through packet, we don't try to
                // allocate if the block is not present
                return false;
            } else {
                // a writeback that misses needs to allocate a new block
                blk = allocateBlock(pkt, writebacks);
                if (!blk) {
                    // no replaceable block available: give up, fwd to
                    // next level.
                    incMissCount(pkt);
                    return false;
                }

                blk->status |= BlkReadable;
            }
        }

        // at this point either this is a writeback or a write-through
        // write clean operation and the block is already in this
        // cache, we need to update the data and the block flags
        assert(blk);
        // TODO: the coherent cache can assert(!blk->isDirty());
        if (!pkt->writeThrough()) {
            blk->status |= BlkDirty;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());

        incHitCount(pkt);
        // populate the time when the block will be ready to access.
        blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
                          pkt->payloadDelay);
        // if this is a write-through packet it will be sent to cache
        // below
        return !pkt->writeThrough();
    } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
                       blk->isReadable())) {
        // OK to satisfy access
        incHitCount(pkt);
        satisfyRequest(pkt, blk);
        maintainClusivity(pkt->fromCache(), blk);

        return true;
    }

    // Can't satisfy access normally... either no block (blk == nullptr)
    // or have block but need writable

    incMissCount(pkt);

    if (!blk && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}

void
BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
{
    if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
        clusivity == Enums::mostly_excl) {
        // if we have responded to a cache, and our block is still
        // valid, but not dirty, and this cache is mostly exclusive
        // with respect to the cache above, drop the block
        invalidateBlock(blk);
    }
}

CacheBlk*
BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
                      bool allocate)
{
    assert(pkt->isResponse());
    Addr addr = pkt->getAddr();
    bool is_secure = pkt->isSecure();
#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    // When handling a fill, we should have no writes to this line.
    assert(addr == pkt->getBlockAddr(blkSize));
    assert(!writeBuffer.findMatch(addr, is_secure));

    if (!blk) {
        // better have read new data...
        assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp);

        // need to do a replacement if allocating, otherwise we stick
        // with the temporary storage
        blk = allocate ? allocateBlock(pkt, writebacks) : nullptr;

        if (!blk) {
            // No replaceable block or a mostly exclusive
            // cache... just use temporary storage to complete the
            // current request and then get rid of it
            blk = tempBlock;
            tempBlock->insert(addr, is_secure);
            DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
                    is_secure ? "s" : "ns");
        }
    } else {
        // existing block... probably an upgrade
        // don't clear block status... if block is already dirty we
        // don't want to lose that
    }

    // Block is guaranteed to be valid at this point
    assert(blk->isValid());
    assert(blk->isSecure() == is_secure);
    assert(regenerateBlkAddr(blk) == addr);

    blk->status |= BlkReadable;

    // sanity check for whole-line writes, which should always be
    // marked as writable as part of the fill, and then later marked
    // dirty as part of satisfyRequest
    if (pkt->cmd == MemCmd::InvalidateResp) {
        assert(!pkt->hasSharers());
    }

    // here we deal with setting the appropriate state of the line,
    // and we start by looking at the hasSharers flag, and ignore the
    // cacheResponding flag (normally signalling dirty data) if the
    // packet has sharers, thus the line is never allocated as Owned
    // (dirty but not writable), and always ends up being either
    // Shared, Exclusive or Modified, see Packet::setCacheResponding
    // for more details
    if (!pkt->hasSharers()) {
        // we could get a writable line from memory (rather than a
        // cache) even in a read-only cache, note that we set this bit
        // even for a read-only cache, possibly revisit this decision
        blk->status |= BlkWritable;

        // check if we got this via cache-to-cache transfer (i.e., from a
        // cache that had the block in Modified or Owned state)
        if (pkt->cacheResponding()) {
            // we got the block in Modified state, and invalidated the
            // owners copy
            blk->status |= BlkDirty;

            chatty_assert(!isReadOnly, "Should never see dirty snoop response "
                          "in read-only cache %s\n", name());
        }
    }

    DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
            addr, is_secure ? "s" : "ns", old_state, blk->print());

    // if we got new data, copy it in (checking for a read response
    // and a response that has data is the same in the end)
    if (pkt->isRead()) {
        // sanity checks
        assert(pkt->hasData());
        assert(pkt->getSize() == blkSize);

        pkt->writeDataToBlock(blk->data, blkSize);
    }
    // We pay for fillLatency here.
    blk->setWhenReady(clockEdge(fillLatency) + pkt->payloadDelay);

    return blk;
}

CacheBlk*
BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks)
{
    // Get address
    const Addr addr = pkt->getAddr();

    // Get secure bit
    const bool is_secure = pkt->isSecure();

    // Find replacement victim
    std::vector<CacheBlk*> evict_blks;
    CacheBlk *victim = tags->findVictim(addr, is_secure, evict_blks);

    // It is valid to return nullptr if there is no victim
    if (!victim)
        return nullptr;

    // Print victim block's information
    DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print());

    // Check for transient state allocations. If any of the entries listed
    // for eviction has a transient state, the allocation fails
    for (const auto& blk : evict_blks) {
        if (blk->isValid()) {
            Addr repl_addr = regenerateBlkAddr(blk);
            MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
            if (repl_mshr) {
                // must be an outstanding upgrade or clean request
                // on a block we're about to replace...
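                // e.g. the victim may have an MSHR in flight upgrading
                // it to a writable state; evicting it now would strand
                // that request, so the allocation is abandoned and the
                // caller either falls back to the temporary block or
                // forwards the request downstream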
                assert((!blk->isWritable() && repl_mshr->needsWritable()) ||
                       repl_mshr->isCleaning());

                // too hard to replace block with transient state
                // allocation failed, block not inserted
                return nullptr;
            }
        }
    }

    // The victim will be replaced by a new entry, so increase the replacement
    // counter if a valid block is being replaced
    if (victim->isValid()) {
        DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx "
                "(%s): %s\n", regenerateBlkAddr(victim),
                victim->isSecure() ? "s" : "ns",
                addr, is_secure ? "s" : "ns",
                victim->isDirty() ? "writeback" : "clean");

        replacements++;
    }

    // Evict valid blocks associated with this victim block
    for (const auto& blk : evict_blks) {
        if (blk->isValid()) {
            if (blk->wasPrefetched()) {
                unusedPrefetches++;
            }

            evictBlock(blk, writebacks);
        }
    }

    // Insert new block at victimized entry
    tags->insertBlock(addr, is_secure, pkt->req->masterId(),
                      pkt->req->taskId(), victim);

    return victim;
}

void
BaseCache::invalidateBlock(CacheBlk *blk)
{
    // If handling a block present in the Tags, let it do its invalidation
    // process, which will update stats and invalidate the block itself
    if (blk != tempBlock) {
        tags->invalidate(blk);
    } else {
        tempBlock->invalidate();
    }
}

void
BaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks)
{
    PacketPtr pkt = evictBlock(blk);
    if (pkt) {
        writebacks.push_back(pkt);
    }
}

PacketPtr
BaseCache::writebackBlk(CacheBlk *blk)
{
    chatty_assert(!isReadOnly || writebackClean,
                  "Writeback from read-only cache");
    assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));

    writebacks[Request::wbMasterId]++;

    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);

    PacketPtr pkt =
        new Packet(req, blk->isDirty() ?
                   MemCmd::WritebackDirty : MemCmd::WritebackClean);

    DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n",
            pkt->print(), blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting shared means we pass the block in modified
        // state, mark our own block non-writeable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    pkt->setDataFromBlock(blk->data, blkSize);

    return pkt;
}

PacketPtr
BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
{
    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure()) {
        req->setFlags(Request::SECURE);
    }
    req->taskId(blk->task_id);

    PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id);

    if (dest) {
        req->setFlags(dest);
        pkt->setWriteThrough();
    }

    DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(),
            blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting shared means we pass the block in modified
        // state, mark our own block non-writeable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    pkt->setDataFromBlock(blk->data, blkSize);

    return pkt;
}


void
BaseCache::memWriteback()
{
    tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
}

void
BaseCache::memInvalidate()
{
    tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
}

bool
BaseCache::isDirty() const
{
    return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); });
}

bool
BaseCache::coalesce() const
{
    return writeAllocator && writeAllocator->coalesce();
}

void
BaseCache::writebackVisitor(CacheBlk &blk)
{
    if (blk.isDirty()) {
        assert(blk.isValid());

        RequestPtr request = std::make_shared<Request>(
            regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId);

        request->taskId(blk.task_id);
        if (blk.isSecure()) {
            request->setFlags(Request::SECURE);
        }

        Packet packet(request, MemCmd::WriteReq);
        packet.dataStatic(blk.data);

        memSidePort.sendFunctional(&packet);

        blk.status &= ~BlkDirty;
    }
}

void
BaseCache::invalidateVisitor(CacheBlk &blk)
{
    if (blk.isDirty())
        warn_once("Invalidating dirty cache lines. "
                  "Expect things to break.\n");

    if (blk.isValid()) {
        assert(!blk.isDirty());
        invalidateBlock(&blk);
    }
}

Tick
BaseCache::nextQueueReadyTime() const
{
    Tick nextReady = std::min(mshrQueue.nextReadyTime(),
                              writeBuffer.nextReadyTime());

    // Don't signal prefetch ready time if no MSHRs available
    // Will signal once enough MSHRs are deallocated
    if (prefetcher && mshrQueue.canPrefetch()) {
        nextReady = std::min(nextReady,
                             prefetcher->nextPrefetchReadyTime());
    }

    return nextReady;
}


bool
BaseCache::sendMSHRQueuePacket(MSHR* mshr)
{
    assert(mshr);

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;

    DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());

    // if the cache is in write coalescing mode or (additionally) in
    // no allocation mode, and we have a write packet with an MSHR
    // that is not a whole-line write (due to incompatible flags etc),
    // then reset the write mode
    if (writeAllocator && writeAllocator->coalesce() && tgt_pkt->isWrite()) {
        if (!mshr->isWholeLineWrite()) {
            // if we are currently write coalescing, hold on to the
            // MSHR for as many extra cycles as we need to completely
            // write a cache line
            if (writeAllocator->delay(mshr->blkAddr)) {
                Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod();
                DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow "
                        "for write coalescing\n", tgt_pkt->print(), delay);
                mshrQueue.delay(mshr, delay);
                return false;
            } else {
                writeAllocator->reset();
            }
        } else {
            writeAllocator->resetDelay(mshr->blkAddr);
        }
    }

    CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);

    // either a prefetch that is not present upstream, or a normal
    // MSHR request, proceed to get the packet to send downstream
    PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable(),
                                     mshr->isWholeLineWrite());

    mshr->isForward = (pkt == nullptr);

    if (mshr->isForward) {
        // not a cache block request, but a response is expected
        // make copy of current packet to forward, keep current
        // copy for response handling
        pkt = new Packet(tgt_pkt, false, true);
        assert(!pkt->isWrite());
    }

    // play it safe and append (rather than set) the sender state,
    // as forwarded packets may already have existing state
    pkt->pushSenderState(mshr);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty block. Mark
        // the packet so that the destination xbar can determine that
        // there will be a follow-up write packet as well.
        pkt->setSatisfied();
    }

    if (!memSidePort.sendTimingReq(pkt)) {
        // we are awaiting a retry, but we
        // delete the packet and will be creating a new packet
        // when we get the opportunity
        delete pkt;

        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        // As part of the call to sendTimingReq the packet is
        // forwarded to all neighbouring caches (and any caches
        // above them) as a snoop.  Thus at this point we know if
        // any of the neighbouring caches are responding, and if
        // so, we know it is dirty, and we can determine if it is
        // being passed as Modified, making our MSHR the ordering
        // point
        bool pending_modified_resp = !pkt->hasSharers() &&
            pkt->cacheResponding();
        markInService(mshr, pending_modified_resp);

        if (pkt->isClean() && blk && blk->isDirty()) {
            // A cache clean operation is looking for a dirty
            // block. If a dirty block is encountered a WriteClean
            // will update any copies along the path to memory
            // until the point of reference.
            DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
                                             pkt->id);
            PacketList writebacks;
            writebacks.push_back(wb_pkt);
            doWritebacks(writebacks, 0);
        }

        return false;
    }
}

bool
BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
{
    assert(wq_entry);

    // always a single target for write queue entries
    PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;

    DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());

    // forward as is, both for evictions and uncacheable writes
    if (!memSidePort.sendTimingReq(tgt_pkt)) {
        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        markInService(wq_entry);
        return false;
    }
}

void
BaseCache::serialize(CheckpointOut &cp) const
{
    bool dirty(isDirty());

    if (dirty) {
        warn("*** The cache still contains dirty data. ***\n");
        warn("    Make sure to drain the system using the correct flags.\n");
        warn("    This checkpoint will not restore correctly "
             "and dirty data in the cache will be lost!\n");
    }

    // Since we don't checkpoint the data in the cache, any dirty data
    // will be lost when restoring from a checkpoint of a system that
    // wasn't drained properly. Flag the checkpoint as invalid if the
    // cache contains dirty data.
    bool bad_checkpoint(dirty);
    SERIALIZE_SCALAR(bad_checkpoint);
}

void
BaseCache::unserialize(CheckpointIn &cp)
{
    bool bad_checkpoint;
    UNSERIALIZE_SCALAR(bad_checkpoint);
    if (bad_checkpoint) {
        fatal("Restoring from checkpoints with dirty caches is not "
              "supported in the classic memory system. Please remove any "
              "caches or drain them properly before taking checkpoints.\n");
    }
}

void
BaseCache::regStats()
{
    MemObject::regStats();

    using namespace Stats;

    // Hit statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        hits[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_hits")
            .desc("number of " + cstr + " hits")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            hits[access_idx].subname(i, system->getMasterName(i));
        }
    }

// These macros make it easier to sum the right subset of commands and
// to change the subset of commands that are considered "demand" vs
// "non-demand"
#define SUM_DEMAND(s) \
    (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \
     s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq])

// should writebacks be included here?  prior code was inconsistent...
#define SUM_NON_DEMAND(s) \
    (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq] + s[MemCmd::SoftPFExReq])

    demandHits
        .name(name() + ".demand_hits")
        .desc("number of demand (read+write) hits")
        .flags(total | nozero | nonan)
        ;
    demandHits = SUM_DEMAND(hits);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandHits.subname(i, system->getMasterName(i));
    }

    overallHits
        .name(name() + ".overall_hits")
        .desc("number of overall hits")
        .flags(total | nozero | nonan)
        ;
    overallHits = demandHits + SUM_NON_DEMAND(hits);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallHits.subname(i, system->getMasterName(i));
    }

    // Miss statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        misses[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_misses")
            .desc("number of " + cstr + " misses")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            misses[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMisses
        .name(name() + ".demand_misses")
        .desc("number of demand (read+write) misses")
        .flags(total | nozero | nonan)
        ;
    demandMisses = SUM_DEMAND(misses);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMisses.subname(i, system->getMasterName(i));
    }

    overallMisses
        .name(name() + ".overall_misses")
        .desc("number of overall misses")
        .flags(total | nozero | nonan)
        ;
    overallMisses = demandMisses + SUM_NON_DEMAND(misses);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMisses.subname(i, system->getMasterName(i));
    }

    // Miss latency statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        missLatency[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_miss_latency")
+ cstr + "_miss_latency") 1729 .desc("number of " + cstr + " miss cycles") 1730 .flags(total | nozero | nonan) 1731 ; 1732 for (int i = 0; i < system->maxMasters(); i++) { 1733 missLatency[access_idx].subname(i, system->getMasterName(i)); 1734 } 1735 } 1736 1737 demandMissLatency 1738 .name(name() + ".demand_miss_latency") 1739 .desc("number of demand (read+write) miss cycles") 1740 .flags(total | nozero | nonan) 1741 ; 1742 demandMissLatency = SUM_DEMAND(missLatency); 1743 for (int i = 0; i < system->maxMasters(); i++) { 1744 demandMissLatency.subname(i, system->getMasterName(i)); 1745 } 1746 1747 overallMissLatency 1748 .name(name() + ".overall_miss_latency") 1749 .desc("number of overall miss cycles") 1750 .flags(total | nozero | nonan) 1751 ; 1752 overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency); 1753 for (int i = 0; i < system->maxMasters(); i++) { 1754 overallMissLatency.subname(i, system->getMasterName(i)); 1755 } 1756 1757 // access formulas 1758 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1759 MemCmd cmd(access_idx); 1760 const string &cstr = cmd.toString(); 1761 1762 accesses[access_idx] 1763 .name(name() + "." + cstr + "_accesses") 1764 .desc("number of " + cstr + " accesses(hits+misses)") 1765 .flags(total | nozero | nonan) 1766 ; 1767 accesses[access_idx] = hits[access_idx] + misses[access_idx]; 1768 1769 for (int i = 0; i < system->maxMasters(); i++) { 1770 accesses[access_idx].subname(i, system->getMasterName(i)); 1771 } 1772 } 1773 1774 demandAccesses 1775 .name(name() + ".demand_accesses") 1776 .desc("number of demand (read+write) accesses") 1777 .flags(total | nozero | nonan) 1778 ; 1779 demandAccesses = demandHits + demandMisses; 1780 for (int i = 0; i < system->maxMasters(); i++) { 1781 demandAccesses.subname(i, system->getMasterName(i)); 1782 } 1783 1784 overallAccesses 1785 .name(name() + ".overall_accesses") 1786 .desc("number of overall (read+write) accesses") 1787 .flags(total | nozero | nonan) 1788 ; 1789 overallAccesses = overallHits + overallMisses; 1790 for (int i = 0; i < system->maxMasters(); i++) { 1791 overallAccesses.subname(i, system->getMasterName(i)); 1792 } 1793 1794 // miss rate formulas 1795 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1796 MemCmd cmd(access_idx); 1797 const string &cstr = cmd.toString(); 1798 1799 missRate[access_idx] 1800 .name(name() + "." 
+ cstr + "_miss_rate") 1801 .desc("miss rate for " + cstr + " accesses") 1802 .flags(total | nozero | nonan) 1803 ; 1804 missRate[access_idx] = misses[access_idx] / accesses[access_idx]; 1805 1806 for (int i = 0; i < system->maxMasters(); i++) { 1807 missRate[access_idx].subname(i, system->getMasterName(i)); 1808 } 1809 } 1810 1811 demandMissRate 1812 .name(name() + ".demand_miss_rate") 1813 .desc("miss rate for demand accesses") 1814 .flags(total | nozero | nonan) 1815 ; 1816 demandMissRate = demandMisses / demandAccesses; 1817 for (int i = 0; i < system->maxMasters(); i++) { 1818 demandMissRate.subname(i, system->getMasterName(i)); 1819 } 1820 1821 overallMissRate 1822 .name(name() + ".overall_miss_rate") 1823 .desc("miss rate for overall accesses") 1824 .flags(total | nozero | nonan) 1825 ; 1826 overallMissRate = overallMisses / overallAccesses; 1827 for (int i = 0; i < system->maxMasters(); i++) { 1828 overallMissRate.subname(i, system->getMasterName(i)); 1829 } 1830 1831 // miss latency formulas 1832 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1833 MemCmd cmd(access_idx); 1834 const string &cstr = cmd.toString(); 1835 1836 avgMissLatency[access_idx] 1837 .name(name() + "." + cstr + "_avg_miss_latency") 1838 .desc("average " + cstr + " miss latency") 1839 .flags(total | nozero | nonan) 1840 ; 1841 avgMissLatency[access_idx] = 1842 missLatency[access_idx] / misses[access_idx]; 1843 1844 for (int i = 0; i < system->maxMasters(); i++) { 1845 avgMissLatency[access_idx].subname(i, system->getMasterName(i)); 1846 } 1847 } 1848 1849 demandAvgMissLatency 1850 .name(name() + ".demand_avg_miss_latency") 1851 .desc("average overall miss latency") 1852 .flags(total | nozero | nonan) 1853 ; 1854 demandAvgMissLatency = demandMissLatency / demandMisses; 1855 for (int i = 0; i < system->maxMasters(); i++) { 1856 demandAvgMissLatency.subname(i, system->getMasterName(i)); 1857 } 1858 1859 overallAvgMissLatency 1860 .name(name() + ".overall_avg_miss_latency") 1861 .desc("average overall miss latency") 1862 .flags(total | nozero | nonan) 1863 ; 1864 overallAvgMissLatency = overallMissLatency / overallMisses; 1865 for (int i = 0; i < system->maxMasters(); i++) { 1866 overallAvgMissLatency.subname(i, system->getMasterName(i)); 1867 } 1868 1869 blocked_cycles.init(NUM_BLOCKED_CAUSES); 1870 blocked_cycles 1871 .name(name() + ".blocked_cycles") 1872 .desc("number of cycles access was blocked") 1873 .subname(Blocked_NoMSHRs, "no_mshrs") 1874 .subname(Blocked_NoTargets, "no_targets") 1875 ; 1876 1877 1878 blocked_causes.init(NUM_BLOCKED_CAUSES); 1879 blocked_causes 1880 .name(name() + ".blocked") 1881 .desc("number of cycles access was blocked") 1882 .subname(Blocked_NoMSHRs, "no_mshrs") 1883 .subname(Blocked_NoTargets, "no_targets") 1884 ; 1885 1886 avg_blocked 1887 .name(name() + ".avg_blocked_cycles") 1888 .desc("average number of cycles each access was blocked") 1889 .subname(Blocked_NoMSHRs, "no_mshrs") 1890 .subname(Blocked_NoTargets, "no_targets") 1891 ; 1892 1893 avg_blocked = blocked_cycles / blocked_causes; 1894 1895 unusedPrefetches 1896 .name(name() + ".unused_prefetches") 1897 .desc("number of HardPF blocks evicted w/o reference") 1898 .flags(nozero) 1899 ; 1900 1901 writebacks 1902 .init(system->maxMasters()) 1903 .name(name() + ".writebacks") 1904 .desc("number of writebacks") 1905 .flags(total | nozero | nonan) 1906 ; 1907 for (int i = 0; i < system->maxMasters(); i++) { 1908 writebacks.subname(i, system->getMasterName(i)); 1909 } 1910 1911 // MSHR statistics 1912 
// MSHR hit statistics 1913 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1914 MemCmd cmd(access_idx); 1915 const string &cstr = cmd.toString(); 1916 1917 mshr_hits[access_idx] 1918 .init(system->maxMasters()) 1919 .name(name() + "." + cstr + "_mshr_hits") 1920 .desc("number of " + cstr + " MSHR hits") 1921 .flags(total | nozero | nonan) 1922 ; 1923 for (int i = 0; i < system->maxMasters(); i++) { 1924 mshr_hits[access_idx].subname(i, system->getMasterName(i)); 1925 } 1926 } 1927 1928 demandMshrHits 1929 .name(name() + ".demand_mshr_hits") 1930 .desc("number of demand (read+write) MSHR hits") 1931 .flags(total | nozero | nonan) 1932 ; 1933 demandMshrHits = SUM_DEMAND(mshr_hits); 1934 for (int i = 0; i < system->maxMasters(); i++) { 1935 demandMshrHits.subname(i, system->getMasterName(i)); 1936 } 1937 1938 overallMshrHits 1939 .name(name() + ".overall_mshr_hits") 1940 .desc("number of overall MSHR hits") 1941 .flags(total | nozero | nonan) 1942 ; 1943 overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits); 1944 for (int i = 0; i < system->maxMasters(); i++) { 1945 overallMshrHits.subname(i, system->getMasterName(i)); 1946 } 1947 1948 // MSHR miss statistics 1949 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1950 MemCmd cmd(access_idx); 1951 const string &cstr = cmd.toString(); 1952 1953 mshr_misses[access_idx] 1954 .init(system->maxMasters()) 1955 .name(name() + "." + cstr + "_mshr_misses") 1956 .desc("number of " + cstr + " MSHR misses") 1957 .flags(total | nozero | nonan) 1958 ; 1959 for (int i = 0; i < system->maxMasters(); i++) { 1960 mshr_misses[access_idx].subname(i, system->getMasterName(i)); 1961 } 1962 } 1963 1964 demandMshrMisses 1965 .name(name() + ".demand_mshr_misses") 1966 .desc("number of demand (read+write) MSHR misses") 1967 .flags(total | nozero | nonan) 1968 ; 1969 demandMshrMisses = SUM_DEMAND(mshr_misses); 1970 for (int i = 0; i < system->maxMasters(); i++) { 1971 demandMshrMisses.subname(i, system->getMasterName(i)); 1972 } 1973 1974 overallMshrMisses 1975 .name(name() + ".overall_mshr_misses") 1976 .desc("number of overall MSHR misses") 1977 .flags(total | nozero | nonan) 1978 ; 1979 overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses); 1980 for (int i = 0; i < system->maxMasters(); i++) { 1981 overallMshrMisses.subname(i, system->getMasterName(i)); 1982 } 1983 1984 // MSHR miss latency statistics 1985 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1986 MemCmd cmd(access_idx); 1987 const string &cstr = cmd.toString(); 1988 1989 mshr_miss_latency[access_idx] 1990 .init(system->maxMasters()) 1991 .name(name() + "." 
+ cstr + "_mshr_miss_latency") 1992 .desc("number of " + cstr + " MSHR miss cycles") 1993 .flags(total | nozero | nonan) 1994 ; 1995 for (int i = 0; i < system->maxMasters(); i++) { 1996 mshr_miss_latency[access_idx].subname(i, system->getMasterName(i)); 1997 } 1998 } 1999 2000 demandMshrMissLatency 2001 .name(name() + ".demand_mshr_miss_latency") 2002 .desc("number of demand (read+write) MSHR miss cycles") 2003 .flags(total | nozero | nonan) 2004 ; 2005 demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency); 2006 for (int i = 0; i < system->maxMasters(); i++) { 2007 demandMshrMissLatency.subname(i, system->getMasterName(i)); 2008 } 2009 2010 overallMshrMissLatency 2011 .name(name() + ".overall_mshr_miss_latency") 2012 .desc("number of overall MSHR miss cycles") 2013 .flags(total | nozero | nonan) 2014 ; 2015 overallMshrMissLatency = 2016 demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency); 2017 for (int i = 0; i < system->maxMasters(); i++) { 2018 overallMshrMissLatency.subname(i, system->getMasterName(i)); 2019 } 2020 2021 // MSHR uncacheable statistics 2022 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2023 MemCmd cmd(access_idx); 2024 const string &cstr = cmd.toString(); 2025 2026 mshr_uncacheable[access_idx] 2027 .init(system->maxMasters()) 2028 .name(name() + "." + cstr + "_mshr_uncacheable") 2029 .desc("number of " + cstr + " MSHR uncacheable") 2030 .flags(total | nozero | nonan) 2031 ; 2032 for (int i = 0; i < system->maxMasters(); i++) { 2033 mshr_uncacheable[access_idx].subname(i, system->getMasterName(i)); 2034 } 2035 } 2036 2037 overallMshrUncacheable 2038 .name(name() + ".overall_mshr_uncacheable_misses") 2039 .desc("number of overall MSHR uncacheable misses") 2040 .flags(total | nozero | nonan) 2041 ; 2042 overallMshrUncacheable = 2043 SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable); 2044 for (int i = 0; i < system->maxMasters(); i++) { 2045 overallMshrUncacheable.subname(i, system->getMasterName(i)); 2046 } 2047 2048 // MSHR miss latency statistics 2049 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2050 MemCmd cmd(access_idx); 2051 const string &cstr = cmd.toString(); 2052 2053 mshr_uncacheable_lat[access_idx] 2054 .init(system->maxMasters()) 2055 .name(name() + "." + cstr + "_mshr_uncacheable_latency") 2056 .desc("number of " + cstr + " MSHR uncacheable cycles") 2057 .flags(total | nozero | nonan) 2058 ; 2059 for (int i = 0; i < system->maxMasters(); i++) { 2060 mshr_uncacheable_lat[access_idx].subname( 2061 i, system->getMasterName(i)); 2062 } 2063 } 2064 2065 overallMshrUncacheableLatency 2066 .name(name() + ".overall_mshr_uncacheable_latency") 2067 .desc("number of overall MSHR uncacheable cycles") 2068 .flags(total | nozero | nonan) 2069 ; 2070 overallMshrUncacheableLatency = 2071 SUM_DEMAND(mshr_uncacheable_lat) + 2072 SUM_NON_DEMAND(mshr_uncacheable_lat); 2073 for (int i = 0; i < system->maxMasters(); i++) { 2074 overallMshrUncacheableLatency.subname(i, system->getMasterName(i)); 2075 } 2076 2077#if 0 2078 // MSHR access formulas 2079 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2080 MemCmd cmd(access_idx); 2081 const string &cstr = cmd.toString(); 2082 2083 mshrAccesses[access_idx] 2084 .name(name() + "." 
+ cstr + "_mshr_accesses") 2085 .desc("number of " + cstr + " mshr accesses(hits+misses)") 2086 .flags(total | nozero | nonan) 2087 ; 2088 mshrAccesses[access_idx] = 2089 mshr_hits[access_idx] + mshr_misses[access_idx] 2090 + mshr_uncacheable[access_idx]; 2091 } 2092 2093 demandMshrAccesses 2094 .name(name() + ".demand_mshr_accesses") 2095 .desc("number of demand (read+write) mshr accesses") 2096 .flags(total | nozero | nonan) 2097 ; 2098 demandMshrAccesses = demandMshrHits + demandMshrMisses; 2099 2100 overallMshrAccesses 2101 .name(name() + ".overall_mshr_accesses") 2102 .desc("number of overall (read+write) mshr accesses") 2103 .flags(total | nozero | nonan) 2104 ; 2105 overallMshrAccesses = overallMshrHits + overallMshrMisses 2106 + overallMshrUncacheable; 2107#endif 2108 2109 // MSHR miss rate formulas 2110 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2111 MemCmd cmd(access_idx); 2112 const string &cstr = cmd.toString(); 2113 2114 mshrMissRate[access_idx] 2115 .name(name() + "." + cstr + "_mshr_miss_rate") 2116 .desc("mshr miss rate for " + cstr + " accesses") 2117 .flags(total | nozero | nonan) 2118 ; 2119 mshrMissRate[access_idx] = 2120 mshr_misses[access_idx] / accesses[access_idx]; 2121 2122 for (int i = 0; i < system->maxMasters(); i++) { 2123 mshrMissRate[access_idx].subname(i, system->getMasterName(i)); 2124 } 2125 } 2126 2127 demandMshrMissRate 2128 .name(name() + ".demand_mshr_miss_rate") 2129 .desc("mshr miss rate for demand accesses") 2130 .flags(total | nozero | nonan) 2131 ; 2132 demandMshrMissRate = demandMshrMisses / demandAccesses; 2133 for (int i = 0; i < system->maxMasters(); i++) { 2134 demandMshrMissRate.subname(i, system->getMasterName(i)); 2135 } 2136 2137 overallMshrMissRate 2138 .name(name() + ".overall_mshr_miss_rate") 2139 .desc("mshr miss rate for overall accesses") 2140 .flags(total | nozero | nonan) 2141 ; 2142 overallMshrMissRate = overallMshrMisses / overallAccesses; 2143 for (int i = 0; i < system->maxMasters(); i++) { 2144 overallMshrMissRate.subname(i, system->getMasterName(i)); 2145 } 2146 2147 // mshrMiss latency formulas 2148 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2149 MemCmd cmd(access_idx); 2150 const string &cstr = cmd.toString(); 2151 2152 avgMshrMissLatency[access_idx] 2153 .name(name() + "." 
+ cstr + "_avg_mshr_miss_latency") 2154 .desc("average " + cstr + " mshr miss latency") 2155 .flags(total | nozero | nonan) 2156 ; 2157 avgMshrMissLatency[access_idx] = 2158 mshr_miss_latency[access_idx] / mshr_misses[access_idx]; 2159 2160 for (int i = 0; i < system->maxMasters(); i++) { 2161 avgMshrMissLatency[access_idx].subname( 2162 i, system->getMasterName(i)); 2163 } 2164 } 2165 2166 demandAvgMshrMissLatency 2167 .name(name() + ".demand_avg_mshr_miss_latency") 2168 .desc("average overall mshr miss latency") 2169 .flags(total | nozero | nonan) 2170 ; 2171 demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses; 2172 for (int i = 0; i < system->maxMasters(); i++) { 2173 demandAvgMshrMissLatency.subname(i, system->getMasterName(i)); 2174 } 2175 2176 overallAvgMshrMissLatency 2177 .name(name() + ".overall_avg_mshr_miss_latency") 2178 .desc("average overall mshr miss latency") 2179 .flags(total | nozero | nonan) 2180 ; 2181 overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses; 2182 for (int i = 0; i < system->maxMasters(); i++) { 2183 overallAvgMshrMissLatency.subname(i, system->getMasterName(i)); 2184 } 2185 2186 // mshrUncacheable latency formulas 2187 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2188 MemCmd cmd(access_idx); 2189 const string &cstr = cmd.toString(); 2190 2191 avgMshrUncacheableLatency[access_idx] 2192 .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency") 2193 .desc("average " + cstr + " mshr uncacheable latency") 2194 .flags(total | nozero | nonan) 2195 ; 2196 avgMshrUncacheableLatency[access_idx] = 2197 mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx]; 2198 2199 for (int i = 0; i < system->maxMasters(); i++) { 2200 avgMshrUncacheableLatency[access_idx].subname( 2201 i, system->getMasterName(i)); 2202 } 2203 } 2204 2205 overallAvgMshrUncacheableLatency 2206 .name(name() + ".overall_avg_mshr_uncacheable_latency") 2207 .desc("average overall mshr uncacheable latency") 2208 .flags(total | nozero | nonan) 2209 ; 2210 overallAvgMshrUncacheableLatency = 2211 overallMshrUncacheableLatency / overallMshrUncacheable; 2212 for (int i = 0; i < system->maxMasters(); i++) { 2213 overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i)); 2214 } 2215 2216 replacements 2217 .name(name() + ".replacements") 2218 .desc("number of replacements") 2219 ; 2220} 2221 2222void 2223BaseCache::regProbePoints() 2224{ 2225 ppHit = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Hit"); 2226 ppMiss = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Miss"); 2227 ppFill = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Fill"); 2228} 2229 2230/////////////// 2231// 2232// CpuSidePort 2233// 2234/////////////// 2235bool 2236BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt) 2237{ 2238 // Snoops shouldn't happen when bypassing caches 2239 assert(!cache->system->bypassCaches()); 2240 2241 assert(pkt->isResponse()); 2242 2243 // Express snoop responses from master to slave, e.g., from L1 to L2 2244 cache->recvTimingSnoopResp(pkt); 2245 return true; 2246} 2247 2248 2249bool 2250BaseCache::CpuSidePort::tryTiming(PacketPtr pkt) 2251{ 2252 if (cache->system->bypassCaches() || pkt->isExpressSnoop()) { 2253 // always let express snoop packets through even if blocked 2254 return true; 2255 } else if (blocked || mustSendRetry) { 2256 // either already committed to send a retry, or blocked 2257 mustSendRetry = true; 2258 return false; 2259 } 2260 mustSendRetry = false; 2261 return true; 2262} 
2263 2264bool 2265BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt) 2266{ 2267 assert(pkt->isRequest()); 2268 2269 if (cache->system->bypassCaches()) { 2270 // Just forward the packet if caches are disabled. 2271 // @todo This should really enqueue the packet rather than forward it 2272 bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt); 2273 assert(success); 2274 return true; 2275 } else if (tryTiming(pkt)) { 2276 cache->recvTimingReq(pkt); 2277 return true; 2278 } 2279 return false; 2280} 2281 2282Tick 2283BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt) 2284{ 2285 if (cache->system->bypassCaches()) { 2286 // Forward the request if the system is in cache bypass mode. 2287 return cache->memSidePort.sendAtomic(pkt); 2288 } else { 2289 return cache->recvAtomic(pkt); 2290 } 2291} 2292 2293void 2294BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt) 2295{ 2296 if (cache->system->bypassCaches()) { 2297 // The cache should be flushed if we are in cache bypass mode, 2298 // so we don't need to check if we need to update anything. 2299 cache->memSidePort.sendFunctional(pkt); 2300 return; 2301 } 2302 2303 // functional request 2304 cache->functionalAccess(pkt, true); 2305} 2306 2307AddrRangeList 2308BaseCache::CpuSidePort::getAddrRanges() const 2309{ 2310 return cache->getAddrRanges(); 2311} 2312 2313 2314BaseCache:: 2315CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache, 2316 const std::string &_label) 2317 : CacheSlavePort(_name, _cache, _label), cache(_cache) 2318{ 2319} 2320 2321/////////////// 2322// 2323// MemSidePort 2324// 2325/////////////// 2326bool 2327BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt) 2328{ 2329 cache->recvTimingResp(pkt); 2330 return true; 2331} 2332 2333// Express snooping requests to memside port 2334void 2335BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt) 2336{ 2337 // Snoops shouldn't happen when bypassing caches 2338 assert(!cache->system->bypassCaches()); 2339 2340 // handle snooping requests 2341 cache->recvTimingSnoopReq(pkt); 2342} 2343 2344Tick 2345BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt) 2346{ 2347 // Snoops shouldn't happen when bypassing caches 2348 assert(!cache->system->bypassCaches()); 2349 2350 return cache->recvAtomicSnoop(pkt); 2351} 2352 2353void 2354BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt) 2355{ 2356 // Snoops shouldn't happen when bypassing caches 2357 assert(!cache->system->bypassCaches()); 2358 2359 // functional snoop (note that in contrast to atomic we don't have 2360 // a specific functionalSnoop method, as they have the same 2361 // behaviour regardless) 2362 cache->functionalAccess(pkt, false); 2363} 2364 2365void 2366BaseCache::CacheReqPacketQueue::sendDeferredPacket() 2367{ 2368 // sanity check 2369 assert(!waitingOnRetry); 2370 2371 // there should never be any deferred request packets in the 2372 // queue, instead we rely on the cache to provide the packets 2373 // from the MSHR queue or write queue 2374 assert(deferredPacketReadyTime() == MaxTick); 2375 2376 // check for request packets (requests & writebacks) 2377 QueueEntry* entry = cache.getNextQueueEntry(); 2378 2379 if (!entry) { 2380 // can happen if e.g. we attempt a writeback and fail, but 2381 // before the retry, the writeback is eliminated because 2382 // we snoop another cache's ReadEx.
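// Nothing is sent in that case; we deliberately fall through so
// that the schedSendEvent() call below re-arms the send event for
// whenever a queue entry next becomes ready.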
2383 } else { 2384 // let our snoop responses go first if there are responses to 2385 // the same addresses 2386 if (checkConflictingSnoop(entry->blkAddr)) { 2387 return; 2388 } 2389 waitingOnRetry = entry->sendPacket(cache); 2390 } 2391 2392 // if we succeeded and are not waiting for a retry, schedule the 2393 // next send considering when the next queue is ready, note that 2394 // snoop responses have their own packet queue and thus schedule 2395 // their own events 2396 if (!waitingOnRetry) { 2397 schedSendEvent(cache.nextQueueReadyTime()); 2398 } 2399} 2400 2401BaseCache::MemSidePort::MemSidePort(const std::string &_name, 2402 BaseCache *_cache, 2403 const std::string &_label) 2404 : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue), 2405 _reqQueue(*_cache, *this, _snoopRespQueue, _label), 2406 _snoopRespQueue(*_cache, *this, true, _label), cache(_cache) 2407{ 2408} 2409 2410void 2411WriteAllocator::updateMode(Addr write_addr, unsigned write_size, 2412 Addr blk_addr) 2413{ 2414 // check if we are continuing where the last write ended 2415 if (nextAddr == write_addr) { 2416 delayCtr[blk_addr] = delayThreshold; 2417 // stop counting if we have already saturated 2418 if (mode != WriteMode::NO_ALLOCATE) { 2419 byteCount += write_size; 2420 // switch to coalescing mode if we have passed the lower 2421 // threshold 2422 if (mode == WriteMode::ALLOCATE && 2423 byteCount > coalesceLimit) { 2424 mode = WriteMode::COALESCE; 2425 DPRINTF(Cache, "Switched to write coalescing\n"); 2426 } else if (mode == WriteMode::COALESCE && 2427 byteCount > noAllocateLimit) { 2428 // then switch to non-allocating mode if we 2429 // pass the upper threshold 2430 mode = WriteMode::NO_ALLOCATE; 2431 DPRINTF(Cache, "Switched to write-no-allocate\n"); 2432 } 2433 } 2434 } else { 2435 // we did not see a write matching the previous one, start 2436 // over again 2437 byteCount = write_size; 2438 mode = WriteMode::ALLOCATE; 2439 resetDelay(blk_addr); 2440 } 2441 nextAddr = write_addr + write_size; 2442} 2443 2444WriteAllocator* 2445WriteAllocatorParams::create() 2446{ 2447 return new WriteAllocator(this); 2448} 2449
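// A worked example of the mode transitions in updateMode() above, using
// hypothetical thresholds (the real values come from the WriteAllocator
// parameters): assume 64-byte, block-aligned writes, coalesceLimit = 128
// and noAllocateLimit = 256 bytes.
//
//   updateMode(0x1000, 64, 0x1000); // new stream: byteCount = 64, ALLOCATE
//   updateMode(0x1040, 64, 0x1040); // contiguous: byteCount = 128, ALLOCATE
//   updateMode(0x1080, 64, 0x1080); // byteCount = 192 > 128 -> COALESCE
//   updateMode(0x10c0, 64, 0x10c0); // byteCount = 256, still COALESCE
//   updateMode(0x1100, 64, 0x1100); // byteCount = 320 > 256 -> NO_ALLOCATE
//   updateMode(0x2000, 64, 0x2000); // gap in the stream: byteCount resets
//                                   //   to 64, back to ALLOCATE
//
// The cache consults the resulting mode when deciding whether a write
// miss should allocate a line, so a long enough sequential write stream
// eventually bypasses allocation altogether.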