base.cc revision 13478
1/* 2 * Copyright (c) 2012-2013, 2018 ARM Limited 3 * All rights reserved. 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2003-2005 The Regents of The University of Michigan 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Erik Hallnor 41 * Nikos Nikoleris 42 */ 43 44/** 45 * @file 46 * Definition of BaseCache functions. 47 */ 48 49#include "mem/cache/base.hh" 50 51#include "base/compiler.hh" 52#include "base/logging.hh" 53#include "debug/Cache.hh" 54#include "debug/CachePort.hh" 55#include "debug/CacheRepl.hh" 56#include "debug/CacheVerbose.hh" 57#include "mem/cache/mshr.hh" 58#include "mem/cache/prefetch/base.hh" 59#include "mem/cache/queue_entry.hh" 60#include "params/BaseCache.hh" 61#include "params/WriteAllocator.hh" 62#include "sim/core.hh" 63 64class BaseMasterPort; 65class BaseSlavePort; 66 67using namespace std; 68 69BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name, 70 BaseCache *_cache, 71 const std::string &_label) 72 : QueuedSlavePort(_name, _cache, queue), queue(*_cache, *this, _label), 73 blocked(false), mustSendRetry(false), 74 sendRetryEvent([this]{ processSendRetry(); }, _name) 75{ 76} 77 78BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size) 79 : MemObject(p), 80 cpuSidePort (p->name + ".cpu_side", this, "CpuSidePort"), 81 memSidePort(p->name + ".mem_side", this, "MemSidePort"), 82 mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below 83 writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below 84 tags(p->tags), 85 prefetcher(p->prefetcher), 86 writeAllocator(p->write_allocator), 87 writebackClean(p->writeback_clean), 88 tempBlockWriteback(nullptr), 89 writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); }, 90 name(), false, 91 EventBase::Delayed_Writeback_Pri), 92 blkSize(blk_size), 93 lookupLatency(p->tag_latency), 94 dataLatency(p->data_latency), 95 forwardLatency(p->tag_latency), 96 fillLatency(p->data_latency), 97 responseLatency(p->response_latency), 98 sequentialAccess(p->sequential_access), 99 numTarget(p->tgts_per_mshr), 100 forwardSnoops(true), 101 clusivity(p->clusivity), 102 isReadOnly(p->is_read_only), 103 blocked(0), 104 order(0), 105 noTargetMSHR(nullptr), 106 missCount(p->max_miss_count), 107 addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()), 108 system(p->system) 109{ 110 // the MSHR queue has no reserve entries as we check the MSHR 111 // queue on every single allocation, whereas the write queue has 112 // as many reserve entries as we have MSHRs, since every MSHR may 113 // eventually require a writeback, and we do not check the write 114 // buffer before committing to an MSHR 115 116 // forward snoops is overridden in init() once we can query 117 // whether the connected master is actually snooping or not 118 119 tempBlock = new TempCacheBlk(blkSize); 120 121 tags->tagsInit(); 122 if (prefetcher) 123 prefetcher->setCache(this); 124} 125 126BaseCache::~BaseCache() 127{ 128 delete tempBlock; 129} 130 131void 132BaseCache::CacheSlavePort::setBlocked() 133{ 134 assert(!blocked); 135 DPRINTF(CachePort, "Port is blocking new requests\n"); 136 blocked = true; 137 // if we already scheduled a retry in this cycle, but it has not yet 138 // happened, cancel it 139 if (sendRetryEvent.scheduled()) { 140 owner.deschedule(sendRetryEvent); 141 DPRINTF(CachePort, "Port descheduled retry\n"); 142 mustSendRetry = true; 143 } 144} 145 146void 147BaseCache::CacheSlavePort::clearBlocked() 148{ 149 assert(blocked); 150 DPRINTF(CachePort, "Port is accepting new requests\n"); 151 blocked = false; 152 if (mustSendRetry) { 153 // @TODO: need to find a better time (next cycle?) 154 owner.schedule(sendRetryEvent, curTick() + 1); 155 } 156} 157 158void 159BaseCache::CacheSlavePort::processSendRetry() 160{ 161 DPRINTF(CachePort, "Port is sending retry\n"); 162 163 // reset the flag and call retry 164 mustSendRetry = false; 165 sendRetryReq(); 166} 167 168Addr 169BaseCache::regenerateBlkAddr(CacheBlk* blk) 170{ 171 if (blk != tempBlock) { 172 return tags->regenerateBlkAddr(blk); 173 } else { 174 return tempBlock->getAddr(); 175 } 176} 177 178void 179BaseCache::init() 180{ 181 if (!cpuSidePort.isConnected() || !memSidePort.isConnected()) 182 fatal("Cache ports on %s are not connected\n", name()); 183 cpuSidePort.sendRangeChange(); 184 forwardSnoops = cpuSidePort.isSnooping(); 185} 186 187BaseMasterPort & 188BaseCache::getMasterPort(const std::string &if_name, PortID idx) 189{ 190 if (if_name == "mem_side") { 191 return memSidePort; 192 } else { 193 return MemObject::getMasterPort(if_name, idx); 194 } 195} 196 197BaseSlavePort & 198BaseCache::getSlavePort(const std::string &if_name, PortID idx) 199{ 200 if (if_name == "cpu_side") { 201 return cpuSidePort; 202 } else { 203 return MemObject::getSlavePort(if_name, idx); 204 } 205} 206 207bool 208BaseCache::inRange(Addr addr) const 209{ 210 for (const auto& r : addrRanges) { 211 if (r.contains(addr)) { 212 return true; 213 } 214 } 215 return false; 216} 217 218void 219BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time) 220{ 221 if (pkt->needsResponse()) { 222 pkt->makeTimingResponse(); 223 // @todo: Make someone pay for this 224 pkt->headerDelay = pkt->payloadDelay = 0; 225 226 // In this case we are considering request_time that takes 227 // into account the delay of the xbar, if any, and just 228 // lat, neglecting responseLatency, modelling hit latency 229 // just as the value of lat overriden by access(), which calls 230 // the calculateAccessLatency() function. 231 cpuSidePort.schedTimingResp(pkt, request_time, true); 232 } else { 233 DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__, 234 pkt->print()); 235 236 // queue the packet for deletion, as the sending cache is 237 // still relying on it; if the block is found in access(), 238 // CleanEvict and Writeback messages will be deleted 239 // here as well 240 pendingDelete.reset(pkt); 241 } 242} 243 244void 245BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk, 246 Tick forward_time, Tick request_time) 247{ 248 if (writeAllocator && 249 pkt && pkt->isWrite() && !pkt->req->isUncacheable()) { 250 writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(), 251 pkt->getBlockAddr(blkSize)); 252 } 253 254 if (mshr) { 255 /// MSHR hit 256 /// @note writebacks will be checked in getNextMSHR() 257 /// for any conflicting requests to the same block 258 259 //@todo remove hw_pf here 260 261 // Coalesce unless it was a software prefetch (see above). 262 if (pkt) { 263 assert(!pkt->isWriteback()); 264 // CleanEvicts corresponding to blocks which have 265 // outstanding requests in MSHRs are simply sunk here 266 if (pkt->cmd == MemCmd::CleanEvict) { 267 pendingDelete.reset(pkt); 268 } else if (pkt->cmd == MemCmd::WriteClean) { 269 // A WriteClean should never coalesce with any 270 // outstanding cache maintenance requests. 271 272 // We use forward_time here because there is an 273 // uncached memory write, forwarded to WriteBuffer. 274 allocateWriteBuffer(pkt, forward_time); 275 } else { 276 DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__, 277 pkt->print()); 278 279 assert(pkt->req->masterId() < system->maxMasters()); 280 mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++; 281 282 // We use forward_time here because it is the same 283 // considering new targets. We have multiple 284 // requests for the same address here. It 285 // specifies the latency to allocate an internal 286 // buffer and to schedule an event to the queued 287 // port and also takes into account the additional 288 // delay of the xbar. 289 mshr->allocateTarget(pkt, forward_time, order++, 290 allocOnFill(pkt->cmd)); 291 if (mshr->getNumTargets() == numTarget) { 292 noTargetMSHR = mshr; 293 setBlocked(Blocked_NoTargets); 294 // need to be careful with this... if this mshr isn't 295 // ready yet (i.e. time > curTick()), we don't want to 296 // move it ahead of mshrs that are ready 297 // mshrQueue.moveToFront(mshr); 298 } 299 } 300 } 301 } else { 302 // no MSHR 303 assert(pkt->req->masterId() < system->maxMasters()); 304 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++; 305 306 if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) { 307 // We use forward_time here because there is an 308 // writeback or writeclean, forwarded to WriteBuffer. 309 allocateWriteBuffer(pkt, forward_time); 310 } else { 311 if (blk && blk->isValid()) { 312 // If we have a write miss to a valid block, we 313 // need to mark the block non-readable. Otherwise 314 // if we allow reads while there's an outstanding 315 // write miss, the read could return stale data 316 // out of the cache block... a more aggressive 317 // system could detect the overlap (if any) and 318 // forward data out of the MSHRs, but we don't do 319 // that yet. Note that we do need to leave the 320 // block valid so that it stays in the cache, in 321 // case we get an upgrade response (and hence no 322 // new data) when the write miss completes. 323 // As long as CPUs do proper store/load forwarding 324 // internally, and have a sufficiently weak memory 325 // model, this is probably unnecessary, but at some 326 // point it must have seemed like we needed it... 327 assert((pkt->needsWritable() && !blk->isWritable()) || 328 pkt->req->isCacheMaintenance()); 329 blk->status &= ~BlkReadable; 330 } 331 // Here we are using forward_time, modelling the latency of 332 // a miss (outbound) just as forwardLatency, neglecting the 333 // lookupLatency component. 334 allocateMissBuffer(pkt, forward_time); 335 } 336 } 337} 338 339void 340BaseCache::recvTimingReq(PacketPtr pkt) 341{ 342 // anything that is merely forwarded pays for the forward latency and 343 // the delay provided by the crossbar 344 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 345 346 Cycles lat; 347 CacheBlk *blk = nullptr; 348 bool satisfied = false; 349 { 350 PacketList writebacks; 351 // Note that lat is passed by reference here. The function 352 // access() will set the lat value. 353 satisfied = access(pkt, blk, lat, writebacks); 354 355 // copy writebacks to write buffer here to ensure they logically 356 // precede anything happening below 357 doWritebacks(writebacks, forward_time); 358 } 359 360 // Here we charge the headerDelay that takes into account the latencies 361 // of the bus, if the packet comes from it. 362 // The latency charged is just the value set by the access() function. 363 // In case of a hit we are neglecting response latency. 364 // In case of a miss we are neglecting forward latency. 365 Tick request_time = clockEdge(lat) + pkt->headerDelay; 366 // Here we reset the timing of the packet. 367 pkt->headerDelay = pkt->payloadDelay = 0; 368 369 if (satisfied) { 370 // notify before anything else as later handleTimingReqHit might turn 371 // the packet in a response 372 ppHit->notify(pkt); 373 374 if (prefetcher && blk && blk->wasPrefetched()) { 375 blk->status &= ~BlkHWPrefetched; 376 } 377 378 handleTimingReqHit(pkt, blk, request_time); 379 } else { 380 handleTimingReqMiss(pkt, blk, forward_time, request_time); 381 382 ppMiss->notify(pkt); 383 } 384 385 if (prefetcher) { 386 // track time of availability of next prefetch, if any 387 Tick next_pf_time = prefetcher->nextPrefetchReadyTime(); 388 if (next_pf_time != MaxTick) { 389 schedMemSideSendEvent(next_pf_time); 390 } 391 } 392} 393 394void 395BaseCache::handleUncacheableWriteResp(PacketPtr pkt) 396{ 397 Tick completion_time = clockEdge(responseLatency) + 398 pkt->headerDelay + pkt->payloadDelay; 399 400 // Reset the bus additional time as it is now accounted for 401 pkt->headerDelay = pkt->payloadDelay = 0; 402 403 cpuSidePort.schedTimingResp(pkt, completion_time, true); 404} 405 406void 407BaseCache::recvTimingResp(PacketPtr pkt) 408{ 409 assert(pkt->isResponse()); 410 411 // all header delay should be paid for by the crossbar, unless 412 // this is a prefetch response from above 413 panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp, 414 "%s saw a non-zero packet delay\n", name()); 415 416 const bool is_error = pkt->isError(); 417 418 if (is_error) { 419 DPRINTF(Cache, "%s: Cache received %s with error\n", __func__, 420 pkt->print()); 421 } 422 423 DPRINTF(Cache, "%s: Handling response %s\n", __func__, 424 pkt->print()); 425 426 // if this is a write, we should be looking at an uncacheable 427 // write 428 if (pkt->isWrite()) { 429 assert(pkt->req->isUncacheable()); 430 handleUncacheableWriteResp(pkt); 431 return; 432 } 433 434 // we have dealt with any (uncacheable) writes above, from here on 435 // we know we are dealing with an MSHR due to a miss or a prefetch 436 MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState()); 437 assert(mshr); 438 439 if (mshr == noTargetMSHR) { 440 // we always clear at least one target 441 clearBlocked(Blocked_NoTargets); 442 noTargetMSHR = nullptr; 443 } 444 445 // Initial target is used just for stats 446 MSHR::Target *initial_tgt = mshr->getTarget(); 447 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex(); 448 Tick miss_latency = curTick() - initial_tgt->recvTime; 449 450 if (pkt->req->isUncacheable()) { 451 assert(pkt->req->masterId() < system->maxMasters()); 452 mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] += 453 miss_latency; 454 } else { 455 assert(pkt->req->masterId() < system->maxMasters()); 456 mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] += 457 miss_latency; 458 } 459 460 PacketList writebacks; 461 462 bool is_fill = !mshr->isForward && 463 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp || 464 mshr->wasWholeLineWrite); 465 466 // make sure that if the mshr was due to a whole line write then 467 // the response is an invalidation 468 assert(!mshr->wasWholeLineWrite || pkt->isInvalidate()); 469 470 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure()); 471 472 if (is_fill && !is_error) { 473 DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n", 474 pkt->getAddr()); 475 476 const bool allocate = (writeAllocator && mshr->wasWholeLineWrite) ? 477 writeAllocator->allocate() : mshr->allocOnFill(); 478 blk = handleFill(pkt, blk, writebacks, allocate); 479 assert(blk != nullptr); 480 } 481 482 if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) { 483 // The block was marked not readable while there was a pending 484 // cache maintenance operation, restore its flag. 485 blk->status |= BlkReadable; 486 487 // This was a cache clean operation (without invalidate) 488 // and we have a copy of the block already. Since there 489 // is no invalidation, we can promote targets that don't 490 // require a writable copy 491 mshr->promoteReadable(); 492 } 493 494 if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) { 495 // If at this point the referenced block is writable and the 496 // response is not a cache invalidate, we promote targets that 497 // were deferred as we couldn't guarrantee a writable copy 498 mshr->promoteWritable(); 499 } 500 501 serviceMSHRTargets(mshr, pkt, blk); 502 503 if (mshr->promoteDeferredTargets()) { 504 // avoid later read getting stale data while write miss is 505 // outstanding.. see comment in timingAccess() 506 if (blk) { 507 blk->status &= ~BlkReadable; 508 } 509 mshrQueue.markPending(mshr); 510 schedMemSideSendEvent(clockEdge() + pkt->payloadDelay); 511 } else { 512 // while we deallocate an mshr from the queue we still have to 513 // check the isFull condition before and after as we might 514 // have been using the reserved entries already 515 const bool was_full = mshrQueue.isFull(); 516 mshrQueue.deallocate(mshr); 517 if (was_full && !mshrQueue.isFull()) { 518 clearBlocked(Blocked_NoMSHRs); 519 } 520 521 // Request the bus for a prefetch if this deallocation freed enough 522 // MSHRs for a prefetch to take place 523 if (prefetcher && mshrQueue.canPrefetch()) { 524 Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(), 525 clockEdge()); 526 if (next_pf_time != MaxTick) 527 schedMemSideSendEvent(next_pf_time); 528 } 529 } 530 531 // if we used temp block, check to see if its valid and then clear it out 532 if (blk == tempBlock && tempBlock->isValid()) { 533 evictBlock(blk, writebacks); 534 } 535 536 const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 537 // copy writebacks to write buffer 538 doWritebacks(writebacks, forward_time); 539 540 DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print()); 541 delete pkt; 542} 543 544 545Tick 546BaseCache::recvAtomic(PacketPtr pkt) 547{ 548 // should assert here that there are no outstanding MSHRs or 549 // writebacks... that would mean that someone used an atomic 550 // access in timing mode 551 552 // We use lookupLatency here because it is used to specify the latency 553 // to access. 554 Cycles lat = lookupLatency; 555 556 CacheBlk *blk = nullptr; 557 PacketList writebacks; 558 bool satisfied = access(pkt, blk, lat, writebacks); 559 560 if (pkt->isClean() && blk && blk->isDirty()) { 561 // A cache clean opearation is looking for a dirty 562 // block. If a dirty block is encountered a WriteClean 563 // will update any copies to the path to the memory 564 // until the point of reference. 565 DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n", 566 __func__, pkt->print(), blk->print()); 567 PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id); 568 writebacks.push_back(wb_pkt); 569 pkt->setSatisfied(); 570 } 571 572 // handle writebacks resulting from the access here to ensure they 573 // logically precede anything happening below 574 doWritebacksAtomic(writebacks); 575 assert(writebacks.empty()); 576 577 if (!satisfied) { 578 lat += handleAtomicReqMiss(pkt, blk, writebacks); 579 } 580 581 // Note that we don't invoke the prefetcher at all in atomic mode. 582 // It's not clear how to do it properly, particularly for 583 // prefetchers that aggressively generate prefetch candidates and 584 // rely on bandwidth contention to throttle them; these will tend 585 // to pollute the cache in atomic mode since there is no bandwidth 586 // contention. If we ever do want to enable prefetching in atomic 587 // mode, though, this is the place to do it... see timingAccess() 588 // for an example (though we'd want to issue the prefetch(es) 589 // immediately rather than calling requestMemSideBus() as we do 590 // there). 591 592 // do any writebacks resulting from the response handling 593 doWritebacksAtomic(writebacks); 594 595 // if we used temp block, check to see if its valid and if so 596 // clear it out, but only do so after the call to recvAtomic is 597 // finished so that any downstream observers (such as a snoop 598 // filter), first see the fill, and only then see the eviction 599 if (blk == tempBlock && tempBlock->isValid()) { 600 // the atomic CPU calls recvAtomic for fetch and load/store 601 // sequentuially, and we may already have a tempBlock 602 // writeback from the fetch that we have not yet sent 603 if (tempBlockWriteback) { 604 // if that is the case, write the prevoius one back, and 605 // do not schedule any new event 606 writebackTempBlockAtomic(); 607 } else { 608 // the writeback/clean eviction happens after the call to 609 // recvAtomic has finished (but before any successive 610 // calls), so that the response handling from the fill is 611 // allowed to happen first 612 schedule(writebackTempBlockAtomicEvent, curTick()); 613 } 614 615 tempBlockWriteback = evictBlock(blk); 616 } 617 618 if (pkt->needsResponse()) { 619 pkt->makeAtomicResponse(); 620 } 621 622 return lat * clockPeriod(); 623} 624 625void 626BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side) 627{ 628 Addr blk_addr = pkt->getBlockAddr(blkSize); 629 bool is_secure = pkt->isSecure(); 630 CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure); 631 MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure); 632 633 pkt->pushLabel(name()); 634 635 CacheBlkPrintWrapper cbpw(blk); 636 637 // Note that just because an L2/L3 has valid data doesn't mean an 638 // L1 doesn't have a more up-to-date modified copy that still 639 // needs to be found. As a result we always update the request if 640 // we have it, but only declare it satisfied if we are the owner. 641 642 // see if we have data at all (owned or otherwise) 643 bool have_data = blk && blk->isValid() 644 && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize, 645 blk->data); 646 647 // data we have is dirty if marked as such or if we have an 648 // in-service MSHR that is pending a modified line 649 bool have_dirty = 650 have_data && (blk->isDirty() || 651 (mshr && mshr->inService && mshr->isPendingModified())); 652 653 bool done = have_dirty || 654 cpuSidePort.trySatisfyFunctional(pkt) || 655 mshrQueue.trySatisfyFunctional(pkt, blk_addr) || 656 writeBuffer.trySatisfyFunctional(pkt, blk_addr) || 657 memSidePort.trySatisfyFunctional(pkt); 658 659 DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(), 660 (blk && blk->isValid()) ? "valid " : "", 661 have_data ? "data " : "", done ? "done " : ""); 662 663 // We're leaving the cache, so pop cache->name() label 664 pkt->popLabel(); 665 666 if (done) { 667 pkt->makeResponse(); 668 } else { 669 // if it came as a request from the CPU side then make sure it 670 // continues towards the memory side 671 if (from_cpu_side) { 672 memSidePort.sendFunctional(pkt); 673 } else if (cpuSidePort.isSnooping()) { 674 // if it came from the memory side, it must be a snoop request 675 // and we should only forward it if we are forwarding snoops 676 cpuSidePort.sendFunctionalSnoop(pkt); 677 } 678 } 679} 680 681 682void 683BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt) 684{ 685 assert(pkt->isRequest()); 686 687 uint64_t overwrite_val; 688 bool overwrite_mem; 689 uint64_t condition_val64; 690 uint32_t condition_val32; 691 692 int offset = pkt->getOffset(blkSize); 693 uint8_t *blk_data = blk->data + offset; 694 695 assert(sizeof(uint64_t) >= pkt->getSize()); 696 697 overwrite_mem = true; 698 // keep a copy of our possible write value, and copy what is at the 699 // memory address into the packet 700 pkt->writeData((uint8_t *)&overwrite_val); 701 pkt->setData(blk_data); 702 703 if (pkt->req->isCondSwap()) { 704 if (pkt->getSize() == sizeof(uint64_t)) { 705 condition_val64 = pkt->req->getExtraData(); 706 overwrite_mem = !std::memcmp(&condition_val64, blk_data, 707 sizeof(uint64_t)); 708 } else if (pkt->getSize() == sizeof(uint32_t)) { 709 condition_val32 = (uint32_t)pkt->req->getExtraData(); 710 overwrite_mem = !std::memcmp(&condition_val32, blk_data, 711 sizeof(uint32_t)); 712 } else 713 panic("Invalid size for conditional read/write\n"); 714 } 715 716 if (overwrite_mem) { 717 std::memcpy(blk_data, &overwrite_val, pkt->getSize()); 718 blk->status |= BlkDirty; 719 } 720} 721 722QueueEntry* 723BaseCache::getNextQueueEntry() 724{ 725 // Check both MSHR queue and write buffer for potential requests, 726 // note that null does not mean there is no request, it could 727 // simply be that it is not ready 728 MSHR *miss_mshr = mshrQueue.getNext(); 729 WriteQueueEntry *wq_entry = writeBuffer.getNext(); 730 731 // If we got a write buffer request ready, first priority is a 732 // full write buffer, otherwise we favour the miss requests 733 if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) { 734 // need to search MSHR queue for conflicting earlier miss. 735 MSHR *conflict_mshr = 736 mshrQueue.findPending(wq_entry->blkAddr, 737 wq_entry->isSecure); 738 739 if (conflict_mshr && conflict_mshr->order < wq_entry->order) { 740 // Service misses in order until conflict is cleared. 741 return conflict_mshr; 742 743 // @todo Note that we ignore the ready time of the conflict here 744 } 745 746 // No conflicts; issue write 747 return wq_entry; 748 } else if (miss_mshr) { 749 // need to check for conflicting earlier writeback 750 WriteQueueEntry *conflict_mshr = 751 writeBuffer.findPending(miss_mshr->blkAddr, 752 miss_mshr->isSecure); 753 if (conflict_mshr) { 754 // not sure why we don't check order here... it was in the 755 // original code but commented out. 756 757 // The only way this happens is if we are 758 // doing a write and we didn't have permissions 759 // then subsequently saw a writeback (owned got evicted) 760 // We need to make sure to perform the writeback first 761 // To preserve the dirty data, then we can issue the write 762 763 // should we return wq_entry here instead? I.e. do we 764 // have to flush writes in order? I don't think so... not 765 // for Alpha anyway. Maybe for x86? 766 return conflict_mshr; 767 768 // @todo Note that we ignore the ready time of the conflict here 769 } 770 771 // No conflicts; issue read 772 return miss_mshr; 773 } 774 775 // fall through... no pending requests. Try a prefetch. 776 assert(!miss_mshr && !wq_entry); 777 if (prefetcher && mshrQueue.canPrefetch()) { 778 // If we have a miss queue slot, we can try a prefetch 779 PacketPtr pkt = prefetcher->getPacket(); 780 if (pkt) { 781 Addr pf_addr = pkt->getBlockAddr(blkSize); 782 if (!tags->findBlock(pf_addr, pkt->isSecure()) && 783 !mshrQueue.findMatch(pf_addr, pkt->isSecure()) && 784 !writeBuffer.findMatch(pf_addr, pkt->isSecure())) { 785 // Update statistic on number of prefetches issued 786 // (hwpf_mshr_misses) 787 assert(pkt->req->masterId() < system->maxMasters()); 788 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++; 789 790 // allocate an MSHR and return it, note 791 // that we send the packet straight away, so do not 792 // schedule the send 793 return allocateMissBuffer(pkt, curTick(), false); 794 } else { 795 // free the request and packet 796 delete pkt; 797 } 798 } 799 } 800 801 return nullptr; 802} 803 804void 805BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool) 806{ 807 assert(pkt->isRequest()); 808 809 assert(blk && blk->isValid()); 810 // Occasionally this is not true... if we are a lower-level cache 811 // satisfying a string of Read and ReadEx requests from 812 // upper-level caches, a Read will mark the block as shared but we 813 // can satisfy a following ReadEx anyway since we can rely on the 814 // Read requester(s) to have buffered the ReadEx snoop and to 815 // invalidate their blocks after receiving them. 816 // assert(!pkt->needsWritable() || blk->isWritable()); 817 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize); 818 819 // Check RMW operations first since both isRead() and 820 // isWrite() will be true for them 821 if (pkt->cmd == MemCmd::SwapReq) { 822 if (pkt->isAtomicOp()) { 823 // extract data from cache and save it into the data field in 824 // the packet as a return value from this atomic op 825 int offset = tags->extractBlkOffset(pkt->getAddr()); 826 uint8_t *blk_data = blk->data + offset; 827 pkt->setData(blk_data); 828 829 // execute AMO operation 830 (*(pkt->getAtomicOp()))(blk_data); 831 832 // set block status to dirty 833 blk->status |= BlkDirty; 834 } else { 835 cmpAndSwap(blk, pkt); 836 } 837 } else if (pkt->isWrite()) { 838 // we have the block in a writable state and can go ahead, 839 // note that the line may be also be considered writable in 840 // downstream caches along the path to memory, but always 841 // Exclusive, and never Modified 842 assert(blk->isWritable()); 843 // Write or WriteLine at the first cache with block in writable state 844 if (blk->checkWrite(pkt)) { 845 pkt->writeDataToBlock(blk->data, blkSize); 846 } 847 // Always mark the line as dirty (and thus transition to the 848 // Modified state) even if we are a failed StoreCond so we 849 // supply data to any snoops that have appended themselves to 850 // this cache before knowing the store will fail. 851 blk->status |= BlkDirty; 852 DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print()); 853 } else if (pkt->isRead()) { 854 if (pkt->isLLSC()) { 855 blk->trackLoadLocked(pkt); 856 } 857 858 // all read responses have a data payload 859 assert(pkt->hasRespData()); 860 pkt->setDataFromBlock(blk->data, blkSize); 861 } else if (pkt->isUpgrade()) { 862 // sanity check 863 assert(!pkt->hasSharers()); 864 865 if (blk->isDirty()) { 866 // we were in the Owned state, and a cache above us that 867 // has the line in Shared state needs to be made aware 868 // that the data it already has is in fact dirty 869 pkt->setCacheResponding(); 870 blk->status &= ~BlkDirty; 871 } 872 } else if (pkt->isClean()) { 873 blk->status &= ~BlkDirty; 874 } else { 875 assert(pkt->isInvalidate()); 876 invalidateBlock(blk); 877 DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__, 878 pkt->print()); 879 } 880} 881 882///////////////////////////////////////////////////// 883// 884// Access path: requests coming in from the CPU side 885// 886///////////////////////////////////////////////////// 887Cycles 888BaseCache::calculateAccessLatency(const CacheBlk* blk, 889 const Cycles lookup_lat) const 890{ 891 Cycles lat(lookup_lat); 892 893 if (blk != nullptr) { 894 // First access tags, then data 895 if (sequentialAccess) { 896 lat += dataLatency; 897 // Latency is dictated by the slowest of tag and data latencies 898 } else { 899 lat = std::max(lookup_lat, dataLatency); 900 } 901 902 // Check if the block to be accessed is available. If not, apply the 903 // access latency on top of when the block is ready to be accessed. 904 const Tick when_ready = blk->getWhenReady(); 905 if (when_ready > curTick() && 906 ticksToCycles(when_ready - curTick()) > lat) { 907 lat += ticksToCycles(when_ready - curTick()); 908 } 909 } 910 911 return lat; 912} 913 914bool 915BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, 916 PacketList &writebacks) 917{ 918 // sanity check 919 assert(pkt->isRequest()); 920 921 chatty_assert(!(isReadOnly && pkt->isWrite()), 922 "Should never see a write in a read-only cache %s\n", 923 name()); 924 925 // Access block in the tags 926 Cycles tag_latency(0); 927 blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), tag_latency); 928 929 // Calculate access latency 930 lat = calculateAccessLatency(blk, tag_latency); 931 932 DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(), 933 blk ? "hit " + blk->print() : "miss"); 934 935 if (pkt->req->isCacheMaintenance()) { 936 // A cache maintenance operation is always forwarded to the 937 // memory below even if the block is found in dirty state. 938 939 // We defer any changes to the state of the block until we 940 // create and mark as in service the mshr for the downstream 941 // packet. 942 return false; 943 } 944 945 if (pkt->isEviction()) { 946 // We check for presence of block in above caches before issuing 947 // Writeback or CleanEvict to write buffer. Therefore the only 948 // possible cases can be of a CleanEvict packet coming from above 949 // encountering a Writeback generated in this cache peer cache and 950 // waiting in the write buffer. Cases of upper level peer caches 951 // generating CleanEvict and Writeback or simply CleanEvict and 952 // CleanEvict almost simultaneously will be caught by snoops sent out 953 // by crossbar. 954 WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(), 955 pkt->isSecure()); 956 if (wb_entry) { 957 assert(wb_entry->getNumTargets() == 1); 958 PacketPtr wbPkt = wb_entry->getTarget()->pkt; 959 assert(wbPkt->isWriteback()); 960 961 if (pkt->isCleanEviction()) { 962 // The CleanEvict and WritebackClean snoops into other 963 // peer caches of the same level while traversing the 964 // crossbar. If a copy of the block is found, the 965 // packet is deleted in the crossbar. Hence, none of 966 // the other upper level caches connected to this 967 // cache have the block, so we can clear the 968 // BLOCK_CACHED flag in the Writeback if set and 969 // discard the CleanEvict by returning true. 970 wbPkt->clearBlockCached(); 971 return true; 972 } else { 973 assert(pkt->cmd == MemCmd::WritebackDirty); 974 // Dirty writeback from above trumps our clean 975 // writeback... discard here 976 // Note: markInService will remove entry from writeback buffer. 977 markInService(wb_entry); 978 delete wbPkt; 979 } 980 } 981 } 982 983 // Writeback handling is special case. We can write the block into 984 // the cache without having a writeable copy (or any copy at all). 985 if (pkt->isWriteback()) { 986 assert(blkSize == pkt->getSize()); 987 988 // we could get a clean writeback while we are having 989 // outstanding accesses to a block, do the simple thing for 990 // now and drop the clean writeback so that we do not upset 991 // any ordering/decisions about ownership already taken 992 if (pkt->cmd == MemCmd::WritebackClean && 993 mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) { 994 DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, " 995 "dropping\n", pkt->getAddr()); 996 return true; 997 } 998 999 if (!blk) { 1000 // need to do a replacement 1001 blk = allocateBlock(pkt, writebacks); 1002 if (!blk) { 1003 // no replaceable block available: give up, fwd to next level. 1004 incMissCount(pkt); 1005 return false; 1006 } 1007 1008 blk->status |= BlkReadable; 1009 } 1010 // only mark the block dirty if we got a writeback command, 1011 // and leave it as is for a clean writeback 1012 if (pkt->cmd == MemCmd::WritebackDirty) { 1013 // TODO: the coherent cache can assert(!blk->isDirty()); 1014 blk->status |= BlkDirty; 1015 } 1016 // if the packet does not have sharers, it is passing 1017 // writable, and we got the writeback in Modified or Exclusive 1018 // state, if not we are in the Owned or Shared state 1019 if (!pkt->hasSharers()) { 1020 blk->status |= BlkWritable; 1021 } 1022 // nothing else to do; writeback doesn't expect response 1023 assert(!pkt->needsResponse()); 1024 pkt->writeDataToBlock(blk->data, blkSize); 1025 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print()); 1026 incHitCount(pkt); 1027 // populate the time when the block will be ready to access. 1028 blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay + 1029 pkt->payloadDelay); 1030 return true; 1031 } else if (pkt->cmd == MemCmd::CleanEvict) { 1032 if (blk) { 1033 // Found the block in the tags, need to stop CleanEvict from 1034 // propagating further down the hierarchy. Returning true will 1035 // treat the CleanEvict like a satisfied write request and delete 1036 // it. 1037 return true; 1038 } 1039 // We didn't find the block here, propagate the CleanEvict further 1040 // down the memory hierarchy. Returning false will treat the CleanEvict 1041 // like a Writeback which could not find a replaceable block so has to 1042 // go to next level. 1043 return false; 1044 } else if (pkt->cmd == MemCmd::WriteClean) { 1045 // WriteClean handling is a special case. We can allocate a 1046 // block directly if it doesn't exist and we can update the 1047 // block immediately. The WriteClean transfers the ownership 1048 // of the block as well. 1049 assert(blkSize == pkt->getSize()); 1050 1051 if (!blk) { 1052 if (pkt->writeThrough()) { 1053 // if this is a write through packet, we don't try to 1054 // allocate if the block is not present 1055 return false; 1056 } else { 1057 // a writeback that misses needs to allocate a new block 1058 blk = allocateBlock(pkt, writebacks); 1059 if (!blk) { 1060 // no replaceable block available: give up, fwd to 1061 // next level. 1062 incMissCount(pkt); 1063 return false; 1064 } 1065 1066 blk->status |= BlkReadable; 1067 } 1068 } 1069 1070 // at this point either this is a writeback or a write-through 1071 // write clean operation and the block is already in this 1072 // cache, we need to update the data and the block flags 1073 assert(blk); 1074 // TODO: the coherent cache can assert(!blk->isDirty()); 1075 if (!pkt->writeThrough()) { 1076 blk->status |= BlkDirty; 1077 } 1078 // nothing else to do; writeback doesn't expect response 1079 assert(!pkt->needsResponse()); 1080 pkt->writeDataToBlock(blk->data, blkSize); 1081 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print()); 1082 1083 incHitCount(pkt); 1084 // populate the time when the block will be ready to access. 1085 blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay + 1086 pkt->payloadDelay); 1087 // if this a write-through packet it will be sent to cache 1088 // below 1089 return !pkt->writeThrough(); 1090 } else if (blk && (pkt->needsWritable() ? blk->isWritable() : 1091 blk->isReadable())) { 1092 // OK to satisfy access 1093 incHitCount(pkt); 1094 satisfyRequest(pkt, blk); 1095 maintainClusivity(pkt->fromCache(), blk); 1096 1097 return true; 1098 } 1099 1100 // Can't satisfy access normally... either no block (blk == nullptr) 1101 // or have block but need writable 1102 1103 incMissCount(pkt); 1104 1105 if (!blk && pkt->isLLSC() && pkt->isWrite()) { 1106 // complete miss on store conditional... just give up now 1107 pkt->req->setExtraData(0); 1108 return true; 1109 } 1110 1111 return false; 1112} 1113 1114void 1115BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk) 1116{ 1117 if (from_cache && blk && blk->isValid() && !blk->isDirty() && 1118 clusivity == Enums::mostly_excl) { 1119 // if we have responded to a cache, and our block is still 1120 // valid, but not dirty, and this cache is mostly exclusive 1121 // with respect to the cache above, drop the block 1122 invalidateBlock(blk); 1123 } 1124} 1125 1126CacheBlk* 1127BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks, 1128 bool allocate) 1129{ 1130 assert(pkt->isResponse()); 1131 Addr addr = pkt->getAddr(); 1132 bool is_secure = pkt->isSecure(); 1133#if TRACING_ON 1134 CacheBlk::State old_state = blk ? blk->status : 0; 1135#endif 1136 1137 // When handling a fill, we should have no writes to this line. 1138 assert(addr == pkt->getBlockAddr(blkSize)); 1139 assert(!writeBuffer.findMatch(addr, is_secure)); 1140 1141 if (!blk) { 1142 // better have read new data... 1143 assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp); 1144 1145 // need to do a replacement if allocating, otherwise we stick 1146 // with the temporary storage 1147 blk = allocate ? allocateBlock(pkt, writebacks) : nullptr; 1148 1149 if (!blk) { 1150 // No replaceable block or a mostly exclusive 1151 // cache... just use temporary storage to complete the 1152 // current request and then get rid of it 1153 blk = tempBlock; 1154 tempBlock->insert(addr, is_secure); 1155 DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr, 1156 is_secure ? "s" : "ns"); 1157 } 1158 } else { 1159 // existing block... probably an upgrade 1160 // don't clear block status... if block is already dirty we 1161 // don't want to lose that 1162 } 1163 1164 // Block is guaranteed to be valid at this point 1165 assert(blk->isValid()); 1166 assert(blk->isSecure() == is_secure); 1167 assert(regenerateBlkAddr(blk) == addr); 1168 1169 blk->status |= BlkReadable; 1170 1171 // sanity check for whole-line writes, which should always be 1172 // marked as writable as part of the fill, and then later marked 1173 // dirty as part of satisfyRequest 1174 if (pkt->cmd == MemCmd::InvalidateResp) { 1175 assert(!pkt->hasSharers()); 1176 } 1177 1178 // here we deal with setting the appropriate state of the line, 1179 // and we start by looking at the hasSharers flag, and ignore the 1180 // cacheResponding flag (normally signalling dirty data) if the 1181 // packet has sharers, thus the line is never allocated as Owned 1182 // (dirty but not writable), and always ends up being either 1183 // Shared, Exclusive or Modified, see Packet::setCacheResponding 1184 // for more details 1185 if (!pkt->hasSharers()) { 1186 // we could get a writable line from memory (rather than a 1187 // cache) even in a read-only cache, note that we set this bit 1188 // even for a read-only cache, possibly revisit this decision 1189 blk->status |= BlkWritable; 1190 1191 // check if we got this via cache-to-cache transfer (i.e., from a 1192 // cache that had the block in Modified or Owned state) 1193 if (pkt->cacheResponding()) { 1194 // we got the block in Modified state, and invalidated the 1195 // owners copy 1196 blk->status |= BlkDirty; 1197 1198 chatty_assert(!isReadOnly, "Should never see dirty snoop response " 1199 "in read-only cache %s\n", name()); 1200 } 1201 } 1202 1203 DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n", 1204 addr, is_secure ? "s" : "ns", old_state, blk->print()); 1205 1206 // if we got new data, copy it in (checking for a read response 1207 // and a response that has data is the same in the end) 1208 if (pkt->isRead()) { 1209 // sanity checks 1210 assert(pkt->hasData()); 1211 assert(pkt->getSize() == blkSize); 1212 1213 pkt->writeDataToBlock(blk->data, blkSize); 1214 } 1215 // We pay for fillLatency here. 1216 blk->setWhenReady(clockEdge(fillLatency) + pkt->payloadDelay); 1217 1218 return blk; 1219} 1220 1221CacheBlk* 1222BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks) 1223{ 1224 // Get address 1225 const Addr addr = pkt->getAddr(); 1226 1227 // Get secure bit 1228 const bool is_secure = pkt->isSecure(); 1229 1230 // Find replacement victim 1231 std::vector<CacheBlk*> evict_blks; 1232 CacheBlk *victim = tags->findVictim(addr, is_secure, evict_blks); 1233 1234 // It is valid to return nullptr if there is no victim 1235 if (!victim) 1236 return nullptr; 1237 1238 // Print victim block's information 1239 DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print()); 1240 1241 // Check for transient state allocations. If any of the entries listed 1242 // for eviction has a transient state, the allocation fails 1243 for (const auto& blk : evict_blks) { 1244 if (blk->isValid()) { 1245 Addr repl_addr = regenerateBlkAddr(blk); 1246 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure()); 1247 if (repl_mshr) { 1248 // must be an outstanding upgrade or clean request 1249 // on a block we're about to replace... 1250 assert((!blk->isWritable() && repl_mshr->needsWritable()) || 1251 repl_mshr->isCleaning()); 1252 1253 // too hard to replace block with transient state 1254 // allocation failed, block not inserted 1255 return nullptr; 1256 } 1257 } 1258 } 1259 1260 // The victim will be replaced by a new entry, so increase the replacement 1261 // counter if a valid block is being replaced 1262 if (victim->isValid()) { 1263 DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx " 1264 "(%s): %s\n", regenerateBlkAddr(victim), 1265 victim->isSecure() ? "s" : "ns", 1266 addr, is_secure ? "s" : "ns", 1267 victim->isDirty() ? "writeback" : "clean"); 1268 1269 replacements++; 1270 } 1271 1272 // Evict valid blocks associated to this victim block 1273 for (const auto& blk : evict_blks) { 1274 if (blk->isValid()) { 1275 if (blk->wasPrefetched()) { 1276 unusedPrefetches++; 1277 } 1278 1279 evictBlock(blk, writebacks); 1280 } 1281 } 1282 1283 // Insert new block at victimized entry 1284 tags->insertBlock(addr, is_secure, pkt->req->masterId(), 1285 pkt->req->taskId(), victim); 1286 1287 return victim; 1288} 1289 1290void 1291BaseCache::invalidateBlock(CacheBlk *blk) 1292{ 1293 // If handling a block present in the Tags, let it do its invalidation 1294 // process, which will update stats and invalidate the block itself 1295 if (blk != tempBlock) { 1296 tags->invalidate(blk); 1297 } else { 1298 tempBlock->invalidate(); 1299 } 1300} 1301 1302void 1303BaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks) 1304{ 1305 PacketPtr pkt = evictBlock(blk); 1306 if (pkt) { 1307 writebacks.push_back(pkt); 1308 } 1309} 1310 1311PacketPtr 1312BaseCache::writebackBlk(CacheBlk *blk) 1313{ 1314 chatty_assert(!isReadOnly || writebackClean, 1315 "Writeback from read-only cache"); 1316 assert(blk && blk->isValid() && (blk->isDirty() || writebackClean)); 1317 1318 writebacks[Request::wbMasterId]++; 1319 1320 RequestPtr req = std::make_shared<Request>( 1321 regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId); 1322 1323 if (blk->isSecure()) 1324 req->setFlags(Request::SECURE); 1325 1326 req->taskId(blk->task_id); 1327 1328 PacketPtr pkt = 1329 new Packet(req, blk->isDirty() ? 1330 MemCmd::WritebackDirty : MemCmd::WritebackClean); 1331 1332 DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n", 1333 pkt->print(), blk->isWritable(), blk->isDirty()); 1334 1335 if (blk->isWritable()) { 1336 // not asserting shared means we pass the block in modified 1337 // state, mark our own block non-writeable 1338 blk->status &= ~BlkWritable; 1339 } else { 1340 // we are in the Owned state, tell the receiver 1341 pkt->setHasSharers(); 1342 } 1343 1344 // make sure the block is not marked dirty 1345 blk->status &= ~BlkDirty; 1346 1347 pkt->allocate(); 1348 pkt->setDataFromBlock(blk->data, blkSize); 1349 1350 return pkt; 1351} 1352 1353PacketPtr 1354BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id) 1355{ 1356 RequestPtr req = std::make_shared<Request>( 1357 regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId); 1358 1359 if (blk->isSecure()) { 1360 req->setFlags(Request::SECURE); 1361 } 1362 req->taskId(blk->task_id); 1363 1364 PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id); 1365 1366 if (dest) { 1367 req->setFlags(dest); 1368 pkt->setWriteThrough(); 1369 } 1370 1371 DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(), 1372 blk->isWritable(), blk->isDirty()); 1373 1374 if (blk->isWritable()) { 1375 // not asserting shared means we pass the block in modified 1376 // state, mark our own block non-writeable 1377 blk->status &= ~BlkWritable; 1378 } else { 1379 // we are in the Owned state, tell the receiver 1380 pkt->setHasSharers(); 1381 } 1382 1383 // make sure the block is not marked dirty 1384 blk->status &= ~BlkDirty; 1385 1386 pkt->allocate(); 1387 pkt->setDataFromBlock(blk->data, blkSize); 1388 1389 return pkt; 1390} 1391 1392 1393void 1394BaseCache::memWriteback() 1395{ 1396 tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); }); 1397} 1398 1399void 1400BaseCache::memInvalidate() 1401{ 1402 tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); }); 1403} 1404 1405bool 1406BaseCache::isDirty() const 1407{ 1408 return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); }); 1409} 1410 1411bool 1412BaseCache::coalesce() const 1413{ 1414 return writeAllocator && writeAllocator->coalesce(); 1415} 1416 1417void 1418BaseCache::writebackVisitor(CacheBlk &blk) 1419{ 1420 if (blk.isDirty()) { 1421 assert(blk.isValid()); 1422 1423 RequestPtr request = std::make_shared<Request>( 1424 regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId); 1425 1426 request->taskId(blk.task_id); 1427 if (blk.isSecure()) { 1428 request->setFlags(Request::SECURE); 1429 } 1430 1431 Packet packet(request, MemCmd::WriteReq); 1432 packet.dataStatic(blk.data); 1433 1434 memSidePort.sendFunctional(&packet); 1435 1436 blk.status &= ~BlkDirty; 1437 } 1438} 1439 1440void 1441BaseCache::invalidateVisitor(CacheBlk &blk) 1442{ 1443 if (blk.isDirty()) 1444 warn_once("Invalidating dirty cache lines. " \ 1445 "Expect things to break.\n"); 1446 1447 if (blk.isValid()) { 1448 assert(!blk.isDirty()); 1449 invalidateBlock(&blk); 1450 } 1451} 1452 1453Tick 1454BaseCache::nextQueueReadyTime() const 1455{ 1456 Tick nextReady = std::min(mshrQueue.nextReadyTime(), 1457 writeBuffer.nextReadyTime()); 1458 1459 // Don't signal prefetch ready time if no MSHRs available 1460 // Will signal once enoguh MSHRs are deallocated 1461 if (prefetcher && mshrQueue.canPrefetch()) { 1462 nextReady = std::min(nextReady, 1463 prefetcher->nextPrefetchReadyTime()); 1464 } 1465 1466 return nextReady; 1467} 1468 1469 1470bool 1471BaseCache::sendMSHRQueuePacket(MSHR* mshr) 1472{ 1473 assert(mshr); 1474 1475 // use request from 1st target 1476 PacketPtr tgt_pkt = mshr->getTarget()->pkt; 1477 1478 DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print()); 1479 1480 // if the cache is in write coalescing mode or (additionally) in 1481 // no allocation mode, and we have a write packet with an MSHR 1482 // that is not a whole-line write (due to incompatible flags etc), 1483 // then reset the write mode 1484 if (writeAllocator && writeAllocator->coalesce() && tgt_pkt->isWrite()) { 1485 if (!mshr->isWholeLineWrite()) { 1486 // if we are currently write coalescing, hold on the 1487 // MSHR as many cycles extra as we need to completely 1488 // write a cache line 1489 if (writeAllocator->delay(mshr->blkAddr)) { 1490 Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod(); 1491 DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow " 1492 "for write coalescing\n", tgt_pkt->print(), delay); 1493 mshrQueue.delay(mshr, delay); 1494 return false; 1495 } else { 1496 writeAllocator->reset(); 1497 } 1498 } else { 1499 writeAllocator->resetDelay(mshr->blkAddr); 1500 } 1501 } 1502 1503 CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure); 1504 1505 // either a prefetch that is not present upstream, or a normal 1506 // MSHR request, proceed to get the packet to send downstream 1507 PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable(), 1508 mshr->isWholeLineWrite()); 1509 1510 mshr->isForward = (pkt == nullptr); 1511 1512 if (mshr->isForward) { 1513 // not a cache block request, but a response is expected 1514 // make copy of current packet to forward, keep current 1515 // copy for response handling 1516 pkt = new Packet(tgt_pkt, false, true); 1517 assert(!pkt->isWrite()); 1518 } 1519 1520 // play it safe and append (rather than set) the sender state, 1521 // as forwarded packets may already have existing state 1522 pkt->pushSenderState(mshr); 1523 1524 if (pkt->isClean() && blk && blk->isDirty()) { 1525 // A cache clean opearation is looking for a dirty block. Mark 1526 // the packet so that the destination xbar can determine that 1527 // there will be a follow-up write packet as well. 1528 pkt->setSatisfied(); 1529 } 1530 1531 if (!memSidePort.sendTimingReq(pkt)) { 1532 // we are awaiting a retry, but we 1533 // delete the packet and will be creating a new packet 1534 // when we get the opportunity 1535 delete pkt; 1536 1537 // note that we have now masked any requestBus and 1538 // schedSendEvent (we will wait for a retry before 1539 // doing anything), and this is so even if we do not 1540 // care about this packet and might override it before 1541 // it gets retried 1542 return true; 1543 } else { 1544 // As part of the call to sendTimingReq the packet is 1545 // forwarded to all neighbouring caches (and any caches 1546 // above them) as a snoop. Thus at this point we know if 1547 // any of the neighbouring caches are responding, and if 1548 // so, we know it is dirty, and we can determine if it is 1549 // being passed as Modified, making our MSHR the ordering 1550 // point 1551 bool pending_modified_resp = !pkt->hasSharers() && 1552 pkt->cacheResponding(); 1553 markInService(mshr, pending_modified_resp); 1554 1555 if (pkt->isClean() && blk && blk->isDirty()) { 1556 // A cache clean opearation is looking for a dirty 1557 // block. If a dirty block is encountered a WriteClean 1558 // will update any copies to the path to the memory 1559 // until the point of reference. 1560 DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n", 1561 __func__, pkt->print(), blk->print()); 1562 PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), 1563 pkt->id); 1564 PacketList writebacks; 1565 writebacks.push_back(wb_pkt); 1566 doWritebacks(writebacks, 0); 1567 } 1568 1569 return false; 1570 } 1571} 1572 1573bool 1574BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry) 1575{ 1576 assert(wq_entry); 1577 1578 // always a single target for write queue entries 1579 PacketPtr tgt_pkt = wq_entry->getTarget()->pkt; 1580 1581 DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print()); 1582 1583 // forward as is, both for evictions and uncacheable writes 1584 if (!memSidePort.sendTimingReq(tgt_pkt)) { 1585 // note that we have now masked any requestBus and 1586 // schedSendEvent (we will wait for a retry before 1587 // doing anything), and this is so even if we do not 1588 // care about this packet and might override it before 1589 // it gets retried 1590 return true; 1591 } else { 1592 markInService(wq_entry); 1593 return false; 1594 } 1595} 1596 1597void 1598BaseCache::serialize(CheckpointOut &cp) const 1599{ 1600 bool dirty(isDirty()); 1601 1602 if (dirty) { 1603 warn("*** The cache still contains dirty data. ***\n"); 1604 warn(" Make sure to drain the system using the correct flags.\n"); 1605 warn(" This checkpoint will not restore correctly " \ 1606 "and dirty data in the cache will be lost!\n"); 1607 } 1608 1609 // Since we don't checkpoint the data in the cache, any dirty data 1610 // will be lost when restoring from a checkpoint of a system that 1611 // wasn't drained properly. Flag the checkpoint as invalid if the 1612 // cache contains dirty data. 1613 bool bad_checkpoint(dirty); 1614 SERIALIZE_SCALAR(bad_checkpoint); 1615} 1616 1617void 1618BaseCache::unserialize(CheckpointIn &cp) 1619{ 1620 bool bad_checkpoint; 1621 UNSERIALIZE_SCALAR(bad_checkpoint); 1622 if (bad_checkpoint) { 1623 fatal("Restoring from checkpoints with dirty caches is not " 1624 "supported in the classic memory system. Please remove any " 1625 "caches or drain them properly before taking checkpoints.\n"); 1626 } 1627} 1628 1629void 1630BaseCache::regStats() 1631{ 1632 MemObject::regStats(); 1633 1634 using namespace Stats; 1635 1636 // Hit statistics 1637 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1638 MemCmd cmd(access_idx); 1639 const string &cstr = cmd.toString(); 1640 1641 hits[access_idx] 1642 .init(system->maxMasters()) 1643 .name(name() + "." + cstr + "_hits") 1644 .desc("number of " + cstr + " hits") 1645 .flags(total | nozero | nonan) 1646 ; 1647 for (int i = 0; i < system->maxMasters(); i++) { 1648 hits[access_idx].subname(i, system->getMasterName(i)); 1649 } 1650 } 1651 1652// These macros make it easier to sum the right subset of commands and 1653// to change the subset of commands that are considered "demand" vs 1654// "non-demand" 1655#define SUM_DEMAND(s) \ 1656 (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \ 1657 s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq]) 1658 1659// should writebacks be included here? prior code was inconsistent... 1660#define SUM_NON_DEMAND(s) \ 1661 (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq] + s[MemCmd::SoftPFExReq]) 1662 1663 demandHits 1664 .name(name() + ".demand_hits") 1665 .desc("number of demand (read+write) hits") 1666 .flags(total | nozero | nonan) 1667 ; 1668 demandHits = SUM_DEMAND(hits); 1669 for (int i = 0; i < system->maxMasters(); i++) { 1670 demandHits.subname(i, system->getMasterName(i)); 1671 } 1672 1673 overallHits 1674 .name(name() + ".overall_hits") 1675 .desc("number of overall hits") 1676 .flags(total | nozero | nonan) 1677 ; 1678 overallHits = demandHits + SUM_NON_DEMAND(hits); 1679 for (int i = 0; i < system->maxMasters(); i++) { 1680 overallHits.subname(i, system->getMasterName(i)); 1681 } 1682 1683 // Miss statistics 1684 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1685 MemCmd cmd(access_idx); 1686 const string &cstr = cmd.toString(); 1687 1688 misses[access_idx] 1689 .init(system->maxMasters()) 1690 .name(name() + "." + cstr + "_misses") 1691 .desc("number of " + cstr + " misses") 1692 .flags(total | nozero | nonan) 1693 ; 1694 for (int i = 0; i < system->maxMasters(); i++) { 1695 misses[access_idx].subname(i, system->getMasterName(i)); 1696 } 1697 } 1698 1699 demandMisses 1700 .name(name() + ".demand_misses") 1701 .desc("number of demand (read+write) misses") 1702 .flags(total | nozero | nonan) 1703 ; 1704 demandMisses = SUM_DEMAND(misses); 1705 for (int i = 0; i < system->maxMasters(); i++) { 1706 demandMisses.subname(i, system->getMasterName(i)); 1707 } 1708 1709 overallMisses 1710 .name(name() + ".overall_misses") 1711 .desc("number of overall misses") 1712 .flags(total | nozero | nonan) 1713 ; 1714 overallMisses = demandMisses + SUM_NON_DEMAND(misses); 1715 for (int i = 0; i < system->maxMasters(); i++) { 1716 overallMisses.subname(i, system->getMasterName(i)); 1717 } 1718 1719 // Miss latency statistics 1720 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1721 MemCmd cmd(access_idx); 1722 const string &cstr = cmd.toString(); 1723 1724 missLatency[access_idx] 1725 .init(system->maxMasters()) 1726 .name(name() + "." + cstr + "_miss_latency") 1727 .desc("number of " + cstr + " miss cycles") 1728 .flags(total | nozero | nonan) 1729 ; 1730 for (int i = 0; i < system->maxMasters(); i++) { 1731 missLatency[access_idx].subname(i, system->getMasterName(i)); 1732 } 1733 } 1734 1735 demandMissLatency 1736 .name(name() + ".demand_miss_latency") 1737 .desc("number of demand (read+write) miss cycles") 1738 .flags(total | nozero | nonan) 1739 ; 1740 demandMissLatency = SUM_DEMAND(missLatency); 1741 for (int i = 0; i < system->maxMasters(); i++) { 1742 demandMissLatency.subname(i, system->getMasterName(i)); 1743 } 1744 1745 overallMissLatency 1746 .name(name() + ".overall_miss_latency") 1747 .desc("number of overall miss cycles") 1748 .flags(total | nozero | nonan) 1749 ; 1750 overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency); 1751 for (int i = 0; i < system->maxMasters(); i++) { 1752 overallMissLatency.subname(i, system->getMasterName(i)); 1753 } 1754 1755 // access formulas 1756 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1757 MemCmd cmd(access_idx); 1758 const string &cstr = cmd.toString(); 1759 1760 accesses[access_idx] 1761 .name(name() + "." + cstr + "_accesses") 1762 .desc("number of " + cstr + " accesses(hits+misses)") 1763 .flags(total | nozero | nonan) 1764 ; 1765 accesses[access_idx] = hits[access_idx] + misses[access_idx]; 1766 1767 for (int i = 0; i < system->maxMasters(); i++) { 1768 accesses[access_idx].subname(i, system->getMasterName(i)); 1769 } 1770 } 1771 1772 demandAccesses 1773 .name(name() + ".demand_accesses") 1774 .desc("number of demand (read+write) accesses") 1775 .flags(total | nozero | nonan) 1776 ; 1777 demandAccesses = demandHits + demandMisses; 1778 for (int i = 0; i < system->maxMasters(); i++) { 1779 demandAccesses.subname(i, system->getMasterName(i)); 1780 } 1781 1782 overallAccesses 1783 .name(name() + ".overall_accesses") 1784 .desc("number of overall (read+write) accesses") 1785 .flags(total | nozero | nonan) 1786 ; 1787 overallAccesses = overallHits + overallMisses; 1788 for (int i = 0; i < system->maxMasters(); i++) { 1789 overallAccesses.subname(i, system->getMasterName(i)); 1790 } 1791 1792 // miss rate formulas 1793 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1794 MemCmd cmd(access_idx); 1795 const string &cstr = cmd.toString(); 1796 1797 missRate[access_idx] 1798 .name(name() + "." + cstr + "_miss_rate") 1799 .desc("miss rate for " + cstr + " accesses") 1800 .flags(total | nozero | nonan) 1801 ; 1802 missRate[access_idx] = misses[access_idx] / accesses[access_idx]; 1803 1804 for (int i = 0; i < system->maxMasters(); i++) { 1805 missRate[access_idx].subname(i, system->getMasterName(i)); 1806 } 1807 } 1808 1809 demandMissRate 1810 .name(name() + ".demand_miss_rate") 1811 .desc("miss rate for demand accesses") 1812 .flags(total | nozero | nonan) 1813 ; 1814 demandMissRate = demandMisses / demandAccesses; 1815 for (int i = 0; i < system->maxMasters(); i++) { 1816 demandMissRate.subname(i, system->getMasterName(i)); 1817 } 1818 1819 overallMissRate 1820 .name(name() + ".overall_miss_rate") 1821 .desc("miss rate for overall accesses") 1822 .flags(total | nozero | nonan) 1823 ; 1824 overallMissRate = overallMisses / overallAccesses; 1825 for (int i = 0; i < system->maxMasters(); i++) { 1826 overallMissRate.subname(i, system->getMasterName(i)); 1827 } 1828 1829 // miss latency formulas 1830 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1831 MemCmd cmd(access_idx); 1832 const string &cstr = cmd.toString(); 1833 1834 avgMissLatency[access_idx] 1835 .name(name() + "." + cstr + "_avg_miss_latency") 1836 .desc("average " + cstr + " miss latency") 1837 .flags(total | nozero | nonan) 1838 ; 1839 avgMissLatency[access_idx] = 1840 missLatency[access_idx] / misses[access_idx]; 1841 1842 for (int i = 0; i < system->maxMasters(); i++) { 1843 avgMissLatency[access_idx].subname(i, system->getMasterName(i)); 1844 } 1845 } 1846 1847 demandAvgMissLatency 1848 .name(name() + ".demand_avg_miss_latency") 1849 .desc("average overall miss latency") 1850 .flags(total | nozero | nonan) 1851 ; 1852 demandAvgMissLatency = demandMissLatency / demandMisses; 1853 for (int i = 0; i < system->maxMasters(); i++) { 1854 demandAvgMissLatency.subname(i, system->getMasterName(i)); 1855 } 1856 1857 overallAvgMissLatency 1858 .name(name() + ".overall_avg_miss_latency") 1859 .desc("average overall miss latency") 1860 .flags(total | nozero | nonan) 1861 ; 1862 overallAvgMissLatency = overallMissLatency / overallMisses; 1863 for (int i = 0; i < system->maxMasters(); i++) { 1864 overallAvgMissLatency.subname(i, system->getMasterName(i)); 1865 } 1866 1867 blocked_cycles.init(NUM_BLOCKED_CAUSES); 1868 blocked_cycles 1869 .name(name() + ".blocked_cycles") 1870 .desc("number of cycles access was blocked") 1871 .subname(Blocked_NoMSHRs, "no_mshrs") 1872 .subname(Blocked_NoTargets, "no_targets") 1873 ; 1874 1875 1876 blocked_causes.init(NUM_BLOCKED_CAUSES); 1877 blocked_causes 1878 .name(name() + ".blocked") 1879 .desc("number of cycles access was blocked") 1880 .subname(Blocked_NoMSHRs, "no_mshrs") 1881 .subname(Blocked_NoTargets, "no_targets") 1882 ; 1883 1884 avg_blocked 1885 .name(name() + ".avg_blocked_cycles") 1886 .desc("average number of cycles each access was blocked") 1887 .subname(Blocked_NoMSHRs, "no_mshrs") 1888 .subname(Blocked_NoTargets, "no_targets") 1889 ; 1890 1891 avg_blocked = blocked_cycles / blocked_causes; 1892 1893 unusedPrefetches 1894 .name(name() + ".unused_prefetches") 1895 .desc("number of HardPF blocks evicted w/o reference") 1896 .flags(nozero) 1897 ; 1898 1899 writebacks 1900 .init(system->maxMasters()) 1901 .name(name() + ".writebacks") 1902 .desc("number of writebacks") 1903 .flags(total | nozero | nonan) 1904 ; 1905 for (int i = 0; i < system->maxMasters(); i++) { 1906 writebacks.subname(i, system->getMasterName(i)); 1907 } 1908 1909 // MSHR statistics 1910 // MSHR hit statistics 1911 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1912 MemCmd cmd(access_idx); 1913 const string &cstr = cmd.toString(); 1914 1915 mshr_hits[access_idx] 1916 .init(system->maxMasters()) 1917 .name(name() + "." + cstr + "_mshr_hits") 1918 .desc("number of " + cstr + " MSHR hits") 1919 .flags(total | nozero | nonan) 1920 ; 1921 for (int i = 0; i < system->maxMasters(); i++) { 1922 mshr_hits[access_idx].subname(i, system->getMasterName(i)); 1923 } 1924 } 1925 1926 demandMshrHits 1927 .name(name() + ".demand_mshr_hits") 1928 .desc("number of demand (read+write) MSHR hits") 1929 .flags(total | nozero | nonan) 1930 ; 1931 demandMshrHits = SUM_DEMAND(mshr_hits); 1932 for (int i = 0; i < system->maxMasters(); i++) { 1933 demandMshrHits.subname(i, system->getMasterName(i)); 1934 } 1935 1936 overallMshrHits 1937 .name(name() + ".overall_mshr_hits") 1938 .desc("number of overall MSHR hits") 1939 .flags(total | nozero | nonan) 1940 ; 1941 overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits); 1942 for (int i = 0; i < system->maxMasters(); i++) { 1943 overallMshrHits.subname(i, system->getMasterName(i)); 1944 } 1945 1946 // MSHR miss statistics 1947 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1948 MemCmd cmd(access_idx); 1949 const string &cstr = cmd.toString(); 1950 1951 mshr_misses[access_idx] 1952 .init(system->maxMasters()) 1953 .name(name() + "." + cstr + "_mshr_misses") 1954 .desc("number of " + cstr + " MSHR misses") 1955 .flags(total | nozero | nonan) 1956 ; 1957 for (int i = 0; i < system->maxMasters(); i++) { 1958 mshr_misses[access_idx].subname(i, system->getMasterName(i)); 1959 } 1960 } 1961 1962 demandMshrMisses 1963 .name(name() + ".demand_mshr_misses") 1964 .desc("number of demand (read+write) MSHR misses") 1965 .flags(total | nozero | nonan) 1966 ; 1967 demandMshrMisses = SUM_DEMAND(mshr_misses); 1968 for (int i = 0; i < system->maxMasters(); i++) { 1969 demandMshrMisses.subname(i, system->getMasterName(i)); 1970 } 1971 1972 overallMshrMisses 1973 .name(name() + ".overall_mshr_misses") 1974 .desc("number of overall MSHR misses") 1975 .flags(total | nozero | nonan) 1976 ; 1977 overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses); 1978 for (int i = 0; i < system->maxMasters(); i++) { 1979 overallMshrMisses.subname(i, system->getMasterName(i)); 1980 } 1981 1982 // MSHR miss latency statistics 1983 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1984 MemCmd cmd(access_idx); 1985 const string &cstr = cmd.toString(); 1986 1987 mshr_miss_latency[access_idx] 1988 .init(system->maxMasters()) 1989 .name(name() + "." + cstr + "_mshr_miss_latency") 1990 .desc("number of " + cstr + " MSHR miss cycles") 1991 .flags(total | nozero | nonan) 1992 ; 1993 for (int i = 0; i < system->maxMasters(); i++) { 1994 mshr_miss_latency[access_idx].subname(i, system->getMasterName(i)); 1995 } 1996 } 1997 1998 demandMshrMissLatency 1999 .name(name() + ".demand_mshr_miss_latency") 2000 .desc("number of demand (read+write) MSHR miss cycles") 2001 .flags(total | nozero | nonan) 2002 ; 2003 demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency); 2004 for (int i = 0; i < system->maxMasters(); i++) { 2005 demandMshrMissLatency.subname(i, system->getMasterName(i)); 2006 } 2007 2008 overallMshrMissLatency 2009 .name(name() + ".overall_mshr_miss_latency") 2010 .desc("number of overall MSHR miss cycles") 2011 .flags(total | nozero | nonan) 2012 ; 2013 overallMshrMissLatency = 2014 demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency); 2015 for (int i = 0; i < system->maxMasters(); i++) { 2016 overallMshrMissLatency.subname(i, system->getMasterName(i)); 2017 } 2018 2019 // MSHR uncacheable statistics 2020 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2021 MemCmd cmd(access_idx); 2022 const string &cstr = cmd.toString(); 2023 2024 mshr_uncacheable[access_idx] 2025 .init(system->maxMasters()) 2026 .name(name() + "." + cstr + "_mshr_uncacheable") 2027 .desc("number of " + cstr + " MSHR uncacheable") 2028 .flags(total | nozero | nonan) 2029 ; 2030 for (int i = 0; i < system->maxMasters(); i++) { 2031 mshr_uncacheable[access_idx].subname(i, system->getMasterName(i)); 2032 } 2033 } 2034 2035 overallMshrUncacheable 2036 .name(name() + ".overall_mshr_uncacheable_misses") 2037 .desc("number of overall MSHR uncacheable misses") 2038 .flags(total | nozero | nonan) 2039 ; 2040 overallMshrUncacheable = 2041 SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable); 2042 for (int i = 0; i < system->maxMasters(); i++) { 2043 overallMshrUncacheable.subname(i, system->getMasterName(i)); 2044 } 2045 2046 // MSHR miss latency statistics 2047 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2048 MemCmd cmd(access_idx); 2049 const string &cstr = cmd.toString(); 2050 2051 mshr_uncacheable_lat[access_idx] 2052 .init(system->maxMasters()) 2053 .name(name() + "." + cstr + "_mshr_uncacheable_latency") 2054 .desc("number of " + cstr + " MSHR uncacheable cycles") 2055 .flags(total | nozero | nonan) 2056 ; 2057 for (int i = 0; i < system->maxMasters(); i++) { 2058 mshr_uncacheable_lat[access_idx].subname( 2059 i, system->getMasterName(i)); 2060 } 2061 } 2062 2063 overallMshrUncacheableLatency 2064 .name(name() + ".overall_mshr_uncacheable_latency") 2065 .desc("number of overall MSHR uncacheable cycles") 2066 .flags(total | nozero | nonan) 2067 ; 2068 overallMshrUncacheableLatency = 2069 SUM_DEMAND(mshr_uncacheable_lat) + 2070 SUM_NON_DEMAND(mshr_uncacheable_lat); 2071 for (int i = 0; i < system->maxMasters(); i++) { 2072 overallMshrUncacheableLatency.subname(i, system->getMasterName(i)); 2073 } 2074 2075#if 0 2076 // MSHR access formulas 2077 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2078 MemCmd cmd(access_idx); 2079 const string &cstr = cmd.toString(); 2080 2081 mshrAccesses[access_idx] 2082 .name(name() + "." + cstr + "_mshr_accesses") 2083 .desc("number of " + cstr + " mshr accesses(hits+misses)") 2084 .flags(total | nozero | nonan) 2085 ; 2086 mshrAccesses[access_idx] = 2087 mshr_hits[access_idx] + mshr_misses[access_idx] 2088 + mshr_uncacheable[access_idx]; 2089 } 2090 2091 demandMshrAccesses 2092 .name(name() + ".demand_mshr_accesses") 2093 .desc("number of demand (read+write) mshr accesses") 2094 .flags(total | nozero | nonan) 2095 ; 2096 demandMshrAccesses = demandMshrHits + demandMshrMisses; 2097 2098 overallMshrAccesses 2099 .name(name() + ".overall_mshr_accesses") 2100 .desc("number of overall (read+write) mshr accesses") 2101 .flags(total | nozero | nonan) 2102 ; 2103 overallMshrAccesses = overallMshrHits + overallMshrMisses 2104 + overallMshrUncacheable; 2105#endif 2106 2107 // MSHR miss rate formulas 2108 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2109 MemCmd cmd(access_idx); 2110 const string &cstr = cmd.toString(); 2111 2112 mshrMissRate[access_idx] 2113 .name(name() + "." + cstr + "_mshr_miss_rate") 2114 .desc("mshr miss rate for " + cstr + " accesses") 2115 .flags(total | nozero | nonan) 2116 ; 2117 mshrMissRate[access_idx] = 2118 mshr_misses[access_idx] / accesses[access_idx]; 2119 2120 for (int i = 0; i < system->maxMasters(); i++) { 2121 mshrMissRate[access_idx].subname(i, system->getMasterName(i)); 2122 } 2123 } 2124 2125 demandMshrMissRate 2126 .name(name() + ".demand_mshr_miss_rate") 2127 .desc("mshr miss rate for demand accesses") 2128 .flags(total | nozero | nonan) 2129 ; 2130 demandMshrMissRate = demandMshrMisses / demandAccesses; 2131 for (int i = 0; i < system->maxMasters(); i++) { 2132 demandMshrMissRate.subname(i, system->getMasterName(i)); 2133 } 2134 2135 overallMshrMissRate 2136 .name(name() + ".overall_mshr_miss_rate") 2137 .desc("mshr miss rate for overall accesses") 2138 .flags(total | nozero | nonan) 2139 ; 2140 overallMshrMissRate = overallMshrMisses / overallAccesses; 2141 for (int i = 0; i < system->maxMasters(); i++) { 2142 overallMshrMissRate.subname(i, system->getMasterName(i)); 2143 } 2144 2145 // mshrMiss latency formulas 2146 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2147 MemCmd cmd(access_idx); 2148 const string &cstr = cmd.toString(); 2149 2150 avgMshrMissLatency[access_idx] 2151 .name(name() + "." + cstr + "_avg_mshr_miss_latency") 2152 .desc("average " + cstr + " mshr miss latency") 2153 .flags(total | nozero | nonan) 2154 ; 2155 avgMshrMissLatency[access_idx] = 2156 mshr_miss_latency[access_idx] / mshr_misses[access_idx]; 2157 2158 for (int i = 0; i < system->maxMasters(); i++) { 2159 avgMshrMissLatency[access_idx].subname( 2160 i, system->getMasterName(i)); 2161 } 2162 } 2163 2164 demandAvgMshrMissLatency 2165 .name(name() + ".demand_avg_mshr_miss_latency") 2166 .desc("average overall mshr miss latency") 2167 .flags(total | nozero | nonan) 2168 ; 2169 demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses; 2170 for (int i = 0; i < system->maxMasters(); i++) { 2171 demandAvgMshrMissLatency.subname(i, system->getMasterName(i)); 2172 } 2173 2174 overallAvgMshrMissLatency 2175 .name(name() + ".overall_avg_mshr_miss_latency") 2176 .desc("average overall mshr miss latency") 2177 .flags(total | nozero | nonan) 2178 ; 2179 overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses; 2180 for (int i = 0; i < system->maxMasters(); i++) { 2181 overallAvgMshrMissLatency.subname(i, system->getMasterName(i)); 2182 } 2183 2184 // mshrUncacheable latency formulas 2185 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2186 MemCmd cmd(access_idx); 2187 const string &cstr = cmd.toString(); 2188 2189 avgMshrUncacheableLatency[access_idx] 2190 .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency") 2191 .desc("average " + cstr + " mshr uncacheable latency") 2192 .flags(total | nozero | nonan) 2193 ; 2194 avgMshrUncacheableLatency[access_idx] = 2195 mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx]; 2196 2197 for (int i = 0; i < system->maxMasters(); i++) { 2198 avgMshrUncacheableLatency[access_idx].subname( 2199 i, system->getMasterName(i)); 2200 } 2201 } 2202 2203 overallAvgMshrUncacheableLatency 2204 .name(name() + ".overall_avg_mshr_uncacheable_latency") 2205 .desc("average overall mshr uncacheable latency") 2206 .flags(total | nozero | nonan) 2207 ; 2208 overallAvgMshrUncacheableLatency = 2209 overallMshrUncacheableLatency / overallMshrUncacheable; 2210 for (int i = 0; i < system->maxMasters(); i++) { 2211 overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i)); 2212 } 2213 2214 replacements 2215 .name(name() + ".replacements") 2216 .desc("number of replacements") 2217 ; 2218} 2219 2220void 2221BaseCache::regProbePoints() 2222{ 2223 ppHit = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Hit"); 2224 ppMiss = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Miss"); 2225} 2226 2227/////////////// 2228// 2229// CpuSidePort 2230// 2231/////////////// 2232bool 2233BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt) 2234{ 2235 // Snoops shouldn't happen when bypassing caches 2236 assert(!cache->system->bypassCaches()); 2237 2238 assert(pkt->isResponse()); 2239 2240 // Express snoop responses from master to slave, e.g., from L1 to L2 2241 cache->recvTimingSnoopResp(pkt); 2242 return true; 2243} 2244 2245 2246bool 2247BaseCache::CpuSidePort::tryTiming(PacketPtr pkt) 2248{ 2249 if (cache->system->bypassCaches() || pkt->isExpressSnoop()) { 2250 // always let express snoop packets through even if blocked 2251 return true; 2252 } else if (blocked || mustSendRetry) { 2253 // either already committed to send a retry, or blocked 2254 mustSendRetry = true; 2255 return false; 2256 } 2257 mustSendRetry = false; 2258 return true; 2259} 2260 2261bool 2262BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt) 2263{ 2264 assert(pkt->isRequest()); 2265 2266 if (cache->system->bypassCaches()) { 2267 // Just forward the packet if caches are disabled. 2268 // @todo This should really enqueue the packet rather 2269 bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt); 2270 assert(success); 2271 return true; 2272 } else if (tryTiming(pkt)) { 2273 cache->recvTimingReq(pkt); 2274 return true; 2275 } 2276 return false; 2277} 2278 2279Tick 2280BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt) 2281{ 2282 if (cache->system->bypassCaches()) { 2283 // Forward the request if the system is in cache bypass mode. 2284 return cache->memSidePort.sendAtomic(pkt); 2285 } else { 2286 return cache->recvAtomic(pkt); 2287 } 2288} 2289 2290void 2291BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt) 2292{ 2293 if (cache->system->bypassCaches()) { 2294 // The cache should be flushed if we are in cache bypass mode, 2295 // so we don't need to check if we need to update anything. 2296 cache->memSidePort.sendFunctional(pkt); 2297 return; 2298 } 2299 2300 // functional request 2301 cache->functionalAccess(pkt, true); 2302} 2303 2304AddrRangeList 2305BaseCache::CpuSidePort::getAddrRanges() const 2306{ 2307 return cache->getAddrRanges(); 2308} 2309 2310 2311BaseCache:: 2312CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache, 2313 const std::string &_label) 2314 : CacheSlavePort(_name, _cache, _label), cache(_cache) 2315{ 2316} 2317 2318/////////////// 2319// 2320// MemSidePort 2321// 2322/////////////// 2323bool 2324BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt) 2325{ 2326 cache->recvTimingResp(pkt); 2327 return true; 2328} 2329 2330// Express snooping requests to memside port 2331void 2332BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt) 2333{ 2334 // Snoops shouldn't happen when bypassing caches 2335 assert(!cache->system->bypassCaches()); 2336 2337 // handle snooping requests 2338 cache->recvTimingSnoopReq(pkt); 2339} 2340 2341Tick 2342BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt) 2343{ 2344 // Snoops shouldn't happen when bypassing caches 2345 assert(!cache->system->bypassCaches()); 2346 2347 return cache->recvAtomicSnoop(pkt); 2348} 2349 2350void 2351BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt) 2352{ 2353 // Snoops shouldn't happen when bypassing caches 2354 assert(!cache->system->bypassCaches()); 2355 2356 // functional snoop (note that in contrast to atomic we don't have 2357 // a specific functionalSnoop method, as they have the same 2358 // behaviour regardless) 2359 cache->functionalAccess(pkt, false); 2360} 2361 2362void 2363BaseCache::CacheReqPacketQueue::sendDeferredPacket() 2364{ 2365 // sanity check 2366 assert(!waitingOnRetry); 2367 2368 // there should never be any deferred request packets in the 2369 // queue, instead we resly on the cache to provide the packets 2370 // from the MSHR queue or write queue 2371 assert(deferredPacketReadyTime() == MaxTick); 2372 2373 // check for request packets (requests & writebacks) 2374 QueueEntry* entry = cache.getNextQueueEntry(); 2375 2376 if (!entry) { 2377 // can happen if e.g. we attempt a writeback and fail, but 2378 // before the retry, the writeback is eliminated because 2379 // we snoop another cache's ReadEx. 2380 } else { 2381 // let our snoop responses go first if there are responses to 2382 // the same addresses 2383 if (checkConflictingSnoop(entry->blkAddr)) { 2384 return; 2385 } 2386 waitingOnRetry = entry->sendPacket(cache); 2387 } 2388 2389 // if we succeeded and are not waiting for a retry, schedule the 2390 // next send considering when the next queue is ready, note that 2391 // snoop responses have their own packet queue and thus schedule 2392 // their own events 2393 if (!waitingOnRetry) { 2394 schedSendEvent(cache.nextQueueReadyTime()); 2395 } 2396} 2397 2398BaseCache::MemSidePort::MemSidePort(const std::string &_name, 2399 BaseCache *_cache, 2400 const std::string &_label) 2401 : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue), 2402 _reqQueue(*_cache, *this, _snoopRespQueue, _label), 2403 _snoopRespQueue(*_cache, *this, _label), cache(_cache) 2404{ 2405} 2406 2407void 2408WriteAllocator::updateMode(Addr write_addr, unsigned write_size, 2409 Addr blk_addr) 2410{ 2411 // check if we are continuing where the last write ended 2412 if (nextAddr == write_addr) { 2413 delayCtr[blk_addr] = delayThreshold; 2414 // stop if we have already saturated 2415 if (mode != WriteMode::NO_ALLOCATE) { 2416 byteCount += write_size; 2417 // switch to streaming mode if we have passed the lower 2418 // threshold 2419 if (mode == WriteMode::ALLOCATE && 2420 byteCount > coalesceLimit) { 2421 mode = WriteMode::COALESCE; 2422 DPRINTF(Cache, "Switched to write coalescing\n"); 2423 } else if (mode == WriteMode::COALESCE && 2424 byteCount > noAllocateLimit) { 2425 // and continue and switch to non-allocating mode if we 2426 // pass the upper threshold 2427 mode = WriteMode::NO_ALLOCATE; 2428 DPRINTF(Cache, "Switched to write-no-allocate\n"); 2429 } 2430 } 2431 } else { 2432 // we did not see a write matching the previous one, start 2433 // over again 2434 byteCount = write_size; 2435 mode = WriteMode::ALLOCATE; 2436 resetDelay(blk_addr); 2437 } 2438 nextAddr = write_addr + write_size; 2439} 2440 2441WriteAllocator* 2442WriteAllocatorParams::create() 2443{ 2444 return new WriteAllocator(this); 2445} 2446