// base.cc revision 13932:24f825a9a080
/*
 * Copyright (c) 2012-2013, 2018-2019 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Definition of BaseCache functions.
 */

#include "mem/cache/base.hh"

#include "base/compiler.hh"
#include "base/logging.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheRepl.hh"
#include "debug/CacheVerbose.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "mem/cache/queue_entry.hh"
#include "params/BaseCache.hh"
#include "params/WriteAllocator.hh"
#include "sim/core.hh"

class BaseMasterPort;
class BaseSlavePort;

using namespace std;

BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
                                          BaseCache *_cache,
                                          const std::string &_label)
    : QueuedSlavePort(_name, _cache, queue),
      queue(*_cache, *this, true, _label),
      blocked(false), mustSendRetry(false),
      sendRetryEvent([this]{ processSendRetry(); }, _name)
{
}

BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
    : ClockedObject(p),
      cpuSidePort(p->name + ".cpu_side", this, "CpuSidePort"),
      memSidePort(p->name + ".mem_side", this, "MemSidePort"),
      mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
      writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
      tags(p->tags),
      prefetcher(p->prefetcher),
      writeAllocator(p->write_allocator),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
                                    name(), false,
                                    EventBase::Delayed_Writeback_Pri),
      blkSize(blk_size),
      lookupLatency(p->tag_latency),
      dataLatency(p->data_latency),
      forwardLatency(p->tag_latency),
      fillLatency(p->data_latency),
      responseLatency(p->response_latency),
      sequentialAccess(p->sequential_access),
      numTarget(p->tgts_per_mshr),
      forwardSnoops(true),
      clusivity(p->clusivity),
      isReadOnly(p->is_read_only),
      blocked(0),
      order(0),
      noTargetMSHR(nullptr),
      missCount(p->max_miss_count),
      addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()),
      system(p->system)
{
    // the MSHR queue has no reserve entries as we check the MSHR
    // queue on every single allocation, whereas the write queue has
    // as many reserve entries as we have MSHRs, since every MSHR may
    // eventually require a writeback, and we do not check the write
    // buffer before committing to an MSHR

    // forward snoops is overridden in init() once we can query
    // whether the connected master is actually snooping or not

    tempBlock = new TempCacheBlk(blkSize);

    tags->tagsInit();
    if (prefetcher)
        prefetcher->setCache(this);
}

BaseCache::~BaseCache()
{
    delete tempBlock;
}

void
BaseCache::CacheSlavePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(CachePort, "Port is blocking new requests\n");
    blocked = true;
    // if we already scheduled a retry in this cycle, but it has not yet
    // happened, cancel it
    if (sendRetryEvent.scheduled()) {
        owner.deschedule(sendRetryEvent);
        DPRINTF(CachePort, "Port descheduled retry\n");
        mustSendRetry = true;
    }
}

void
BaseCache::CacheSlavePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(CachePort, "Port is accepting new requests\n");
    blocked = false;
    if (mustSendRetry) {
        // @TODO: need to find a better time (next cycle?)
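        // A next-cycle variant (untested sketch, not the current
        // behaviour) could schedule on a clock edge instead, e.g.
        // assuming access to the cache's clock via a stored BaseCache
        // pointer (hypothetical 'cache' member):
        //   owner.schedule(sendRetryEvent, cache->clockEdge(Cycles(1)));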
        owner.schedule(sendRetryEvent, curTick() + 1);
    }
}

void
BaseCache::CacheSlavePort::processSendRetry()
{
    DPRINTF(CachePort, "Port is sending retry\n");

    // reset the flag and call retry
    mustSendRetry = false;
    sendRetryReq();
}

Addr
BaseCache::regenerateBlkAddr(CacheBlk* blk)
{
    if (blk != tempBlock) {
        return tags->regenerateBlkAddr(blk);
    } else {
        return tempBlock->getAddr();
    }
}

void
BaseCache::init()
{
    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
        fatal("Cache ports on %s are not connected\n", name());
    cpuSidePort.sendRangeChange();
    forwardSnoops = cpuSidePort.isSnooping();
}

Port &
BaseCache::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_side") {
        return memSidePort;
    } else if (if_name == "cpu_side") {
        return cpuSidePort;
    } else {
        return ClockedObject::getPort(if_name, idx);
    }
}

bool
BaseCache::inRange(Addr addr) const
{
    for (const auto& r : addrRanges) {
        if (r.contains(addr)) {
            return true;
        }
    }
    return false;
}

void
BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    if (pkt->needsResponse()) {
        // These delays should have been consumed by now
        assert(pkt->headerDelay == 0);
        assert(pkt->payloadDelay == 0);

        pkt->makeTimingResponse();

        // In this case we are considering request_time that takes
        // into account the delay of the xbar, if any, and just
        // lat, neglecting responseLatency, modelling hit latency
        // just as the value of lat overridden by access(), which calls
        // the calculateAccessLatency() function.
        cpuSidePort.schedTimingResp(pkt, request_time);
    } else {
        DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
                pkt->print());

        // queue the packet for deletion, as the sending cache is
        // still relying on it; if the block is found in access(),
        // CleanEvict and Writeback messages will be deleted
        // here as well
        pendingDelete.reset(pkt);
    }
}

void
BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                               Tick forward_time, Tick request_time)
{
    if (writeAllocator &&
        pkt && pkt->isWrite() && !pkt->req->isUncacheable()) {
        writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
                                   pkt->getBlockAddr(blkSize));
    }

    if (mshr) {
        /// MSHR hit
        /// @note writebacks will be checked in getNextMSHR()
        /// for any conflicting requests to the same block

        //@todo remove hw_pf here

        // Coalesce unless it was a software prefetch (see above).
        if (pkt) {
            assert(!pkt->isWriteback());
            // CleanEvicts corresponding to blocks which have
            // outstanding requests in MSHRs are simply sunk here
            if (pkt->cmd == MemCmd::CleanEvict) {
                pendingDelete.reset(pkt);
            } else if (pkt->cmd == MemCmd::WriteClean) {
                // A WriteClean should never coalesce with any
                // outstanding cache maintenance requests.

                // We use forward_time here because there is an
                // uncached memory write, forwarded to WriteBuffer.
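                // forward_time was computed in recvTimingReq() as
                // clockEdge(forwardLatency) + pkt->headerDelay. With
                // hypothetical numbers: a 500-tick clock period,
                // forwardLatency = 2 cycles and headerDelay = 1000
                // ticks, a packet arriving on the edge at tick 10000
                // is buffered at 10000 + 2 * 500 + 1000 = 12000.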
                allocateWriteBuffer(pkt, forward_time);
            } else {
                DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
                        pkt->print());

                assert(pkt->req->masterId() < system->maxMasters());
                mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // We use forward_time here because it is the same
                // considering new targets. We have multiple
                // requests for the same address here. It
                // specifies the latency to allocate an internal
                // buffer and to schedule an event to the queued
                // port and also takes into account the additional
                // delay of the xbar.
                mshr->allocateTarget(pkt, forward_time, order++,
                                     allocOnFill(pkt->cmd));
                if (mshr->getNumTargets() == numTarget) {
                    noTargetMSHR = mshr;
                    setBlocked(Blocked_NoTargets);
                    // need to be careful with this... if this mshr isn't
                    // ready yet (i.e. time > curTick()), we don't want to
                    // move it ahead of mshrs that are ready
                    // mshrQueue.moveToFront(mshr);
                }
            }
        }
    } else {
        // no MSHR
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

        if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
            // We use forward_time here because there is a
            // writeback or writeclean, forwarded to WriteBuffer.
            allocateWriteBuffer(pkt, forward_time);
        } else {
            if (blk && blk->isValid()) {
                // If we have a write miss to a valid block, we
                // need to mark the block non-readable. Otherwise
                // if we allow reads while there's an outstanding
                // write miss, the read could return stale data
                // out of the cache block... a more aggressive
                // system could detect the overlap (if any) and
                // forward data out of the MSHRs, but we don't do
                // that yet. Note that we do need to leave the
                // block valid so that it stays in the cache, in
                // case we get an upgrade response (and hence no
                // new data) when the write miss completes.
                // As long as CPUs do proper store/load forwarding
                // internally, and have a sufficiently weak memory
                // model, this is probably unnecessary, but at some
                // point it must have seemed like we needed it...
                assert((pkt->needsWritable() && !blk->isWritable()) ||
                       pkt->req->isCacheMaintenance());
                blk->status &= ~BlkReadable;
            }
            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }
    }
}

void
BaseCache::recvTimingReq(PacketPtr pkt)
{
    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    Cycles lat;
    CacheBlk *blk = nullptr;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() will set the lat value.
        satisfied = access(pkt, blk, lat, writebacks);

        // After the evicted blocks are selected, they must be forwarded
        // to the write buffer to ensure they logically precede anything
        // happening below
        doWritebacks(writebacks, clockEdge(lat + forwardLatency));
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is just the value set by the access() function.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat);
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;

    if (satisfied) {
        // notify before anything else as later handleTimingReqHit might
        // turn the packet into a response
        ppHit->notify(pkt);

        if (prefetcher && blk && blk->wasPrefetched()) {
            blk->status &= ~BlkHWPrefetched;
        }

        handleTimingReqHit(pkt, blk, request_time);
    } else {
        handleTimingReqMiss(pkt, blk, forward_time, request_time);

        ppMiss->notify(pkt);
    }

    if (prefetcher) {
        // track time of availability of next prefetch, if any
        Tick next_pf_time = prefetcher->nextPrefetchReadyTime();
        if (next_pf_time != MaxTick) {
            schedMemSideSendEvent(next_pf_time);
        }
    }
}

void
BaseCache::handleUncacheableWriteResp(PacketPtr pkt)
{
    Tick completion_time = clockEdge(responseLatency) +
        pkt->headerDelay + pkt->payloadDelay;

    // Reset the bus additional time as it is now accounted for
    pkt->headerDelay = pkt->payloadDelay = 0;

    cpuSidePort.schedTimingResp(pkt, completion_time);
}

void
BaseCache::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    // all header delay should be paid for by the crossbar, unless
    // this is a prefetch response from above
    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
             "%s saw a non-zero packet delay\n", name());

    const bool is_error = pkt->isError();

    if (is_error) {
        DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
                pkt->print());
    }

    DPRINTF(Cache, "%s: Handling response %s\n", __func__,
            pkt->print());

    // if this is a write, we should be looking at an uncacheable
    // write
    if (pkt->isWrite()) {
        assert(pkt->req->isUncacheable());
        handleUncacheableWriteResp(pkt);
        return;
    }

    // we have dealt with any (uncacheable) writes above, from here on
    // we know we are dealing with an MSHR due to a miss or a prefetch
    MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
    assert(mshr);

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = nullptr;
    }

    // Initial target is used just for stats
    QueueEntry::Target *initial_tgt = mshr->getTarget();
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick() - initial_tgt->recvTime;

    if (pkt->req->isUncacheable()) {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    } else {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    }

    PacketList writebacks;

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp ||
         mshr->wasWholeLineWrite);

    // make sure that if the mshr was due to a whole line write then
    // the response is an invalidation
    assert(!mshr->wasWholeLineWrite || pkt->isInvalidate());

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                pkt->getAddr());

        const bool allocate = (writeAllocator &&
            mshr->wasWholeLineWrite) ?
            writeAllocator->allocate() : mshr->allocOnFill();
        blk = handleFill(pkt, blk, writebacks, allocate);
        assert(blk != nullptr);
        ppFill->notify(pkt);
    }

    if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
        // The block was marked not readable while there was a pending
        // cache maintenance operation, restore its flag.
        blk->status |= BlkReadable;

        // This was a cache clean operation (without invalidate)
        // and we have a copy of the block already. Since there
        // is no invalidation, we can promote targets that don't
        // require a writable copy
        mshr->promoteReadable();
    }

    if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) {
        // If at this point the referenced block is writable and the
        // response is not a cache invalidate, we promote targets that
        // were deferred as we couldn't guarantee a writable copy
        mshr->promoteWritable();
    }

    serviceMSHRTargets(mshr, pkt, blk);

    if (mshr->promoteDeferredTargets()) {
        // avoid later read getting stale data while write miss is
        // outstanding.. see comment in timingAccess()
        if (blk) {
            blk->status &= ~BlkReadable;
        }
        mshrQueue.markPending(mshr);
        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
    } else {
        // while we deallocate an mshr from the queue we still have to
        // check the isFull condition before and after as we might
        // have been using the reserved entries already
        const bool was_full = mshrQueue.isFull();
        mshrQueue.deallocate(mshr);
        if (was_full && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }

        // Request the bus for a prefetch if this deallocation freed enough
        // MSHRs for a prefetch to take place
        if (prefetcher && mshrQueue.canPrefetch()) {
            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
                                         clockEdge());
            if (next_pf_time != MaxTick)
                schedMemSideSendEvent(next_pf_time);
        }
    }

    // if we used temp block, check to see if it is valid and then clear it out
    if (blk == tempBlock && tempBlock->isValid()) {
        evictBlock(blk, writebacks);
    }

    const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // copy writebacks to write buffer
    doWritebacks(writebacks, forward_time);

    DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
    delete pkt;
}


Tick
BaseCache::recvAtomic(PacketPtr pkt)
{
    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    // We use lookupLatency here because it is used to specify the latency
    // to access.
    Cycles lat = lookupLatency;

    CacheBlk *blk = nullptr;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty
        // block. If a dirty block is encountered a WriteClean
        // will update any copies on the path to memory
        // until the point of reference.
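        // (e.g. a CleanShared aimed at the point of coherence that
        // finds a dirty line here triggers a WriteClean that carries
        // the data towards memory, while this cache keeps a clean copy)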
        DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                __func__, pkt->print(), blk->print());
        PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
        writebacks.push_back(wb_pkt);
        pkt->setSatisfied();
    }

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);
    assert(writebacks.empty());

    if (!satisfied) {
        lat += handleAtomicReqMiss(pkt, blk, writebacks);
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention.  If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used temp block, check to see if it is valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter), first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = evictBlock(blk);
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat * clockPeriod();
}

void
BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
{
    Addr blk_addr = pkt->getBlockAddr(blkSize);
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found. As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.
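    // (e.g. a functional read that finds a Shared copy in this cache
    // is filled with our data below, but is only marked 'done' once a
    // dirty copy, here or elsewhere, has answered for it; otherwise
    // it keeps propagating through the hierarchy)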

    // see if we have data at all (owned or otherwise)
    bool have_data = blk && blk->isValid()
        && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize,
                                     blk->data);

    // data we have is dirty if marked as such or if we have an
    // in-service MSHR that is pending a modified line
    bool have_dirty =
        have_data && (blk->isDirty() ||
                      (mshr && mshr->inService && mshr->isPendingModified()));

    bool done = have_dirty ||
        cpuSidePort.trySatisfyFunctional(pkt) ||
        mshrQueue.trySatisfyFunctional(pkt) ||
        writeBuffer.trySatisfyFunctional(pkt) ||
        memSidePort.trySatisfyFunctional(pkt);

    DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
            (blk && blk->isValid()) ? "valid " : "",
            have_data ? "data " : "", done ? "done " : "");

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (done) {
        pkt->makeResponse();
    } else {
        // if it came as a request from the CPU side then make sure it
        // continues towards the memory side
        if (from_cpu_side) {
            memSidePort.sendFunctional(pkt);
        } else if (cpuSidePort.isSnooping()) {
            // if it came from the memory side, it must be a snoop request
            // and we should only forward it if we are forwarding snoops
            cpuSidePort.sendFunctionalSnoop(pkt);
        }
    }
}


void
BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = pkt->getOffset(blkSize);
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}

QueueEntry*
BaseCache::getNextQueueEntry()
{
    // Check both MSHR queue and write buffer for potential requests,
    // note that null does not mean there is no request, it could
    // simply be that it is not ready
    MSHR *miss_mshr = mshrQueue.getNext();
    WriteQueueEntry *wq_entry = writeBuffer.getNext();

    // If we got a write buffer request ready, first priority is a
    // full write buffer, otherwise we favour the miss requests
    if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
        // need to search MSHR queue for conflicting earlier miss.
        MSHR *conflict_mshr = mshrQueue.findPending(wq_entry);

        if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
            // Service misses in order until conflict is cleared.
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue write
        return wq_entry;
    } else if (miss_mshr) {
        // need to check for conflicting earlier writeback
        WriteQueueEntry *conflict_mshr = writeBuffer.findPending(miss_mshr);
        if (conflict_mshr) {
            // not sure why we don't check order here... it was in the
            // original code but commented out.

            // The only way this happens is if we are
            // doing a write and we didn't have permissions
            // then subsequently saw a writeback (owned got evicted).
            // We need to make sure to perform the writeback first
            // to preserve the dirty data, then we can issue the write

            // should we return wq_entry here instead?  I.e. do we
            // have to flush writes in order?  I don't think so... not
            // for Alpha anyway.  Maybe for x86?
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue read
        return miss_mshr;
    }

    // fall through... no pending requests.  Try a prefetch.
    assert(!miss_mshr && !wq_entry);
    if (prefetcher && mshrQueue.canPrefetch()) {
        // If we have a miss queue slot, we can try a prefetch
        PacketPtr pkt = prefetcher->getPacket();
        if (pkt) {
            Addr pf_addr = pkt->getBlockAddr(blkSize);
            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
                // Update statistic on number of prefetches issued
                // (hwpf_mshr_misses)
                assert(pkt->req->masterId() < system->maxMasters());
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // allocate an MSHR and return it, note
                // that we send the packet straight away, so do not
                // schedule the send
                return allocateMissBuffer(pkt, curTick(), false);
            } else {
                // free the request and packet
                delete pkt;
            }
        }
    }

    return nullptr;
}

void
BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
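    // (hypothetical trace: core0's Read fills the line Shared in this
    // cache; core1's ReadEx arrives here before core0 has consumed the
    // matching invalidating snoop -- we satisfy the ReadEx anyway and
    // rely on core0 to apply the buffered invalidation afterwards)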
    // assert(!pkt->needsWritable() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        if (pkt->isAtomicOp()) {
            // extract data from cache and save it into the data field in
            // the packet as a return value from this atomic op
            int offset = tags->extractBlkOffset(pkt->getAddr());
            uint8_t *blk_data = blk->data + offset;
            pkt->setData(blk_data);

            // execute AMO operation
            (*(pkt->getAtomicOp()))(blk_data);

            // set block status to dirty
            blk->status |= BlkDirty;
        } else {
            cmpAndSwap(blk, pkt);
        }
    } else if (pkt->isWrite()) {
        // we have the block in a writable state and can go ahead,
        // note that the line may also be considered writable in
        // downstream caches along the path to memory, but always
        // Exclusive, and never Modified
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in writable state
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty (and thus transition to the
        // Modified state) even if we are a failed StoreCond so we
        // supply data to any snoops that have appended themselves to
        // this cache before knowing the store will fail.
        blk->status |= BlkDirty;
        DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }

        // all read responses have a data payload
        assert(pkt->hasRespData());
        pkt->setDataFromBlock(blk->data, blkSize);
    } else if (pkt->isUpgrade()) {
        // sanity check
        assert(!pkt->hasSharers());

        if (blk->isDirty()) {
            // we were in the Owned state, and a cache above us that
            // has the line in Shared state needs to be made aware
            // that the data it already has is in fact dirty
            pkt->setCacheResponding();
            blk->status &= ~BlkDirty;
        }
    } else if (pkt->isClean()) {
        blk->status &= ~BlkDirty;
    } else {
        assert(pkt->isInvalidate());
        invalidateBlock(blk);
        DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
                pkt->print());
    }
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////
Cycles
BaseCache::calculateTagOnlyLatency(const uint32_t delay,
                                   const Cycles lookup_lat) const
{
    // A tag-only access has to wait for the packet to arrive in order to
    // perform the tag lookup.
    return ticksToCycles(delay) + lookup_lat;
}

Cycles
BaseCache::calculateAccessLatency(const CacheBlk* blk, const uint32_t delay,
                                  const Cycles lookup_lat) const
{
    Cycles lat(0);

    if (blk != nullptr) {
        // As soon as the access arrives, for sequential accesses first access
        // tags, then the data entry. In the case of parallel accesses the
        // latency is dictated by the slowest of tag and data latencies.
        if (sequentialAccess) {
            lat = ticksToCycles(delay) + lookup_lat + dataLatency;
        } else {
            lat = ticksToCycles(delay) + std::max(lookup_lat, dataLatency);
        }

        // Check if the block to be accessed is available. If not, apply the
        // access latency on top of when the block is ready to be accessed.
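        // (worked example with hypothetical numbers: delay = 0,
        // lookup_lat = 2 and dataLatency = 2 give lat = 4 cycles in
        // sequential mode; if the block only becomes ready 10 cycles
        // after the packet arrives, the wait dominates and the total
        // becomes 4 + 10 = 14 cycles)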
        const Tick tick = curTick() + delay;
        const Tick when_ready = blk->getWhenReady();
        if (when_ready > tick &&
            ticksToCycles(when_ready - tick) > lat) {
            lat += ticksToCycles(when_ready - tick);
        }
    } else {
        // In case of a miss, we neglect the data access in a parallel
        // configuration (i.e., the data access will be stopped as soon as
        // we find out it is a miss), and use the tag-only latency.
        lat = calculateTagOnlyLatency(delay, lookup_lat);
    }

    return lat;
}

bool
BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
                  PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());

    // Access block in the tags
    Cycles tag_latency(0);
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), tag_latency);

    DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(),
            blk ? "hit " + blk->print() : "miss");

    if (pkt->req->isCacheMaintenance()) {
        // A cache maintenance operation is always forwarded to the
        // memory below even if the block is found in dirty state.

        // We defer any changes to the state of the block until we
        // create and mark as in service the mshr for the downstream
        // packet.

        // Calculate access latency on top of when the packet arrives. This
        // takes into account the bus delay.
        lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

        return false;
    }

    if (pkt->isEviction()) {
        // We check for presence of block in above caches before issuing
        // Writeback or CleanEvict to write buffer. Therefore the only
        // possible case is a CleanEvict packet coming from above and
        // encountering a Writeback generated in this cache that is
        // waiting in the write buffer. Cases of upper level peer caches
        // generating CleanEvict and Writeback or simply CleanEvict and
        // CleanEvict almost simultaneously will be caught by snoops sent out
        // by crossbar.
        WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
                                                          pkt->isSecure());
        if (wb_entry) {
            assert(wb_entry->getNumTargets() == 1);
            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
            assert(wbPkt->isWriteback());

            if (pkt->isCleanEviction()) {
                // The CleanEvict and WritebackClean snoops into other
                // peer caches of the same level while traversing the
                // crossbar. If a copy of the block is found, the
                // packet is deleted in the crossbar. Hence, none of
                // the other upper level caches connected to this
                // cache have the block, so we can clear the
                // BLOCK_CACHED flag in the Writeback if set and
                // discard the CleanEvict by returning true.
                wbPkt->clearBlockCached();

                // A clean evict does not need to access the data array
                lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

                return true;
            } else {
                assert(pkt->cmd == MemCmd::WritebackDirty);
                // Dirty writeback from above trumps our clean
                // writeback... discard here
                // Note: markInService will remove entry from writeback buffer.
                markInService(wb_entry);
                delete wbPkt;
            }
        }
    }

    // Writeback handling is a special case.  We can write the block into
    // the cache without having a writeable copy (or any copy at all).
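    // (e.g. an L1 evicting a Modified line sends a WritebackDirty that
    // is absorbed below even if this cache currently holds no copy of
    // the block: a victim is allocated, the data written and BlkDirty
    // set, all without a prior writable copy)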
    if (pkt->isWriteback()) {
        assert(blkSize == pkt->getSize());

        // we could get a clean writeback while we are having
        // outstanding accesses to a block, do the simple thing for
        // now and drop the clean writeback so that we do not upset
        // any ordering/decisions about ownership already taken
        if (pkt->cmd == MemCmd::WritebackClean &&
            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
                    "dropping\n", pkt->getAddr());

            // A writeback searches for the block, then writes the data.
            // As the writeback is being dropped, the data is not touched,
            // and we just had to wait for the time to find a match in the
            // MSHR. As of now assume a mshr queue search takes as long as
            // a tag lookup for simplicity.
            lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

            return true;
        }

        if (!blk) {
            // need to do a replacement
            blk = allocateBlock(pkt, writebacks);
            if (!blk) {
                // no replaceable block available: give up, fwd to next level.
                incMissCount(pkt);

                // A writeback searches for the block, then writes the data.
                // As the block could not be found, it was a tag-only access.
                lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

                return false;
            }

            blk->status |= BlkReadable;
        }
        // only mark the block dirty if we got a writeback command,
        // and leave it as is for a clean writeback
        if (pkt->cmd == MemCmd::WritebackDirty) {
            // TODO: the coherent cache can assert(!blk->isDirty());
            blk->status |= BlkDirty;
        }
        // if the packet does not have sharers, it is passing
        // writable, and we got the writeback in Modified or Exclusive
        // state, if not we are in the Owned or Shared state
        if (!pkt->hasSharers()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
        incHitCount(pkt);

        // A writeback searches for the block, then writes the data
        lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);

        // When the packet metadata arrives, the tag lookup will be done while
        // the payload is arriving. Then the block will be ready to access as
        // soon as the fill is done
        blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
            std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay));

        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        // A CleanEvict does not need to access the data array
        lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

        if (blk) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and delete
            // it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the CleanEvict
        // like a Writeback which could not find a replaceable block so has to
        // go to next level.
        return false;
    } else if (pkt->cmd == MemCmd::WriteClean) {
        // WriteClean handling is a special case.
        // We can allocate a block directly if it doesn't exist and
        // we can update the block immediately. The WriteClean
        // transfers the ownership of the block as well.
        assert(blkSize == pkt->getSize());

        if (!blk) {
            if (pkt->writeThrough()) {
                // A writeback searches for the block, then writes the data.
                // As the block could not be found, it was a tag-only access.
                lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

                // if this is a write through packet, we don't try to
                // allocate if the block is not present
                return false;
            } else {
                // a writeback that misses needs to allocate a new block
                blk = allocateBlock(pkt, writebacks);
                if (!blk) {
                    // no replaceable block available: give up, fwd to
                    // next level.
                    incMissCount(pkt);

                    // A writeback searches for the block, then writes the
                    // data. As the block could not be found, it was a
                    // tag-only access.
                    lat = calculateTagOnlyLatency(pkt->headerDelay,
                                                  tag_latency);

                    return false;
                }

                blk->status |= BlkReadable;
            }
        }

        // at this point either this is a writeback or a write-through
        // write clean operation and the block is already in this
        // cache, we need to update the data and the block flags
        assert(blk);
        // TODO: the coherent cache can assert(!blk->isDirty());
        if (!pkt->writeThrough()) {
            blk->status |= BlkDirty;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());

        incHitCount(pkt);

        // A writeback searches for the block, then writes the data
        lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);

        // When the packet metadata arrives, the tag lookup will be done while
        // the payload is arriving. Then the block will be ready to access as
        // soon as the fill is done
        blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
            std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay));

        // if this is a write-through packet it will be sent to cache
        // below
        return !pkt->writeThrough();
    } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
                       blk->isReadable())) {
        // OK to satisfy access
        incHitCount(pkt);

        // Calculate access latency based on the need to access the data array
        if (pkt->isRead() || pkt->isWrite()) {
            lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);
        } else {
            lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
        }

        satisfyRequest(pkt, blk);
        maintainClusivity(pkt->fromCache(), blk);

        return true;
    }

    // Can't satisfy access normally... either no block (blk == nullptr)
    // or have block but need writable

    incMissCount(pkt);

    lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);

    if (!blk && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional...
        // just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}

void
BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
{
    if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
        clusivity == Enums::mostly_excl) {
        // if we have responded to a cache, and our block is still
        // valid, but not dirty, and this cache is mostly exclusive
        // with respect to the cache above, drop the block
        invalidateBlock(blk);
    }
}

CacheBlk*
BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
                      bool allocate)
{
    assert(pkt->isResponse());
    Addr addr = pkt->getAddr();
    bool is_secure = pkt->isSecure();
#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    // When handling a fill, we should have no writes to this line.
    assert(addr == pkt->getBlockAddr(blkSize));
    assert(!writeBuffer.findMatch(addr, is_secure));

    if (!blk) {
        // better have read new data...
        assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp);

        // need to do a replacement if allocating, otherwise we stick
        // with the temporary storage
        blk = allocate ? allocateBlock(pkt, writebacks) : nullptr;

        if (!blk) {
            // No replaceable block or a mostly exclusive
            // cache... just use temporary storage to complete the
            // current request and then get rid of it
            blk = tempBlock;
            tempBlock->insert(addr, is_secure);
            DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
                    is_secure ? "s" : "ns");
        }
    } else {
        // existing block... probably an upgrade
        // don't clear block status... if block is already dirty we
        // don't want to lose that
    }

    // Block is guaranteed to be valid at this point
    assert(blk->isValid());
    assert(blk->isSecure() == is_secure);
    assert(regenerateBlkAddr(blk) == addr);

    blk->status |= BlkReadable;

    // sanity check for whole-line writes, which should always be
    // marked as writable as part of the fill, and then later marked
    // dirty as part of satisfyRequest
    if (pkt->cmd == MemCmd::InvalidateResp) {
        assert(!pkt->hasSharers());
    }

    // here we deal with setting the appropriate state of the line,
    // and we start by looking at the hasSharers flag, and ignore the
    // cacheResponding flag (normally signalling dirty data) if the
    // packet has sharers, thus the line is never allocated as Owned
    // (dirty but not writable), and always ends up being either
    // Shared, Exclusive or Modified, see Packet::setCacheResponding
    // for more details
    if (!pkt->hasSharers()) {
        // we could get a writable line from memory (rather than a
        // cache) even in a read-only cache, note that we set this bit
        // even for a read-only cache, possibly revisit this decision
        blk->status |= BlkWritable;

        // check if we got this via cache-to-cache transfer (i.e., from a
        // cache that had the block in Modified or Owned state)
        if (pkt->cacheResponding()) {
            // we got the block in Modified state, and invalidated the
            // owner's copy
            blk->status |= BlkDirty;

            chatty_assert(!isReadOnly, "Should never see dirty snoop response "
                          "in read-only cache %s\n", name());

        } else if (pkt->cmd.isSWPrefetch() && pkt->needsWritable()) {
            // All other copies of the block were invalidated and we
            // have an exclusive copy.

            // The coherence protocol assumes that if we fetched an
            // exclusive copy of the block, we have the intention to
            // modify it. Therefore the MSHR for the PrefetchExReq has
            // been the point of ordering and this cache has committed
            // to respond to snoops for the block.
            //
            // In most cases this is true anyway - a PrefetchExReq
            // will be followed by a WriteReq. However, if that
            // doesn't happen, the block is not marked as dirty and
            // the cache doesn't respond to snoops that it has
            // committed to respond to.
            //
            // To avoid deadlocks in cases where there is a snoop
            // between the PrefetchExReq and the expected WriteReq, we
            // proactively mark the block as Dirty.

            blk->status |= BlkDirty;

            panic_if(!isReadOnly, "Prefetch exclusive requests from read-only "
                     "cache %s\n", name());
        }
    }

    DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
            addr, is_secure ? "s" : "ns", old_state, blk->print());

    // if we got new data, copy it in (checking for a read response
    // and a response that has data is the same in the end)
    if (pkt->isRead()) {
        // sanity checks
        assert(pkt->hasData());
        assert(pkt->getSize() == blkSize);

        pkt->writeDataToBlock(blk->data, blkSize);
    }
    // The block will be ready when the payload arrives and the fill is done
    blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
                      pkt->payloadDelay);

    return blk;
}

CacheBlk*
BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks)
{
    // Get address
    const Addr addr = pkt->getAddr();

    // Get secure bit
    const bool is_secure = pkt->isSecure();

    // Find replacement victim
    std::vector<CacheBlk*> evict_blks;
    CacheBlk *victim = tags->findVictim(addr, is_secure, evict_blks);

    // It is valid to return nullptr if there is no victim
    if (!victim)
        return nullptr;

    // Print victim block's information
    DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print());

    // Check for transient state allocations. If any of the entries listed
    // for eviction has a transient state, the allocation fails
    bool replacement = false;
    for (const auto& blk : evict_blks) {
        if (blk->isValid()) {
            replacement = true;

            Addr repl_addr = regenerateBlkAddr(blk);
            MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
            if (repl_mshr) {
                // must be an outstanding upgrade or clean request
                // on a block we're about to replace...
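                // (e.g. an in-flight upgrade: the MSHR still expects a
                // response for this block, and evicting the block now
                // would leave that response with nowhere to land, so
                // we simply decline the allocation below)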
                assert((!blk->isWritable() && repl_mshr->needsWritable()) ||
                       repl_mshr->isCleaning());

                // too hard to replace block with transient state
                // allocation failed, block not inserted
                return nullptr;
            }
        }
    }

    // The victim will be replaced by a new entry, so increase the replacement
    // counter if a valid block is being replaced
    if (replacement) {
        // Evict valid blocks associated to this victim block
        for (const auto& blk : evict_blks) {
            if (blk->isValid()) {
                DPRINTF(CacheRepl, "Evicting %s (%#llx) to make room for "
                        "%#llx (%s)\n", blk->print(), regenerateBlkAddr(blk),
                        addr, is_secure);

                if (blk->wasPrefetched()) {
                    unusedPrefetches++;
                }

                evictBlock(blk, writebacks);
            }
        }

        replacements++;
    }

    // Insert new block at victimized entry
    tags->insertBlock(pkt, victim);

    return victim;
}

void
BaseCache::invalidateBlock(CacheBlk *blk)
{
    // If handling a block present in the Tags, let it do its invalidation
    // process, which will update stats and invalidate the block itself
    if (blk != tempBlock) {
        tags->invalidate(blk);
    } else {
        tempBlock->invalidate();
    }
}

void
BaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks)
{
    PacketPtr pkt = evictBlock(blk);
    if (pkt) {
        writebacks.push_back(pkt);
    }
}

PacketPtr
BaseCache::writebackBlk(CacheBlk *blk)
{
    chatty_assert(!isReadOnly || writebackClean,
                  "Writeback from read-only cache");
    assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));

    writebacks[Request::wbMasterId]++;

    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);

    PacketPtr pkt =
        new Packet(req, blk->isDirty() ?
                   MemCmd::WritebackDirty : MemCmd::WritebackClean);

    DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n",
            pkt->print(), blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting shared means we pass the block in modified
        // state, mark our own block non-writeable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    pkt->setDataFromBlock(blk->data, blkSize);

    return pkt;
}

PacketPtr
BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
{
    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure()) {
        req->setFlags(Request::SECURE);
    }
    req->taskId(blk->task_id);

    PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id);

    if (dest) {
        req->setFlags(dest);
        pkt->setWriteThrough();
    }

    DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(),
            blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting shared means we pass the block in modified
        // state, mark our own block non-writeable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    pkt->setDataFromBlock(blk->data, blkSize);

    return pkt;
}


void
BaseCache::memWriteback()
{
    tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
}

void
BaseCache::memInvalidate()
{
    tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
}

bool
BaseCache::isDirty() const
{
    return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); });
}

bool
BaseCache::coalesce() const
{
    return writeAllocator && writeAllocator->coalesce();
}

void
BaseCache::writebackVisitor(CacheBlk &blk)
{
    if (blk.isDirty()) {
        assert(blk.isValid());

        RequestPtr request = std::make_shared<Request>(
            regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId);

        request->taskId(blk.task_id);
        if (blk.isSecure()) {
            request->setFlags(Request::SECURE);
        }

        Packet packet(request, MemCmd::WriteReq);
        packet.dataStatic(blk.data);

        memSidePort.sendFunctional(&packet);

        blk.status &= ~BlkDirty;
    }
}

void
BaseCache::invalidateVisitor(CacheBlk &blk)
{
    if (blk.isDirty())
        warn_once("Invalidating dirty cache lines. "
" \ 1537 "Expect things to break.\n"); 1538 1539 if (blk.isValid()) { 1540 assert(!blk.isDirty()); 1541 invalidateBlock(&blk); 1542 } 1543} 1544 1545Tick 1546BaseCache::nextQueueReadyTime() const 1547{ 1548 Tick nextReady = std::min(mshrQueue.nextReadyTime(), 1549 writeBuffer.nextReadyTime()); 1550 1551 // Don't signal prefetch ready time if no MSHRs available 1552 // Will signal once enoguh MSHRs are deallocated 1553 if (prefetcher && mshrQueue.canPrefetch()) { 1554 nextReady = std::min(nextReady, 1555 prefetcher->nextPrefetchReadyTime()); 1556 } 1557 1558 return nextReady; 1559} 1560 1561 1562bool 1563BaseCache::sendMSHRQueuePacket(MSHR* mshr) 1564{ 1565 assert(mshr); 1566 1567 // use request from 1st target 1568 PacketPtr tgt_pkt = mshr->getTarget()->pkt; 1569 1570 DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print()); 1571 1572 // if the cache is in write coalescing mode or (additionally) in 1573 // no allocation mode, and we have a write packet with an MSHR 1574 // that is not a whole-line write (due to incompatible flags etc), 1575 // then reset the write mode 1576 if (writeAllocator && writeAllocator->coalesce() && tgt_pkt->isWrite()) { 1577 if (!mshr->isWholeLineWrite()) { 1578 // if we are currently write coalescing, hold on the 1579 // MSHR as many cycles extra as we need to completely 1580 // write a cache line 1581 if (writeAllocator->delay(mshr->blkAddr)) { 1582 Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod(); 1583 DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow " 1584 "for write coalescing\n", tgt_pkt->print(), delay); 1585 mshrQueue.delay(mshr, delay); 1586 return false; 1587 } else { 1588 writeAllocator->reset(); 1589 } 1590 } else { 1591 writeAllocator->resetDelay(mshr->blkAddr); 1592 } 1593 } 1594 1595 CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure); 1596 1597 // either a prefetch that is not present upstream, or a normal 1598 // MSHR request, proceed to get the packet to send downstream 1599 PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable(), 1600 mshr->isWholeLineWrite()); 1601 1602 mshr->isForward = (pkt == nullptr); 1603 1604 if (mshr->isForward) { 1605 // not a cache block request, but a response is expected 1606 // make copy of current packet to forward, keep current 1607 // copy for response handling 1608 pkt = new Packet(tgt_pkt, false, true); 1609 assert(!pkt->isWrite()); 1610 } 1611 1612 // play it safe and append (rather than set) the sender state, 1613 // as forwarded packets may already have existing state 1614 pkt->pushSenderState(mshr); 1615 1616 if (pkt->isClean() && blk && blk->isDirty()) { 1617 // A cache clean opearation is looking for a dirty block. Mark 1618 // the packet so that the destination xbar can determine that 1619 // there will be a follow-up write packet as well. 1620 pkt->setSatisfied(); 1621 } 1622 1623 if (!memSidePort.sendTimingReq(pkt)) { 1624 // we are awaiting a retry, but we 1625 // delete the packet and will be creating a new packet 1626 // when we get the opportunity 1627 delete pkt; 1628 1629 // note that we have now masked any requestBus and 1630 // schedSendEvent (we will wait for a retry before 1631 // doing anything), and this is so even if we do not 1632 // care about this packet and might override it before 1633 // it gets retried 1634 return true; 1635 } else { 1636 // As part of the call to sendTimingReq the packet is 1637 // forwarded to all neighbouring caches (and any caches 1638 // above them) as a snoop. 
        // Thus at this point we know if
        // any of the neighbouring caches are responding, and if
        // so, we know it is dirty, and we can determine if it is
        // being passed as Modified, making our MSHR the ordering
        // point
        bool pending_modified_resp = !pkt->hasSharers() &&
            pkt->cacheResponding();
        markInService(mshr, pending_modified_resp);

        if (pkt->isClean() && blk && blk->isDirty()) {
            // A cache clean operation is looking for a dirty
            // block. If a dirty block is encountered a WriteClean
            // will update any copies on the path to memory
            // until the point of reference.
            DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
                                             pkt->id);
            PacketList writebacks;
            writebacks.push_back(wb_pkt);
            doWritebacks(writebacks, 0);
        }

        return false;
    }
}

bool
BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
{
    assert(wq_entry);

    // always a single target for write queue entries
    PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;

    DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());

    // forward as is, both for evictions and uncacheable writes
    if (!memSidePort.sendTimingReq(tgt_pkt)) {
        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        markInService(wq_entry);
        return false;
    }
}

void
BaseCache::serialize(CheckpointOut &cp) const
{
    bool dirty(isDirty());

    if (dirty) {
        warn("*** The cache still contains dirty data. ***\n");
        warn("    Make sure to drain the system using the correct flags.\n");
        warn("    This checkpoint will not restore correctly "
             "and dirty data in the cache will be lost!\n");
    }

    // Since we don't checkpoint the data in the cache, any dirty data
    // will be lost when restoring from a checkpoint of a system that
    // wasn't drained properly. Flag the checkpoint as invalid if the
    // cache contains dirty data.
    bool bad_checkpoint(dirty);
    SERIALIZE_SCALAR(bad_checkpoint);
}

void
BaseCache::unserialize(CheckpointIn &cp)
{
    bool bad_checkpoint;
    UNSERIALIZE_SCALAR(bad_checkpoint);
    if (bad_checkpoint) {
        fatal("Restoring from checkpoints with dirty caches is not "
              "supported in the classic memory system. Please remove any "
              "caches or drain them properly before taking checkpoints.\n");
    }
}

void
BaseCache::regStats()
{
    ClockedObject::regStats();

    using namespace Stats;

    // Hit statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        hits[access_idx]
            .init(system->maxMasters())
            .name(name() + "."
+ cstr + "_hits") 1736 .desc("number of " + cstr + " hits") 1737 .flags(total | nozero | nonan) 1738 ; 1739 for (int i = 0; i < system->maxMasters(); i++) { 1740 hits[access_idx].subname(i, system->getMasterName(i)); 1741 } 1742 } 1743 1744// These macros make it easier to sum the right subset of commands and 1745// to change the subset of commands that are considered "demand" vs 1746// "non-demand" 1747#define SUM_DEMAND(s) \ 1748 (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \ 1749 s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq]) 1750 1751// should writebacks be included here? prior code was inconsistent... 1752#define SUM_NON_DEMAND(s) \ 1753 (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq] + s[MemCmd::SoftPFExReq]) 1754 1755 demandHits 1756 .name(name() + ".demand_hits") 1757 .desc("number of demand (read+write) hits") 1758 .flags(total | nozero | nonan) 1759 ; 1760 demandHits = SUM_DEMAND(hits); 1761 for (int i = 0; i < system->maxMasters(); i++) { 1762 demandHits.subname(i, system->getMasterName(i)); 1763 } 1764 1765 overallHits 1766 .name(name() + ".overall_hits") 1767 .desc("number of overall hits") 1768 .flags(total | nozero | nonan) 1769 ; 1770 overallHits = demandHits + SUM_NON_DEMAND(hits); 1771 for (int i = 0; i < system->maxMasters(); i++) { 1772 overallHits.subname(i, system->getMasterName(i)); 1773 } 1774 1775 // Miss statistics 1776 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1777 MemCmd cmd(access_idx); 1778 const string &cstr = cmd.toString(); 1779 1780 misses[access_idx] 1781 .init(system->maxMasters()) 1782 .name(name() + "." + cstr + "_misses") 1783 .desc("number of " + cstr + " misses") 1784 .flags(total | nozero | nonan) 1785 ; 1786 for (int i = 0; i < system->maxMasters(); i++) { 1787 misses[access_idx].subname(i, system->getMasterName(i)); 1788 } 1789 } 1790 1791 demandMisses 1792 .name(name() + ".demand_misses") 1793 .desc("number of demand (read+write) misses") 1794 .flags(total | nozero | nonan) 1795 ; 1796 demandMisses = SUM_DEMAND(misses); 1797 for (int i = 0; i < system->maxMasters(); i++) { 1798 demandMisses.subname(i, system->getMasterName(i)); 1799 } 1800 1801 overallMisses 1802 .name(name() + ".overall_misses") 1803 .desc("number of overall misses") 1804 .flags(total | nozero | nonan) 1805 ; 1806 overallMisses = demandMisses + SUM_NON_DEMAND(misses); 1807 for (int i = 0; i < system->maxMasters(); i++) { 1808 overallMisses.subname(i, system->getMasterName(i)); 1809 } 1810 1811 // Miss latency statistics 1812 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1813 MemCmd cmd(access_idx); 1814 const string &cstr = cmd.toString(); 1815 1816 missLatency[access_idx] 1817 .init(system->maxMasters()) 1818 .name(name() + "." 
+ cstr + "_miss_latency") 1819 .desc("number of " + cstr + " miss cycles") 1820 .flags(total | nozero | nonan) 1821 ; 1822 for (int i = 0; i < system->maxMasters(); i++) { 1823 missLatency[access_idx].subname(i, system->getMasterName(i)); 1824 } 1825 } 1826 1827 demandMissLatency 1828 .name(name() + ".demand_miss_latency") 1829 .desc("number of demand (read+write) miss cycles") 1830 .flags(total | nozero | nonan) 1831 ; 1832 demandMissLatency = SUM_DEMAND(missLatency); 1833 for (int i = 0; i < system->maxMasters(); i++) { 1834 demandMissLatency.subname(i, system->getMasterName(i)); 1835 } 1836 1837 overallMissLatency 1838 .name(name() + ".overall_miss_latency") 1839 .desc("number of overall miss cycles") 1840 .flags(total | nozero | nonan) 1841 ; 1842 overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency); 1843 for (int i = 0; i < system->maxMasters(); i++) { 1844 overallMissLatency.subname(i, system->getMasterName(i)); 1845 } 1846 1847 // access formulas 1848 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1849 MemCmd cmd(access_idx); 1850 const string &cstr = cmd.toString(); 1851 1852 accesses[access_idx] 1853 .name(name() + "." + cstr + "_accesses") 1854 .desc("number of " + cstr + " accesses(hits+misses)") 1855 .flags(total | nozero | nonan) 1856 ; 1857 accesses[access_idx] = hits[access_idx] + misses[access_idx]; 1858 1859 for (int i = 0; i < system->maxMasters(); i++) { 1860 accesses[access_idx].subname(i, system->getMasterName(i)); 1861 } 1862 } 1863 1864 demandAccesses 1865 .name(name() + ".demand_accesses") 1866 .desc("number of demand (read+write) accesses") 1867 .flags(total | nozero | nonan) 1868 ; 1869 demandAccesses = demandHits + demandMisses; 1870 for (int i = 0; i < system->maxMasters(); i++) { 1871 demandAccesses.subname(i, system->getMasterName(i)); 1872 } 1873 1874 overallAccesses 1875 .name(name() + ".overall_accesses") 1876 .desc("number of overall (read+write) accesses") 1877 .flags(total | nozero | nonan) 1878 ; 1879 overallAccesses = overallHits + overallMisses; 1880 for (int i = 0; i < system->maxMasters(); i++) { 1881 overallAccesses.subname(i, system->getMasterName(i)); 1882 } 1883 1884 // miss rate formulas 1885 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1886 MemCmd cmd(access_idx); 1887 const string &cstr = cmd.toString(); 1888 1889 missRate[access_idx] 1890 .name(name() + "." 
+ cstr + "_miss_rate") 1891 .desc("miss rate for " + cstr + " accesses") 1892 .flags(total | nozero | nonan) 1893 ; 1894 missRate[access_idx] = misses[access_idx] / accesses[access_idx]; 1895 1896 for (int i = 0; i < system->maxMasters(); i++) { 1897 missRate[access_idx].subname(i, system->getMasterName(i)); 1898 } 1899 } 1900 1901 demandMissRate 1902 .name(name() + ".demand_miss_rate") 1903 .desc("miss rate for demand accesses") 1904 .flags(total | nozero | nonan) 1905 ; 1906 demandMissRate = demandMisses / demandAccesses; 1907 for (int i = 0; i < system->maxMasters(); i++) { 1908 demandMissRate.subname(i, system->getMasterName(i)); 1909 } 1910 1911 overallMissRate 1912 .name(name() + ".overall_miss_rate") 1913 .desc("miss rate for overall accesses") 1914 .flags(total | nozero | nonan) 1915 ; 1916 overallMissRate = overallMisses / overallAccesses; 1917 for (int i = 0; i < system->maxMasters(); i++) { 1918 overallMissRate.subname(i, system->getMasterName(i)); 1919 } 1920 1921 // miss latency formulas 1922 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1923 MemCmd cmd(access_idx); 1924 const string &cstr = cmd.toString(); 1925 1926 avgMissLatency[access_idx] 1927 .name(name() + "." + cstr + "_avg_miss_latency") 1928 .desc("average " + cstr + " miss latency") 1929 .flags(total | nozero | nonan) 1930 ; 1931 avgMissLatency[access_idx] = 1932 missLatency[access_idx] / misses[access_idx]; 1933 1934 for (int i = 0; i < system->maxMasters(); i++) { 1935 avgMissLatency[access_idx].subname(i, system->getMasterName(i)); 1936 } 1937 } 1938 1939 demandAvgMissLatency 1940 .name(name() + ".demand_avg_miss_latency") 1941 .desc("average demand miss latency") 1942 .flags(total | nozero | nonan) 1943 ; 1944 demandAvgMissLatency = demandMissLatency / demandMisses; 1945 for (int i = 0; i < system->maxMasters(); i++) { 1946 demandAvgMissLatency.subname(i, system->getMasterName(i)); 1947 } 1948 1949 overallAvgMissLatency 1950 .name(name() + ".overall_avg_miss_latency") 1951 .desc("average overall miss latency") 1952 .flags(total | nozero | nonan) 1953 ; 1954 overallAvgMissLatency = overallMissLatency / overallMisses; 1955 for (int i = 0; i < system->maxMasters(); i++) { 1956 overallAvgMissLatency.subname(i, system->getMasterName(i)); 1957 } 1958 1959 blocked_cycles.init(NUM_BLOCKED_CAUSES); 1960 blocked_cycles 1961 .name(name() + ".blocked_cycles") 1962 .desc("number of cycles access was blocked") 1963 .subname(Blocked_NoMSHRs, "no_mshrs") 1964 .subname(Blocked_NoTargets, "no_targets") 1965 ; 1966 1967 1968 blocked_causes.init(NUM_BLOCKED_CAUSES); 1969 blocked_causes 1970 .name(name() + ".blocked") 1971 .desc("number of times access was blocked") 1972 .subname(Blocked_NoMSHRs, "no_mshrs") 1973 .subname(Blocked_NoTargets, "no_targets") 1974 ; 1975 1976 avg_blocked 1977 .name(name() + ".avg_blocked_cycles") 1978 .desc("average number of cycles each access was blocked") 1979 .subname(Blocked_NoMSHRs, "no_mshrs") 1980 .subname(Blocked_NoTargets, "no_targets") 1981 ; 1982 1983 avg_blocked = blocked_cycles / blocked_causes; 1984 1985 unusedPrefetches 1986 .name(name() + ".unused_prefetches") 1987 .desc("number of HardPF blocks evicted w/o reference") 1988 .flags(nozero) 1989 ; 1990 1991 writebacks 1992 .init(system->maxMasters()) 1993 .name(name() + ".writebacks") 1994 .desc("number of writebacks") 1995 .flags(total | nozero | nonan) 1996 ; 1997 for (int i = 0; i < system->maxMasters(); i++) { 1998 writebacks.subname(i, system->getMasterName(i)); 1999 } 2000 2001 // MSHR statistics 2002
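 // Note on terminology for the statistics that follow: an MSHR
 // "hit" is an access that coalesces onto an MSHR that is already
 // outstanding for the same block, whereas an MSHR "miss" allocates
 // a new entry. Mirroring the disabled formula block further down,
 // the per-command identity below is assumed to hold:
 //
 //   mshr_accesses = mshr_hits + mshr_misses + mshr_uncacheable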
// MSHR hit statistics 2003 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2004 MemCmd cmd(access_idx); 2005 const string &cstr = cmd.toString(); 2006 2007 mshr_hits[access_idx] 2008 .init(system->maxMasters()) 2009 .name(name() + "." + cstr + "_mshr_hits") 2010 .desc("number of " + cstr + " MSHR hits") 2011 .flags(total | nozero | nonan) 2012 ; 2013 for (int i = 0; i < system->maxMasters(); i++) { 2014 mshr_hits[access_idx].subname(i, system->getMasterName(i)); 2015 } 2016 } 2017 2018 demandMshrHits 2019 .name(name() + ".demand_mshr_hits") 2020 .desc("number of demand (read+write) MSHR hits") 2021 .flags(total | nozero | nonan) 2022 ; 2023 demandMshrHits = SUM_DEMAND(mshr_hits); 2024 for (int i = 0; i < system->maxMasters(); i++) { 2025 demandMshrHits.subname(i, system->getMasterName(i)); 2026 } 2027 2028 overallMshrHits 2029 .name(name() + ".overall_mshr_hits") 2030 .desc("number of overall MSHR hits") 2031 .flags(total | nozero | nonan) 2032 ; 2033 overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits); 2034 for (int i = 0; i < system->maxMasters(); i++) { 2035 overallMshrHits.subname(i, system->getMasterName(i)); 2036 } 2037 2038 // MSHR miss statistics 2039 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2040 MemCmd cmd(access_idx); 2041 const string &cstr = cmd.toString(); 2042 2043 mshr_misses[access_idx] 2044 .init(system->maxMasters()) 2045 .name(name() + "." + cstr + "_mshr_misses") 2046 .desc("number of " + cstr + " MSHR misses") 2047 .flags(total | nozero | nonan) 2048 ; 2049 for (int i = 0; i < system->maxMasters(); i++) { 2050 mshr_misses[access_idx].subname(i, system->getMasterName(i)); 2051 } 2052 } 2053 2054 demandMshrMisses 2055 .name(name() + ".demand_mshr_misses") 2056 .desc("number of demand (read+write) MSHR misses") 2057 .flags(total | nozero | nonan) 2058 ; 2059 demandMshrMisses = SUM_DEMAND(mshr_misses); 2060 for (int i = 0; i < system->maxMasters(); i++) { 2061 demandMshrMisses.subname(i, system->getMasterName(i)); 2062 } 2063 2064 overallMshrMisses 2065 .name(name() + ".overall_mshr_misses") 2066 .desc("number of overall MSHR misses") 2067 .flags(total | nozero | nonan) 2068 ; 2069 overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses); 2070 for (int i = 0; i < system->maxMasters(); i++) { 2071 overallMshrMisses.subname(i, system->getMasterName(i)); 2072 } 2073 2074 // MSHR miss latency statistics 2075 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2076 MemCmd cmd(access_idx); 2077 const string &cstr = cmd.toString(); 2078 2079 mshr_miss_latency[access_idx] 2080 .init(system->maxMasters()) 2081 .name(name() + "." 
+ cstr + "_mshr_miss_latency") 2082 .desc("number of " + cstr + " MSHR miss cycles") 2083 .flags(total | nozero | nonan) 2084 ; 2085 for (int i = 0; i < system->maxMasters(); i++) { 2086 mshr_miss_latency[access_idx].subname(i, system->getMasterName(i)); 2087 } 2088 } 2089 2090 demandMshrMissLatency 2091 .name(name() + ".demand_mshr_miss_latency") 2092 .desc("number of demand (read+write) MSHR miss cycles") 2093 .flags(total | nozero | nonan) 2094 ; 2095 demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency); 2096 for (int i = 0; i < system->maxMasters(); i++) { 2097 demandMshrMissLatency.subname(i, system->getMasterName(i)); 2098 } 2099 2100 overallMshrMissLatency 2101 .name(name() + ".overall_mshr_miss_latency") 2102 .desc("number of overall MSHR miss cycles") 2103 .flags(total | nozero | nonan) 2104 ; 2105 overallMshrMissLatency = 2106 demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency); 2107 for (int i = 0; i < system->maxMasters(); i++) { 2108 overallMshrMissLatency.subname(i, system->getMasterName(i)); 2109 } 2110 2111 // MSHR uncacheable statistics 2112 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2113 MemCmd cmd(access_idx); 2114 const string &cstr = cmd.toString(); 2115 2116 mshr_uncacheable[access_idx] 2117 .init(system->maxMasters()) 2118 .name(name() + "." + cstr + "_mshr_uncacheable") 2119 .desc("number of " + cstr + " MSHR uncacheable") 2120 .flags(total | nozero | nonan) 2121 ; 2122 for (int i = 0; i < system->maxMasters(); i++) { 2123 mshr_uncacheable[access_idx].subname(i, system->getMasterName(i)); 2124 } 2125 } 2126 2127 overallMshrUncacheable 2128 .name(name() + ".overall_mshr_uncacheable_misses") 2129 .desc("number of overall MSHR uncacheable misses") 2130 .flags(total | nozero | nonan) 2131 ; 2132 overallMshrUncacheable = 2133 SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable); 2134 for (int i = 0; i < system->maxMasters(); i++) { 2135 overallMshrUncacheable.subname(i, system->getMasterName(i)); 2136 } 2137 2138 // MSHR uncacheable latency statistics 2139 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2140 MemCmd cmd(access_idx); 2141 const string &cstr = cmd.toString(); 2142 2143 mshr_uncacheable_lat[access_idx] 2144 .init(system->maxMasters()) 2145 .name(name() + "." + cstr + "_mshr_uncacheable_latency") 2146 .desc("number of " + cstr + " MSHR uncacheable cycles") 2147 .flags(total | nozero | nonan) 2148 ; 2149 for (int i = 0; i < system->maxMasters(); i++) { 2150 mshr_uncacheable_lat[access_idx].subname( 2151 i, system->getMasterName(i)); 2152 } 2153 } 2154 2155 overallMshrUncacheableLatency 2156 .name(name() + ".overall_mshr_uncacheable_latency") 2157 .desc("number of overall MSHR uncacheable cycles") 2158 .flags(total | nozero | nonan) 2159 ; 2160 overallMshrUncacheableLatency = 2161 SUM_DEMAND(mshr_uncacheable_lat) + 2162 SUM_NON_DEMAND(mshr_uncacheable_lat); 2163 for (int i = 0; i < system->maxMasters(); i++) { 2164 overallMshrUncacheableLatency.subname(i, system->getMasterName(i)); 2165 } 2166 2167#if 0 2168 // MSHR access formulas 2169 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2170 MemCmd cmd(access_idx); 2171 const string &cstr = cmd.toString(); 2172 2173 mshrAccesses[access_idx] 2174 .name(name() + "."
+ cstr + "_mshr_accesses") 2175 .desc("number of " + cstr + " mshr accesses(hits+misses)") 2176 .flags(total | nozero | nonan) 2177 ; 2178 mshrAccesses[access_idx] = 2179 mshr_hits[access_idx] + mshr_misses[access_idx] 2180 + mshr_uncacheable[access_idx]; 2181 } 2182 2183 demandMshrAccesses 2184 .name(name() + ".demand_mshr_accesses") 2185 .desc("number of demand (read+write) mshr accesses") 2186 .flags(total | nozero | nonan) 2187 ; 2188 demandMshrAccesses = demandMshrHits + demandMshrMisses; 2189 2190 overallMshrAccesses 2191 .name(name() + ".overall_mshr_accesses") 2192 .desc("number of overall (read+write) mshr accesses") 2193 .flags(total | nozero | nonan) 2194 ; 2195 overallMshrAccesses = overallMshrHits + overallMshrMisses 2196 + overallMshrUncacheable; 2197#endif 2198 2199 // MSHR miss rate formulas 2200 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2201 MemCmd cmd(access_idx); 2202 const string &cstr = cmd.toString(); 2203 2204 mshrMissRate[access_idx] 2205 .name(name() + "." + cstr + "_mshr_miss_rate") 2206 .desc("mshr miss rate for " + cstr + " accesses") 2207 .flags(total | nozero | nonan) 2208 ; 2209 mshrMissRate[access_idx] = 2210 mshr_misses[access_idx] / accesses[access_idx]; 2211 2212 for (int i = 0; i < system->maxMasters(); i++) { 2213 mshrMissRate[access_idx].subname(i, system->getMasterName(i)); 2214 } 2215 } 2216 2217 demandMshrMissRate 2218 .name(name() + ".demand_mshr_miss_rate") 2219 .desc("mshr miss rate for demand accesses") 2220 .flags(total | nozero | nonan) 2221 ; 2222 demandMshrMissRate = demandMshrMisses / demandAccesses; 2223 for (int i = 0; i < system->maxMasters(); i++) { 2224 demandMshrMissRate.subname(i, system->getMasterName(i)); 2225 } 2226 2227 overallMshrMissRate 2228 .name(name() + ".overall_mshr_miss_rate") 2229 .desc("mshr miss rate for overall accesses") 2230 .flags(total | nozero | nonan) 2231 ; 2232 overallMshrMissRate = overallMshrMisses / overallAccesses; 2233 for (int i = 0; i < system->maxMasters(); i++) { 2234 overallMshrMissRate.subname(i, system->getMasterName(i)); 2235 } 2236 2237 // mshrMiss latency formulas 2238 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2239 MemCmd cmd(access_idx); 2240 const string &cstr = cmd.toString(); 2241 2242 avgMshrMissLatency[access_idx] 2243 .name(name() + "." 
+ cstr + "_avg_mshr_miss_latency") 2244 .desc("average " + cstr + " mshr miss latency") 2245 .flags(total | nozero | nonan) 2246 ; 2247 avgMshrMissLatency[access_idx] = 2248 mshr_miss_latency[access_idx] / mshr_misses[access_idx]; 2249 2250 for (int i = 0; i < system->maxMasters(); i++) { 2251 avgMshrMissLatency[access_idx].subname( 2252 i, system->getMasterName(i)); 2253 } 2254 } 2255 2256 demandAvgMshrMissLatency 2257 .name(name() + ".demand_avg_mshr_miss_latency") 2258 .desc("average demand mshr miss latency") 2259 .flags(total | nozero | nonan) 2260 ; 2261 demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses; 2262 for (int i = 0; i < system->maxMasters(); i++) { 2263 demandAvgMshrMissLatency.subname(i, system->getMasterName(i)); 2264 } 2265 2266 overallAvgMshrMissLatency 2267 .name(name() + ".overall_avg_mshr_miss_latency") 2268 .desc("average overall mshr miss latency") 2269 .flags(total | nozero | nonan) 2270 ; 2271 overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses; 2272 for (int i = 0; i < system->maxMasters(); i++) { 2273 overallAvgMshrMissLatency.subname(i, system->getMasterName(i)); 2274 } 2275 2276 // mshrUncacheable latency formulas 2277 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2278 MemCmd cmd(access_idx); 2279 const string &cstr = cmd.toString(); 2280 2281 avgMshrUncacheableLatency[access_idx] 2282 .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency") 2283 .desc("average " + cstr + " mshr uncacheable latency") 2284 .flags(total | nozero | nonan) 2285 ; 2286 avgMshrUncacheableLatency[access_idx] = 2287 mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx]; 2288 2289 for (int i = 0; i < system->maxMasters(); i++) { 2290 avgMshrUncacheableLatency[access_idx].subname( 2291 i, system->getMasterName(i)); 2292 } 2293 } 2294 2295 overallAvgMshrUncacheableLatency 2296 .name(name() + ".overall_avg_mshr_uncacheable_latency") 2297 .desc("average overall mshr uncacheable latency") 2298 .flags(total | nozero | nonan) 2299 ; 2300 overallAvgMshrUncacheableLatency = 2301 overallMshrUncacheableLatency / overallMshrUncacheable; 2302 for (int i = 0; i < system->maxMasters(); i++) { 2303 overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i)); 2304 } 2305 2306 replacements 2307 .name(name() + ".replacements") 2308 .desc("number of replacements") 2309 ; 2310} 2311 2312void 2313BaseCache::regProbePoints() 2314{ 2315 ppHit = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Hit"); 2316 ppMiss = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Miss"); 2317 ppFill = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Fill"); 2318} 2319 2320/////////////// 2321// 2322// CpuSidePort 2323// 2324/////////////// 2325bool 2326BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt) 2327{ 2328 // Snoops shouldn't happen when bypassing caches 2329 assert(!cache->system->bypassCaches()); 2330 2331 assert(pkt->isResponse()); 2332 2333 // Express snoop responses from master to slave, e.g., from L1 to L2 2334 cache->recvTimingSnoopResp(pkt); 2335 return true; 2336} 2337 2338 2339bool 2340BaseCache::CpuSidePort::tryTiming(PacketPtr pkt) 2341{ 2342 if (cache->system->bypassCaches() || pkt->isExpressSnoop()) { 2343 // always let express snoop packets through even if blocked 2344 return true; 2345 } else if (blocked || mustSendRetry) { 2346 // either already committed to send a retry, or blocked 2347 mustSendRetry = true; 2348 return false; 2349 } 2350 mustSendRetry = false; 2351 return true; 2352 }
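// Sketch (not compiled code) of how a master attached to this port
// is expected to drive the handshake that tryTiming() above and
// recvTimingReq() below implement; 'port' and 'pkt' are hypothetical
// placeholders following the usual gem5 timing protocol.
#if 0
    if (!port.sendTimingReq(pkt)) {
        // the request was not accepted (the cache is blocked or has
        // already committed to a retry); keep pkt and try again only
        // after recvReqRetry() is called on the master
    } else {
        // ownership of pkt has passed to the cache; if a response is
        // expected, it arrives later via recvTimingResp()
    }
#endif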
2353 2354 bool 2355 BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt) 2356 { 2357 assert(pkt->isRequest()); 2358 2359 if (cache->system->bypassCaches()) { 2360 // Just forward the packet if caches are disabled. 2361 // @todo This should really enqueue the packet rather than forward it 2362 bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt); 2363 assert(success); 2364 return true; 2365 } else if (tryTiming(pkt)) { 2366 cache->recvTimingReq(pkt); 2367 return true; 2368 } 2369 return false; 2370} 2371 2372Tick 2373BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt) 2374{ 2375 if (cache->system->bypassCaches()) { 2376 // Forward the request if the system is in cache bypass mode. 2377 return cache->memSidePort.sendAtomic(pkt); 2378 } else { 2379 return cache->recvAtomic(pkt); 2380 } 2381} 2382 2383void 2384BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt) 2385{ 2386 if (cache->system->bypassCaches()) { 2387 // The cache should be flushed if we are in cache bypass mode, 2388 // so we don't need to check if we need to update anything. 2389 cache->memSidePort.sendFunctional(pkt); 2390 return; 2391 } 2392 2393 // functional request 2394 cache->functionalAccess(pkt, true); 2395} 2396 2397AddrRangeList 2398BaseCache::CpuSidePort::getAddrRanges() const 2399{ 2400 return cache->getAddrRanges(); 2401} 2402 2403 2404BaseCache:: 2405CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache, 2406 const std::string &_label) 2407 : CacheSlavePort(_name, _cache, _label), cache(_cache) 2408 { 2409 } 2410 2411/////////////// 2412// 2413// MemSidePort 2414// 2415/////////////// 2416bool 2417BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt) 2418{ 2419 cache->recvTimingResp(pkt); 2420 return true; 2421} 2422 2423// Express snooping requests to memside port 2424void 2425BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt) 2426{ 2427 // Snoops shouldn't happen when bypassing caches 2428 assert(!cache->system->bypassCaches()); 2429 2430 // handle snooping requests 2431 cache->recvTimingSnoopReq(pkt); 2432} 2433 2434Tick 2435BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt) 2436{ 2437 // Snoops shouldn't happen when bypassing caches 2438 assert(!cache->system->bypassCaches()); 2439 2440 return cache->recvAtomicSnoop(pkt); 2441} 2442 2443void 2444BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt) 2445{ 2446 // Snoops shouldn't happen when bypassing caches 2447 assert(!cache->system->bypassCaches()); 2448 2449 // functional snoop (note that in contrast to atomic we don't have 2450 // a specific functionalSnoop method, as they have the same 2451 // behaviour regardless) 2452 cache->functionalAccess(pkt, false); 2453} 2454 2455void 2456BaseCache::CacheReqPacketQueue::sendDeferredPacket() 2457{ 2458 // sanity check 2459 assert(!waitingOnRetry); 2460 2461 // there should never be any deferred request packets in the 2462 // queue; instead we rely on the cache to provide the packets 2463 // from the MSHR queue or write queue 2464 assert(deferredPacketReadyTime() == MaxTick); 2465 2466 // check for request packets (requests & writebacks) 2467 QueueEntry* entry = cache.getNextQueueEntry(); 2468 2469 if (!entry) { 2470 // can happen if e.g. we attempt a writeback and fail, but 2471 // before the retry, the writeback is eliminated because 2472 // we snoop another cache's ReadEx.
} else { 2474 // let our snoop responses go first if there are responses to 2475 // the same addresses 2476 if (checkConflictingSnoop(entry->getTarget()->pkt)) { 2477 return; 2478 } 2479 waitingOnRetry = entry->sendPacket(cache); 2480 } 2481 2482 // if we succeeded and are not waiting for a retry, schedule the 2483 // next send considering when the next queue is ready; note that 2484 // snoop responses have their own packet queue and thus schedule 2485 // their own events 2486 if (!waitingOnRetry) { 2487 schedSendEvent(cache.nextQueueReadyTime()); 2488 } 2489} 2490 2491BaseCache::MemSidePort::MemSidePort(const std::string &_name, 2492 BaseCache *_cache, 2493 const std::string &_label) 2494 : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue), 2495 _reqQueue(*_cache, *this, _snoopRespQueue, _label), 2496 _snoopRespQueue(*_cache, *this, true, _label), cache(_cache) 2497{ 2498} 2499 2500void 2501WriteAllocator::updateMode(Addr write_addr, unsigned write_size, 2502 Addr blk_addr) 2503{ 2504 // check if we are continuing where the last write ended 2505 if (nextAddr == write_addr) { 2506 delayCtr[blk_addr] = delayThreshold; 2507 // stop if we have already saturated 2508 if (mode != WriteMode::NO_ALLOCATE) { 2509 byteCount += write_size; 2510 // switch to coalescing mode if we have passed the lower 2511 // threshold 2512 if (mode == WriteMode::ALLOCATE && 2513 byteCount > coalesceLimit) { 2514 mode = WriteMode::COALESCE; 2515 DPRINTF(Cache, "Switched to write coalescing\n"); 2516 } else if (mode == WriteMode::COALESCE && 2517 byteCount > noAllocateLimit) { 2518 // then switch to no-allocate mode if we also pass 2519 // the upper threshold 2520 mode = WriteMode::NO_ALLOCATE; 2521 DPRINTF(Cache, "Switched to write-no-allocate\n"); 2522 } 2523 } 2524 } else { 2525 // we did not see a write matching the previous one, start 2526 // over again 2527 byteCount = write_size; 2528 mode = WriteMode::ALLOCATE; 2529 resetDelay(blk_addr); 2530 } 2531 nextAddr = write_addr + write_size; 2532} 2533 2534WriteAllocator* 2535WriteAllocatorParams::create() 2536{ 2537 return new WriteAllocator(this); 2538} 2539
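// Illustration of the mode progression implemented by
// WriteAllocator::updateMode() above: back-to-back writes that each
// continue where the previous one ended keep growing byteCount, so
// the allocator moves from ALLOCATE to COALESCE once byteCount
// exceeds coalesceLimit, and on to NO_ALLOCATE once it exceeds
// noAllocateLimit. A sketch, not compiled code: 'wa', 'n' and
// 'block_align()' are hypothetical placeholders.
#if 0
    WriteAllocator *wa = ...;
    Addr addr = 0x1000;
    const unsigned size = 64; // assume one full cache line per write
    for (int i = 0; i < n; i++, addr += size) {
        // each write starts at nextAddr, so the mode eventually
        // progresses ALLOCATE -> COALESCE -> NO_ALLOCATE
        wa->updateMode(addr, size, block_align(addr));
    }
#endif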