base.cc revision 13866
/*
 * Copyright (c) 2012-2013, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Definition of BaseCache functions.
 */
#include "mem/cache/base.hh"

#include "base/compiler.hh"
#include "base/logging.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheRepl.hh"
#include "debug/CacheVerbose.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "mem/cache/queue_entry.hh"
#include "params/BaseCache.hh"
#include "params/WriteAllocator.hh"
#include "sim/core.hh"

class BaseMasterPort;
class BaseSlavePort;

using namespace std;

BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
                                          BaseCache *_cache,
                                          const std::string &_label)
    : QueuedSlavePort(_name, _cache, queue),
      queue(*_cache, *this, true, _label),
      blocked(false), mustSendRetry(false),
      sendRetryEvent([this]{ processSendRetry(); }, _name)
{
}

BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
    : MemObject(p),
      cpuSidePort(p->name + ".cpu_side", this, "CpuSidePort"),
      memSidePort(p->name + ".mem_side", this, "MemSidePort"),
      mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
      writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
      tags(p->tags),
      prefetcher(p->prefetcher),
      writeAllocator(p->write_allocator),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
                                    name(), false,
                                    EventBase::Delayed_Writeback_Pri),
      blkSize(blk_size),
      lookupLatency(p->tag_latency),
      dataLatency(p->data_latency),
      forwardLatency(p->tag_latency),
      fillLatency(p->data_latency),
      responseLatency(p->response_latency),
      sequentialAccess(p->sequential_access),
      numTarget(p->tgts_per_mshr),
      forwardSnoops(true),
      clusivity(p->clusivity),
      isReadOnly(p->is_read_only),
      blocked(0),
      order(0),
      noTargetMSHR(nullptr),
      missCount(p->max_miss_count),
      addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()),
      system(p->system)
{
    // the MSHR queue has no reserve entries as we check the MSHR
    // queue on every single allocation, whereas the write queue has
    // as many reserve entries as we have MSHRs, since every MSHR may
    // eventually require a writeback, and we do not check the write
    // buffer before committing to an MSHR

    // forward snoops is overridden in init() once we can query
    // whether the connected master is actually snooping or not

    tempBlock = new TempCacheBlk(blkSize);

    tags->tagsInit();
    if (prefetcher)
        prefetcher->setCache(this);
}

BaseCache::~BaseCache()
{
    delete tempBlock;
}

void
BaseCache::CacheSlavePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(CachePort, "Port is blocking new requests\n");
    blocked = true;
    // if we already scheduled a retry in this cycle, but it has not yet
    // happened, cancel it
    if (sendRetryEvent.scheduled()) {
        owner.deschedule(sendRetryEvent);
        DPRINTF(CachePort, "Port descheduled retry\n");
        mustSendRetry = true;
    }
}
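// Editorial note (not part of the original source): setBlocked() above and
// clearBlocked()/processSendRetry() below form the port's flow-control
// handshake. While blocked, the queued port simply refuses new requests;
// if a retry was already scheduled it is descheduled and remembered in
// mustSendRetry, so that clearBlocked() can reschedule sendRetryEvent
// (currently at curTick() + 1) and processSendRetry() finally calls
// sendRetryReq() to wake the stalled master.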
void
BaseCache::CacheSlavePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(CachePort, "Port is accepting new requests\n");
    blocked = false;
    if (mustSendRetry) {
        // @TODO: need to find a better time (next cycle?)
        owner.schedule(sendRetryEvent, curTick() + 1);
    }
}

void
BaseCache::CacheSlavePort::processSendRetry()
{
    DPRINTF(CachePort, "Port is sending retry\n");

    // reset the flag and call retry
    mustSendRetry = false;
    sendRetryReq();
}

Addr
BaseCache::regenerateBlkAddr(CacheBlk* blk)
{
    if (blk != tempBlock) {
        return tags->regenerateBlkAddr(blk);
    } else {
        return tempBlock->getAddr();
    }
}

void
BaseCache::init()
{
    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
        fatal("Cache ports on %s are not connected\n", name());
    cpuSidePort.sendRangeChange();
    forwardSnoops = cpuSidePort.isSnooping();
}

Port &
BaseCache::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_side") {
        return memSidePort;
    } else if (if_name == "cpu_side") {
        return cpuSidePort;
    } else {
        return MemObject::getPort(if_name, idx);
    }
}

bool
BaseCache::inRange(Addr addr) const
{
    for (const auto& r : addrRanges) {
        if (r.contains(addr)) {
            return true;
        }
    }
    return false;
}

void
BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    if (pkt->needsResponse()) {
        // These delays should have been consumed by now
        assert(pkt->headerDelay == 0);
        assert(pkt->payloadDelay == 0);

        pkt->makeTimingResponse();

        // In this case we are considering request_time that takes
        // into account the delay of the xbar, if any, and just
        // lat, neglecting responseLatency, modelling hit latency
        // just as the value of lat overridden by access(), which calls
        // the calculateAccessLatency() function.
        cpuSidePort.schedTimingResp(pkt, request_time);
    } else {
        DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
                pkt->print());

        // queue the packet for deletion, as the sending cache is
        // still relying on it; if the block is found in access(),
        // CleanEvict and Writeback messages will be deleted
        // here as well
        pendingDelete.reset(pkt);
    }
}
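// Editorial note (not part of the original source): handleTimingReqMiss()
// below implements MSHR coalescing. As a hypothetical example, if two
// timing reads to the same block arrive back to back, the first misses
// and allocates an MSHR via allocateMissBuffer(); the second finds that
// MSHR and is appended as an additional target with
// mshr->allocateTarget(). Only when the target count reaches numTarget
// (tgts_per_mshr) does the cache assert Blocked_NoTargets.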
void
BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                               Tick forward_time, Tick request_time)
{
    if (writeAllocator &&
        pkt && pkt->isWrite() && !pkt->req->isUncacheable()) {
        writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
                                   pkt->getBlockAddr(blkSize));
    }

    if (mshr) {
        /// MSHR hit
        /// @note writebacks will be checked in getNextMSHR()
        /// for any conflicting requests to the same block

        //@todo remove hw_pf here

        // Coalesce unless it was a software prefetch (see above).
        if (pkt) {
            assert(!pkt->isWriteback());
            // CleanEvicts corresponding to blocks which have
            // outstanding requests in MSHRs are simply sunk here
            if (pkt->cmd == MemCmd::CleanEvict) {
                pendingDelete.reset(pkt);
            } else if (pkt->cmd == MemCmd::WriteClean) {
                // A WriteClean should never coalesce with any
                // outstanding cache maintenance requests.

                // We use forward_time here because there is an
                // uncached memory write, forwarded to WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
                        pkt->print());

                assert(pkt->req->masterId() < system->maxMasters());
                mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // We use forward_time here because it is the same
                // considering new targets. We have multiple
                // requests for the same address here. It
                // specifies the latency to allocate an internal
                // buffer and to schedule an event to the queued
                // port and also takes into account the additional
                // delay of the xbar.
                mshr->allocateTarget(pkt, forward_time, order++,
                                     allocOnFill(pkt->cmd));
                if (mshr->getNumTargets() == numTarget) {
                    noTargetMSHR = mshr;
                    setBlocked(Blocked_NoTargets);
                    // need to be careful with this... if this mshr isn't
                    // ready yet (i.e. time > curTick()), we don't want to
                    // move it ahead of mshrs that are ready
                    // mshrQueue.moveToFront(mshr);
                }
            }
        }
    } else {
        // no MSHR
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

        if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
            // We use forward_time here because there is a
            // writeback or writeclean, forwarded to WriteBuffer.
            allocateWriteBuffer(pkt, forward_time);
        } else {
            if (blk && blk->isValid()) {
                // If we have a write miss to a valid block, we
                // need to mark the block non-readable. Otherwise
                // if we allow reads while there's an outstanding
                // write miss, the read could return stale data
                // out of the cache block... a more aggressive
                // system could detect the overlap (if any) and
                // forward data out of the MSHRs, but we don't do
                // that yet. Note that we do need to leave the
                // block valid so that it stays in the cache, in
                // case we get an upgrade response (and hence no
                // new data) when the write miss completes.
                // As long as CPUs do proper store/load forwarding
                // internally, and have a sufficiently weak memory
                // model, this is probably unnecessary, but at some
                // point it must have seemed like we needed it...
                assert((pkt->needsWritable() && !blk->isWritable()) ||
                       pkt->req->isCacheMaintenance());
                blk->status &= ~BlkReadable;
            }
            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }
    }
}
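// Editorial timing note (not part of the original source, numbers are
// hypothetical): recvTimingReq() below uses two schedule points.
// forward_time = clockEdge(forwardLatency) + pkt->headerDelay is charged
// to anything merely forwarded downstream, while request_time =
// clockEdge(lat) is charged to hits/misses handled here, with lat set by
// access(). E.g., with a 1000-tick clock period, forwardLatency = 2
// cycles, headerDelay = 1000 ticks and lat = 4 cycles, a packet arriving
// on the clock edge at tick 10000 yields forward_time = 13000 and
// request_time = 14000.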
void
BaseCache::recvTimingReq(PacketPtr pkt)
{
    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    Cycles lat;
    CacheBlk *blk = nullptr;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() will set the lat value.
        satisfied = access(pkt, blk, lat, writebacks);

        // After the evicted blocks are selected, they must be forwarded
        // to the write buffer to ensure they logically precede anything
        // happening below
        doWritebacks(writebacks, clockEdge(lat + forwardLatency));
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is just the value set by the access() function.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat);
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;

    if (satisfied) {
        // notify before anything else as later handleTimingReqHit might
        // turn the packet into a response
        ppHit->notify(pkt);

        if (prefetcher && blk && blk->wasPrefetched()) {
            blk->status &= ~BlkHWPrefetched;
        }

        handleTimingReqHit(pkt, blk, request_time);
    } else {
        handleTimingReqMiss(pkt, blk, forward_time, request_time);

        ppMiss->notify(pkt);
    }

    if (prefetcher) {
        // track time of availability of next prefetch, if any
        Tick next_pf_time = prefetcher->nextPrefetchReadyTime();
        if (next_pf_time != MaxTick) {
            schedMemSideSendEvent(next_pf_time);
        }
    }
}

void
BaseCache::handleUncacheableWriteResp(PacketPtr pkt)
{
    Tick completion_time = clockEdge(responseLatency) +
        pkt->headerDelay + pkt->payloadDelay;

    // Reset the bus additional time as it is now accounted for
    pkt->headerDelay = pkt->payloadDelay = 0;

    cpuSidePort.schedTimingResp(pkt, completion_time);
}
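// Editorial note (not part of the original source): recvTimingResp()
// below is the single entry point for responses from the memory side.
// Uncacheable writes are completed directly; every other response
// carries its MSHR as sender state, so the cache pops the MSHR, updates
// the miss-latency stats from the first target's receive time,
// optionally fills a block, and then services all queued targets.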
void
BaseCache::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    // all header delay should be paid for by the crossbar, unless
    // this is a prefetch response from above
    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
             "%s saw a non-zero packet delay\n", name());

    const bool is_error = pkt->isError();

    if (is_error) {
        DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
                pkt->print());
    }

    DPRINTF(Cache, "%s: Handling response %s\n", __func__,
            pkt->print());

    // if this is a write, we should be looking at an uncacheable
    // write
    if (pkt->isWrite()) {
        assert(pkt->req->isUncacheable());
        handleUncacheableWriteResp(pkt);
        return;
    }

    // we have dealt with any (uncacheable) writes above, from here on
    // we know we are dealing with an MSHR due to a miss or a prefetch
    MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
    assert(mshr);

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = nullptr;
    }

    // Initial target is used just for stats
    QueueEntry::Target *initial_tgt = mshr->getTarget();
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick() - initial_tgt->recvTime;

    if (pkt->req->isUncacheable()) {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    } else {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    }

    PacketList writebacks;

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp ||
         mshr->wasWholeLineWrite);

    // make sure that if the mshr was due to a whole line write then
    // the response is an invalidation
    assert(!mshr->wasWholeLineWrite || pkt->isInvalidate());

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                pkt->getAddr());

        const bool allocate = (writeAllocator && mshr->wasWholeLineWrite) ?
            writeAllocator->allocate() : mshr->allocOnFill();
        blk = handleFill(pkt, blk, writebacks, allocate);
        assert(blk != nullptr);
        ppFill->notify(pkt);
    }

    if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
        // The block was marked not readable while there was a pending
        // cache maintenance operation, restore its flag.
        blk->status |= BlkReadable;

        // This was a cache clean operation (without invalidate)
        // and we have a copy of the block already. Since there
        // is no invalidation, we can promote targets that don't
        // require a writable copy
        mshr->promoteReadable();
    }

    if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) {
        // If at this point the referenced block is writable and the
        // response is not a cache invalidate, we promote targets that
        // were deferred as we couldn't guarantee a writable copy
        mshr->promoteWritable();
    }

    serviceMSHRTargets(mshr, pkt, blk);

    if (mshr->promoteDeferredTargets()) {
        // avoid later read getting stale data while write miss is
        // outstanding... see comment in timingAccess()
        if (blk) {
            blk->status &= ~BlkReadable;
        }
        mshrQueue.markPending(mshr);
        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
    } else {
        // while we deallocate an mshr from the queue we still have to
        // check the isFull condition before and after as we might
        // have been using the reserved entries already
        const bool was_full = mshrQueue.isFull();
        mshrQueue.deallocate(mshr);
        if (was_full && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }

        // Request the bus for a prefetch if this deallocation freed enough
        // MSHRs for a prefetch to take place
        if (prefetcher && mshrQueue.canPrefetch()) {
            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
                                         clockEdge());
            if (next_pf_time != MaxTick)
                schedMemSideSendEvent(next_pf_time);
        }
    }

    // if we used temp block, check to see if it's valid and then clear
    // it out
    if (blk == tempBlock && tempBlock->isValid()) {
        evictBlock(blk, writebacks);
    }

    const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // copy writebacks to write buffer
    doWritebacks(writebacks, forward_time);

    DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
    delete pkt;
}
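// Editorial note (not part of the original source): in atomic mode the
// whole request/response round trip completes within one recvAtomic()
// call, so the latency is returned as ticks instead of being scheduled
// as events. As a hypothetical example, a hit with lat = 3 cycles on a
// 1000-tick clock makes recvAtomic() return 3000 ticks via
// lat * clockPeriod().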
Tick
BaseCache::recvAtomic(PacketPtr pkt)
{
    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    // We use lookupLatency here because it is used to specify the latency
    // to access.
    Cycles lat = lookupLatency;

    CacheBlk *blk = nullptr;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty
        // block. If a dirty block is encountered a WriteClean
        // will update any copies to the path to the memory
        // until the point of reference.
        DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                __func__, pkt->print(), blk->print());
        PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
        writebacks.push_back(wb_pkt);
        pkt->setSatisfied();
    }

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);
    assert(writebacks.empty());

    if (!satisfied) {
        lat += handleAtomicReqMiss(pkt, blk, writebacks);
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention. If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used temp block, check to see if it's valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter), first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = evictBlock(blk);
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat * clockPeriod();
}
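// Editorial note (not part of the original source): functionalAccess()
// below serves debugger-style accesses that bypass timing. The subtlety
// is ownership: a lower-level cache may hold valid data while an
// upper-level cache holds a newer modified copy, so the packet is only
// marked done once dirty data (or another responder) is found; otherwise
// it keeps propagating through the ports, MSHRs and write buffer.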
void
BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
{
    Addr blk_addr = pkt->getBlockAddr(blkSize);
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found. As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.

    // see if we have data at all (owned or otherwise)
    bool have_data = blk && blk->isValid()
        && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize,
                                     blk->data);

    // data we have is dirty if marked as such or if we have an
    // in-service MSHR that is pending a modified line
    bool have_dirty =
        have_data && (blk->isDirty() ||
                      (mshr && mshr->inService && mshr->isPendingModified()));

    bool done = have_dirty ||
        cpuSidePort.trySatisfyFunctional(pkt) ||
        mshrQueue.trySatisfyFunctional(pkt) ||
        writeBuffer.trySatisfyFunctional(pkt) ||
        memSidePort.trySatisfyFunctional(pkt);

    DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
            (blk && blk->isValid()) ? "valid " : "",
            have_data ? "data " : "", done ? "done " : "");

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (done) {
        pkt->makeResponse();
    } else {
        // if it came as a request from the CPU side then make sure it
        // continues towards the memory side
        if (from_cpu_side) {
            memSidePort.sendFunctional(pkt);
        } else if (cpuSidePort.isSnooping()) {
            // if it came from the memory side, it must be a snoop request
            // and we should only forward it if we are forwarding snoops
            cpuSidePort.sendFunctionalSnoop(pkt);
        }
    }
}


void
BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = pkt->getOffset(blkSize);
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}
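// Editorial note (not part of the original source): cmpAndSwap() above
// performs an unconditional swap unless the request carries the CondSwap
// flag. As a hypothetical 64-bit example, if the block currently holds
// 0x0 and the request's extra data (the compare value) is also 0x0, the
// packet payload overwrites the block and marks it dirty; on a failed
// compare only the old value is returned in the packet.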
QueueEntry*
BaseCache::getNextQueueEntry()
{
    // Check both MSHR queue and write buffer for potential requests,
    // note that null does not mean there is no request, it could
    // simply be that it is not ready
    MSHR *miss_mshr = mshrQueue.getNext();
    WriteQueueEntry *wq_entry = writeBuffer.getNext();

    // If we got a write buffer request ready, first priority is a
    // full write buffer, otherwise we favour the miss requests
    if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
        // need to search MSHR queue for conflicting earlier miss.
        MSHR *conflict_mshr = mshrQueue.findPending(wq_entry);

        if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
            // Service misses in order until conflict is cleared.
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue write
        return wq_entry;
    } else if (miss_mshr) {
        // need to check for conflicting earlier writeback
        WriteQueueEntry *conflict_mshr = writeBuffer.findPending(miss_mshr);
        if (conflict_mshr) {
            // not sure why we don't check order here... it was in the
            // original code but commented out.

            // The only way this happens is if we are
            // doing a write and we didn't have permissions
            // then subsequently saw a writeback (owned got evicted).
            // We need to make sure to perform the writeback first
            // to preserve the dirty data, then we can issue the write.

            // should we return wq_entry here instead? I.e. do we
            // have to flush writes in order? I don't think so... not
            // for Alpha anyway. Maybe for x86?
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue read
        return miss_mshr;
    }

    // fall through... no pending requests. Try a prefetch.
    assert(!miss_mshr && !wq_entry);
    if (prefetcher && mshrQueue.canPrefetch()) {
        // If we have a miss queue slot, we can try a prefetch
        PacketPtr pkt = prefetcher->getPacket();
        if (pkt) {
            Addr pf_addr = pkt->getBlockAddr(blkSize);
            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
                // Update statistic on number of prefetches issued
                // (hwpf_mshr_misses)
                assert(pkt->req->masterId() < system->maxMasters());
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // allocate an MSHR and return it, note
                // that we send the packet straight away, so do not
                // schedule the send
                return allocateMissBuffer(pkt, curTick(), false);
            } else {
                // free the request and packet
                delete pkt;
            }
        }
    }

    return nullptr;
}
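// Editorial summary (not part of the original source) of the arbitration
// in getNextQueueEntry() above: writes win only when the write buffer is
// full or no miss is ready, and in either direction an older conflicting
// entry for the same block is serviced first. Prefetches are strictly
// lowest priority: they are attempted only when both queues are empty
// and a non-reserved MSHR is free, and are dropped if the line is
// already present or pending.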
void
BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsWritable() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        if (pkt->isAtomicOp()) {
            // extract data from cache and save it into the data field in
            // the packet as a return value from this atomic op
            int offset = tags->extractBlkOffset(pkt->getAddr());
            uint8_t *blk_data = blk->data + offset;
            pkt->setData(blk_data);

            // execute AMO operation
            (*(pkt->getAtomicOp()))(blk_data);

            // set block status to dirty
            blk->status |= BlkDirty;
        } else {
            cmpAndSwap(blk, pkt);
        }
    } else if (pkt->isWrite()) {
        // we have the block in a writable state and can go ahead,
        // note that the line may also be considered writable in
        // downstream caches along the path to memory, but always
        // Exclusive, and never Modified
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in writable state
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty (and thus transition to the
        // Modified state) even if we are a failed StoreCond so we
        // supply data to any snoops that have appended themselves to
        // this cache before knowing the store will fail.
        blk->status |= BlkDirty;
        DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }

        // all read responses have a data payload
        assert(pkt->hasRespData());
        pkt->setDataFromBlock(blk->data, blkSize);
    } else if (pkt->isUpgrade()) {
        // sanity check
        assert(!pkt->hasSharers());

        if (blk->isDirty()) {
            // we were in the Owned state, and a cache above us that
            // has the line in Shared state needs to be made aware
            // that the data it already has is in fact dirty
            pkt->setCacheResponding();
            blk->status &= ~BlkDirty;
        }
    } else if (pkt->isClean()) {
        blk->status &= ~BlkDirty;
    } else {
        assert(pkt->isInvalidate());
        invalidateBlock(blk);
        DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
                pkt->print());
    }
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////
Cycles
BaseCache::calculateTagOnlyLatency(const uint32_t delay,
                                   const Cycles lookup_lat) const
{
    // A tag-only access has to wait for the packet to arrive in order to
    // perform the tag lookup.
    return ticksToCycles(delay) + lookup_lat;
}
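// Editorial worked example (not part of the original source, numbers are
// hypothetical): with lookup_lat = 2 cycles, dataLatency = 2 cycles and
// a header delay worth 1 cycle, calculateAccessLatency() below yields
// 1 + 2 + 2 = 5 cycles for a sequential-access hit,
// 1 + max(2, 2) = 3 cycles for a parallel-access hit, and falls back to
// the tag-only 1 + 2 = 3 cycles on a miss.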
Cycles
BaseCache::calculateAccessLatency(const CacheBlk* blk, const uint32_t delay,
                                  const Cycles lookup_lat) const
{
    Cycles lat(0);

    if (blk != nullptr) {
        // As soon as the access arrives, sequential accesses first access
        // the tags, then the data entry. In the case of parallel accesses
        // the latency is dictated by the slowest of the tag and data
        // latencies.
        if (sequentialAccess) {
            lat = ticksToCycles(delay) + lookup_lat + dataLatency;
        } else {
            lat = ticksToCycles(delay) + std::max(lookup_lat, dataLatency);
        }

        // Check if the block to be accessed is available. If not, apply the
        // access latency on top of when the block is ready to be accessed.
        const Tick tick = curTick() + delay;
        const Tick when_ready = blk->getWhenReady();
        if (when_ready > tick &&
            ticksToCycles(when_ready - tick) > lat) {
            lat += ticksToCycles(when_ready - tick);
        }
    } else {
        // In case of a miss, we neglect the data access in a parallel
        // configuration (i.e., the data access will be stopped as soon as
        // we find out it is a miss), and use the tag-only latency.
        lat = calculateTagOnlyLatency(delay, lookup_lat);
    }

    return lat;
}

bool
BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
                  PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());

    // Access block in the tags
    Cycles tag_latency(0);
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), tag_latency);

    DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(),
            blk ? "hit " + blk->print() : "miss");

    if (pkt->req->isCacheMaintenance()) {
        // A cache maintenance operation is always forwarded to the
        // memory below even if the block is found in dirty state.

        // We defer any changes to the state of the block until we
        // create and mark as in service the mshr for the downstream
        // packet.

        // Calculate access latency on top of when the packet arrives. This
        // takes into account the bus delay.
        lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

        return false;
    }

    if (pkt->isEviction()) {
        // We check for presence of block in above caches before issuing
        // Writeback or CleanEvict to write buffer. Therefore the only
        // possible cases can be of a CleanEvict packet coming from above
        // encountering a Writeback generated in this cache's peer cache
        // and waiting in the write buffer. Cases of upper level peer caches
        // generating CleanEvict and Writeback or simply CleanEvict and
        // CleanEvict almost simultaneously will be caught by snoops sent
        // out by the crossbar.
        WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
                                                          pkt->isSecure());
        if (wb_entry) {
            assert(wb_entry->getNumTargets() == 1);
            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
            assert(wbPkt->isWriteback());

            if (pkt->isCleanEviction()) {
                // The CleanEvict and WritebackClean snoops into other
                // peer caches of the same level while traversing the
                // crossbar. If a copy of the block is found, the
                // packet is deleted in the crossbar. Hence, none of
                // the other upper level caches connected to this
                // cache have the block, so we can clear the
                // BLOCK_CACHED flag in the Writeback if set and
                // discard the CleanEvict by returning true.
                wbPkt->clearBlockCached();

                // A clean evict does not need to access the data array
                lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

                return true;
            } else {
                assert(pkt->cmd == MemCmd::WritebackDirty);
                // Dirty writeback from above trumps our clean
                // writeback... discard here
                // Note: markInService will remove entry from writeback
                // buffer.
                markInService(wb_entry);
                delete wbPkt;
            }
        }
    }

    // Writeback handling is a special case. We can write the block into
    // the cache without having a writeable copy (or any copy at all).
    if (pkt->isWriteback()) {
        assert(blkSize == pkt->getSize());

        // we could get a clean writeback while we are having
        // outstanding accesses to a block, do the simple thing for
        // now and drop the clean writeback so that we do not upset
        // any ordering/decisions about ownership already taken
        if (pkt->cmd == MemCmd::WritebackClean &&
            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
                    "dropping\n", pkt->getAddr());

            // A writeback searches for the block, then writes the data.
            // As the writeback is being dropped, the data is not touched,
            // and we just had to wait for the time to find a match in the
            // MSHR. As of now assume an MSHR queue search takes as long as
            // a tag lookup for simplicity.
            lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

            return true;
        }

        if (!blk) {
            // need to do a replacement
            blk = allocateBlock(pkt, writebacks);
            if (!blk) {
                // no replaceable block available: give up, fwd to next level.
                incMissCount(pkt);

                // A writeback searches for the block, then writes the data.
                // As the block could not be found, it was a tag-only access.
                lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

                return false;
            }

            blk->status |= BlkReadable;
        }
        // only mark the block dirty if we got a writeback command,
        // and leave it as is for a clean writeback
        if (pkt->cmd == MemCmd::WritebackDirty) {
            // TODO: the coherent cache can assert(!blk->isDirty());
            blk->status |= BlkDirty;
        }
        // if the packet does not have sharers, it is passing
        // writable, and we got the writeback in Modified or Exclusive
        // state, if not we are in the Owned or Shared state
        if (!pkt->hasSharers()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
        incHitCount(pkt);

        // A writeback searches for the block, then writes the data
        lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);

        // When the packet metadata arrives, the tag lookup will be done
        // while the payload is arriving. Then the block will be ready to
        // access as soon as the fill is done
        blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
            std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay));

        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        // A CleanEvict does not need to access the data array
        lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

        if (blk) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and
            // delete it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the
        // CleanEvict like a Writeback which could not find a replaceable
        // block so has to go to next level.
        return false;
    } else if (pkt->cmd == MemCmd::WriteClean) {
        // WriteClean handling is a special case. We can allocate a
        // block directly if it doesn't exist and we can update the
        // block immediately. The WriteClean transfers the ownership
        // of the block as well.
        assert(blkSize == pkt->getSize());

        if (!blk) {
            if (pkt->writeThrough()) {
                // A writeback searches for the block, then writes the data.
                // As the block could not be found, it was a tag-only access.
                lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

                // if this is a write through packet, we don't try to
                // allocate if the block is not present
                return false;
            } else {
                // a writeback that misses needs to allocate a new block
                blk = allocateBlock(pkt, writebacks);
                if (!blk) {
                    // no replaceable block available: give up, fwd to
                    // next level.
                    incMissCount(pkt);

                    // A writeback searches for the block, then writes the
                    // data. As the block could not be found, it was a
                    // tag-only access.
                    lat = calculateTagOnlyLatency(pkt->headerDelay,
                                                  tag_latency);

                    return false;
                }

                blk->status |= BlkReadable;
            }
        }

        // at this point either this is a writeback or a write-through
        // write clean operation and the block is already in this
        // cache, we need to update the data and the block flags
        assert(blk);
        // TODO: the coherent cache can assert(!blk->isDirty());
        if (!pkt->writeThrough()) {
            blk->status |= BlkDirty;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());

        incHitCount(pkt);

        // A writeback searches for the block, then writes the data
        lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);

        // When the packet metadata arrives, the tag lookup will be done
        // while the payload is arriving. Then the block will be ready to
        // access as soon as the fill is done
        blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
            std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay));

        // if this is a write-through packet it will be sent to cache
        // below
        return !pkt->writeThrough();
    } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
                       blk->isReadable())) {
        // OK to satisfy access
        incHitCount(pkt);

        // Calculate access latency based on the need to access the data
        // array
        if (pkt->isRead() || pkt->isWrite()) {
            lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);
        } else {
            lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
        }

        satisfyRequest(pkt, blk);
        maintainClusivity(pkt->fromCache(), blk);

        return true;
    }

    // Can't satisfy access normally... either no block (blk == nullptr)
    // or have block but need writable

    incMissCount(pkt);

    lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);

    if (!blk && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}

void
BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
{
    if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
        clusivity == Enums::mostly_excl) {
        // if we have responded to a cache, and our block is still
        // valid, but not dirty, and this cache is mostly exclusive
        // with respect to the cache above, drop the block
        invalidateBlock(blk);
    }
}

CacheBlk*
BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
                      bool allocate)
{
    assert(pkt->isResponse());
    Addr addr = pkt->getAddr();
    bool is_secure = pkt->isSecure();
#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    // When handling a fill, we should have no writes to this line.
    assert(addr == pkt->getBlockAddr(blkSize));
    assert(!writeBuffer.findMatch(addr, is_secure));

    if (!blk) {
        // better have read new data...
        assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp);

        // need to do a replacement if allocating, otherwise we stick
        // with the temporary storage
        blk = allocate ? allocateBlock(pkt, writebacks) : nullptr;

        if (!blk) {
            // No replaceable block or a mostly exclusive
            // cache... just use temporary storage to complete the
            // current request and then get rid of it
            blk = tempBlock;
            tempBlock->insert(addr, is_secure);
            DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
                    is_secure ? "s" : "ns");
        }
    } else {
        // existing block... probably an upgrade
        // don't clear block status... if block is already dirty we
        // don't want to lose that
    }

    // Block is guaranteed to be valid at this point
    assert(blk->isValid());
    assert(blk->isSecure() == is_secure);
    assert(regenerateBlkAddr(blk) == addr);

    blk->status |= BlkReadable;

    // sanity check for whole-line writes, which should always be
    // marked as writable as part of the fill, and then later marked
    // dirty as part of satisfyRequest
    if (pkt->cmd == MemCmd::InvalidateResp) {
        assert(!pkt->hasSharers());
    }

    // here we deal with setting the appropriate state of the line,
    // and we start by looking at the hasSharers flag, and ignore the
    // cacheResponding flag (normally signalling dirty data) if the
    // packet has sharers, thus the line is never allocated as Owned
    // (dirty but not writable), and always ends up being either
    // Shared, Exclusive or Modified, see Packet::setCacheResponding
    // for more details
    if (!pkt->hasSharers()) {
        // we could get a writable line from memory (rather than a
        // cache) even in a read-only cache, note that we set this bit
        // even for a read-only cache, possibly revisit this decision
        blk->status |= BlkWritable;

        // check if we got this via cache-to-cache transfer (i.e., from a
        // cache that had the block in Modified or Owned state)
        if (pkt->cacheResponding()) {
            // we got the block in Modified state, and invalidated the
            // owners copy
            blk->status |= BlkDirty;

            chatty_assert(!isReadOnly, "Should never see dirty snoop "
                          "response in read-only cache %s\n", name());
        }
    }

    DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
            addr, is_secure ? "s" : "ns", old_state, blk->print());

    // if we got new data, copy it in (checking for a read response
    // and a response that has data is the same in the end)
    if (pkt->isRead()) {
        // sanity checks
        assert(pkt->hasData());
        assert(pkt->getSize() == blkSize);

        pkt->writeDataToBlock(blk->data, blkSize);
    }
    // The block will be ready when the payload arrives and the fill is done
    blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
                      pkt->payloadDelay);

    return blk;
}

CacheBlk*
BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks)
{
    // Get address
    const Addr addr = pkt->getAddr();

    // Get secure bit
    const bool is_secure = pkt->isSecure();

    // Find replacement victim
    std::vector<CacheBlk*> evict_blks;
    CacheBlk *victim = tags->findVictim(addr, is_secure, evict_blks);

    // It is valid to return nullptr if there is no victim
    if (!victim)
        return nullptr;

    // Print victim block's information
    DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print());

    // Check for transient state allocations. If any of the entries listed
    // for eviction has a transient state, the allocation fails
    bool replacement = false;
    for (const auto& blk : evict_blks) {
        if (blk->isValid()) {
            replacement = true;

            Addr repl_addr = regenerateBlkAddr(blk);
            MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
            if (repl_mshr) {
                // must be an outstanding upgrade or clean request
                // on a block we're about to replace...
                assert((!blk->isWritable() && repl_mshr->needsWritable()) ||
                       repl_mshr->isCleaning());

                // too hard to replace block with transient state
                // allocation failed, block not inserted
                return nullptr;
            }
        }
    }

    // The victim will be replaced by a new entry, so increase the
    // replacement counter if a valid block is being replaced
    if (replacement) {
        // Evict valid blocks associated with this victim block
        for (const auto& blk : evict_blks) {
            if (blk->isValid()) {
                DPRINTF(CacheRepl, "Evicting %s (%#llx) to make room for " \
                        "%#llx (%s)\n", blk->print(), regenerateBlkAddr(blk),
                        addr, is_secure);

                if (blk->wasPrefetched()) {
                    unusedPrefetches++;
                }

                evictBlock(blk, writebacks);
            }
        }

        replacements++;
    }

    // Insert new block at victimized entry
    tags->insertBlock(pkt, victim);

    return victim;
}

void
BaseCache::invalidateBlock(CacheBlk *blk)
{
    // If handling a block present in the Tags, let it do its invalidation
    // process, which will update stats and invalidate the block itself
    if (blk != tempBlock) {
        tags->invalidate(blk);
    } else {
        tempBlock->invalidate();
    }
}

void
BaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks)
{
    PacketPtr pkt = evictBlock(blk);
    if (pkt) {
        writebacks.push_back(pkt);
    }
}
PacketPtr
BaseCache::writebackBlk(CacheBlk *blk)
{
    chatty_assert(!isReadOnly || writebackClean,
                  "Writeback from read-only cache");
    assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));

    writebacks[Request::wbMasterId]++;

    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);

    PacketPtr pkt =
        new Packet(req, blk->isDirty() ?
                   MemCmd::WritebackDirty : MemCmd::WritebackClean);

    DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n",
            pkt->print(), blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting shared means we pass the block in modified
        // state, mark our own block non-writable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    pkt->setDataFromBlock(blk->data, blkSize);

    return pkt;
}

PacketPtr
BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
{
    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure()) {
        req->setFlags(Request::SECURE);
    }
    req->taskId(blk->task_id);

    PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id);

    if (dest) {
        req->setFlags(dest);
        pkt->setWriteThrough();
    }

    DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(),
            blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting shared means we pass the block in modified
        // state, mark our own block non-writable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    pkt->setDataFromBlock(blk->data, blkSize);

    return pkt;
}


void
BaseCache::memWriteback()
{
    tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
}

void
BaseCache::memInvalidate()
{
    tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
}

bool
BaseCache::isDirty() const
{
    return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); });
}

bool
BaseCache::coalesce() const
{
    return writeAllocator && writeAllocator->coalesce();
}

void
BaseCache::writebackVisitor(CacheBlk &blk)
{
    if (blk.isDirty()) {
        assert(blk.isValid());

        RequestPtr request = std::make_shared<Request>(
            regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId);

        request->taskId(blk.task_id);
        if (blk.isSecure()) {
            request->setFlags(Request::SECURE);
        }

        Packet packet(request, MemCmd::WriteReq);
        packet.dataStatic(blk.data);

        memSidePort.sendFunctional(&packet);

        blk.status &= ~BlkDirty;
    }
}

void
BaseCache::invalidateVisitor(CacheBlk &blk)
{
    if (blk.isDirty())
        warn_once("Invalidating dirty cache lines. " \
                  "Expect things to break.\n");

    if (blk.isValid()) {
        assert(!blk.isDirty());
        invalidateBlock(&blk);
    }
}

Tick
BaseCache::nextQueueReadyTime() const
{
    Tick nextReady = std::min(mshrQueue.nextReadyTime(),
                              writeBuffer.nextReadyTime());

    // Don't signal prefetch ready time if no MSHRs available
    // Will signal once enough MSHRs are deallocated
    if (prefetcher && mshrQueue.canPrefetch()) {
        nextReady = std::min(nextReady,
                             prefetcher->nextPrefetchReadyTime());
    }

    return nextReady;
}
bool
BaseCache::sendMSHRQueuePacket(MSHR* mshr)
{
    assert(mshr);

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;

    DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());

    // if the cache is in write coalescing mode or (additionally) in
    // no allocation mode, and we have a write packet with an MSHR
    // that is not a whole-line write (due to incompatible flags etc),
    // then reset the write mode
    if (writeAllocator && writeAllocator->coalesce() && tgt_pkt->isWrite()) {
        if (!mshr->isWholeLineWrite()) {
            // if we are currently write coalescing, hold on the
            // MSHR as many cycles extra as we need to completely
            // write a cache line
            if (writeAllocator->delay(mshr->blkAddr)) {
                Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod();
                DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow "
                        "for write coalescing\n", tgt_pkt->print(), delay);
                mshrQueue.delay(mshr, delay);
                return false;
            } else {
                writeAllocator->reset();
            }
        } else {
            writeAllocator->resetDelay(mshr->blkAddr);
        }
    }

    CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);

    // either a prefetch that is not present upstream, or a normal
    // MSHR request, proceed to get the packet to send downstream
    PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable(),
                                     mshr->isWholeLineWrite());

    mshr->isForward = (pkt == nullptr);

    if (mshr->isForward) {
        // not a cache block request, but a response is expected
        // make copy of current packet to forward, keep current
        // copy for response handling
        pkt = new Packet(tgt_pkt, false, true);
        assert(!pkt->isWrite());
    }

    // play it safe and append (rather than set) the sender state,
    // as forwarded packets may already have existing state
    pkt->pushSenderState(mshr);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty block. Mark
        // the packet so that the destination xbar can determine that
        // there will be a follow-up write packet as well.
        pkt->setSatisfied();
    }

    if (!memSidePort.sendTimingReq(pkt)) {
        // we are awaiting a retry, but we
        // delete the packet and will be creating a new packet
        // when we get the opportunity
        delete pkt;

        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        // As part of the call to sendTimingReq the packet is
        // forwarded to all neighbouring caches (and any caches
        // above them) as a snoop. Thus at this point we know if
        // any of the neighbouring caches are responding, and if
        // so, we know it is dirty, and we can determine if it is
        // being passed as Modified, making our MSHR the ordering
        // point
        bool pending_modified_resp = !pkt->hasSharers() &&
            pkt->cacheResponding();
        markInService(mshr, pending_modified_resp);

        if (pkt->isClean() && blk && blk->isDirty()) {
            // A cache clean operation is looking for a dirty
            // block. If a dirty block is encountered a WriteClean
            // will update any copies to the path to the memory
            // until the point of reference.
            DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
                                             pkt->id);
            PacketList writebacks;
            writebacks.push_back(wb_pkt);
            doWritebacks(writebacks, 0);
        }

        return false;
    }
}
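// Editorial note (not part of the original source, numbers are
// hypothetical): the write-coalescing check at the top of
// sendMSHRQueuePacket() above holds a non-whole-line write MSHR back for
// roughly the time needed to cover a full line. With blkSize = 64,
// 8-byte write packets and a 1000-tick clock, writeAllocator->delay()
// stalls the MSHR for 64 / 8 * clockPeriod() = 8000 ticks; once delay()
// stops requesting a hold, the coalescing mode is reset instead.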
+ cstr + "_hits") 1711 .desc("number of " + cstr + " hits") 1712 .flags(total | nozero | nonan) 1713 ; 1714 for (int i = 0; i < system->maxMasters(); i++) { 1715 hits[access_idx].subname(i, system->getMasterName(i)); 1716 } 1717 } 1718 1719// These macros make it easier to sum the right subset of commands and 1720// to change the subset of commands that are considered "demand" vs 1721// "non-demand" 1722#define SUM_DEMAND(s) \ 1723 (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \ 1724 s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq]) 1725 1726// should writebacks be included here? prior code was inconsistent... 1727#define SUM_NON_DEMAND(s) \ 1728 (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq] + s[MemCmd::SoftPFExReq]) 1729 1730 demandHits 1731 .name(name() + ".demand_hits") 1732 .desc("number of demand (read+write) hits") 1733 .flags(total | nozero | nonan) 1734 ; 1735 demandHits = SUM_DEMAND(hits); 1736 for (int i = 0; i < system->maxMasters(); i++) { 1737 demandHits.subname(i, system->getMasterName(i)); 1738 } 1739 1740 overallHits 1741 .name(name() + ".overall_hits") 1742 .desc("number of overall hits") 1743 .flags(total | nozero | nonan) 1744 ; 1745 overallHits = demandHits + SUM_NON_DEMAND(hits); 1746 for (int i = 0; i < system->maxMasters(); i++) { 1747 overallHits.subname(i, system->getMasterName(i)); 1748 } 1749 1750 // Miss statistics 1751 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1752 MemCmd cmd(access_idx); 1753 const string &cstr = cmd.toString(); 1754 1755 misses[access_idx] 1756 .init(system->maxMasters()) 1757 .name(name() + "." + cstr + "_misses") 1758 .desc("number of " + cstr + " misses") 1759 .flags(total | nozero | nonan) 1760 ; 1761 for (int i = 0; i < system->maxMasters(); i++) { 1762 misses[access_idx].subname(i, system->getMasterName(i)); 1763 } 1764 } 1765 1766 demandMisses 1767 .name(name() + ".demand_misses") 1768 .desc("number of demand (read+write) misses") 1769 .flags(total | nozero | nonan) 1770 ; 1771 demandMisses = SUM_DEMAND(misses); 1772 for (int i = 0; i < system->maxMasters(); i++) { 1773 demandMisses.subname(i, system->getMasterName(i)); 1774 } 1775 1776 overallMisses 1777 .name(name() + ".overall_misses") 1778 .desc("number of overall misses") 1779 .flags(total | nozero | nonan) 1780 ; 1781 overallMisses = demandMisses + SUM_NON_DEMAND(misses); 1782 for (int i = 0; i < system->maxMasters(); i++) { 1783 overallMisses.subname(i, system->getMasterName(i)); 1784 } 1785 1786 // Miss latency statistics 1787 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1788 MemCmd cmd(access_idx); 1789 const string &cstr = cmd.toString(); 1790 1791 missLatency[access_idx] 1792 .init(system->maxMasters()) 1793 .name(name() + "." 
+ cstr + "_miss_latency") 1794 .desc("number of " + cstr + " miss cycles") 1795 .flags(total | nozero | nonan) 1796 ; 1797 for (int i = 0; i < system->maxMasters(); i++) { 1798 missLatency[access_idx].subname(i, system->getMasterName(i)); 1799 } 1800 } 1801 1802 demandMissLatency 1803 .name(name() + ".demand_miss_latency") 1804 .desc("number of demand (read+write) miss cycles") 1805 .flags(total | nozero | nonan) 1806 ; 1807 demandMissLatency = SUM_DEMAND(missLatency); 1808 for (int i = 0; i < system->maxMasters(); i++) { 1809 demandMissLatency.subname(i, system->getMasterName(i)); 1810 } 1811 1812 overallMissLatency 1813 .name(name() + ".overall_miss_latency") 1814 .desc("number of overall miss cycles") 1815 .flags(total | nozero | nonan) 1816 ; 1817 overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency); 1818 for (int i = 0; i < system->maxMasters(); i++) { 1819 overallMissLatency.subname(i, system->getMasterName(i)); 1820 } 1821 1822 // access formulas 1823 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1824 MemCmd cmd(access_idx); 1825 const string &cstr = cmd.toString(); 1826 1827 accesses[access_idx] 1828 .name(name() + "." + cstr + "_accesses") 1829 .desc("number of " + cstr + " accesses(hits+misses)") 1830 .flags(total | nozero | nonan) 1831 ; 1832 accesses[access_idx] = hits[access_idx] + misses[access_idx]; 1833 1834 for (int i = 0; i < system->maxMasters(); i++) { 1835 accesses[access_idx].subname(i, system->getMasterName(i)); 1836 } 1837 } 1838 1839 demandAccesses 1840 .name(name() + ".demand_accesses") 1841 .desc("number of demand (read+write) accesses") 1842 .flags(total | nozero | nonan) 1843 ; 1844 demandAccesses = demandHits + demandMisses; 1845 for (int i = 0; i < system->maxMasters(); i++) { 1846 demandAccesses.subname(i, system->getMasterName(i)); 1847 } 1848 1849 overallAccesses 1850 .name(name() + ".overall_accesses") 1851 .desc("number of overall (read+write) accesses") 1852 .flags(total | nozero | nonan) 1853 ; 1854 overallAccesses = overallHits + overallMisses; 1855 for (int i = 0; i < system->maxMasters(); i++) { 1856 overallAccesses.subname(i, system->getMasterName(i)); 1857 } 1858 1859 // miss rate formulas 1860 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1861 MemCmd cmd(access_idx); 1862 const string &cstr = cmd.toString(); 1863 1864 missRate[access_idx] 1865 .name(name() + "." 
+ cstr + "_miss_rate") 1866 .desc("miss rate for " + cstr + " accesses") 1867 .flags(total | nozero | nonan) 1868 ; 1869 missRate[access_idx] = misses[access_idx] / accesses[access_idx]; 1870 1871 for (int i = 0; i < system->maxMasters(); i++) { 1872 missRate[access_idx].subname(i, system->getMasterName(i)); 1873 } 1874 } 1875 1876 demandMissRate 1877 .name(name() + ".demand_miss_rate") 1878 .desc("miss rate for demand accesses") 1879 .flags(total | nozero | nonan) 1880 ; 1881 demandMissRate = demandMisses / demandAccesses; 1882 for (int i = 0; i < system->maxMasters(); i++) { 1883 demandMissRate.subname(i, system->getMasterName(i)); 1884 } 1885 1886 overallMissRate 1887 .name(name() + ".overall_miss_rate") 1888 .desc("miss rate for overall accesses") 1889 .flags(total | nozero | nonan) 1890 ; 1891 overallMissRate = overallMisses / overallAccesses; 1892 for (int i = 0; i < system->maxMasters(); i++) { 1893 overallMissRate.subname(i, system->getMasterName(i)); 1894 } 1895 1896 // miss latency formulas 1897 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1898 MemCmd cmd(access_idx); 1899 const string &cstr = cmd.toString(); 1900 1901 avgMissLatency[access_idx] 1902 .name(name() + "." + cstr + "_avg_miss_latency") 1903 .desc("average " + cstr + " miss latency") 1904 .flags(total | nozero | nonan) 1905 ; 1906 avgMissLatency[access_idx] = 1907 missLatency[access_idx] / misses[access_idx]; 1908 1909 for (int i = 0; i < system->maxMasters(); i++) { 1910 avgMissLatency[access_idx].subname(i, system->getMasterName(i)); 1911 } 1912 } 1913 1914 demandAvgMissLatency 1915 .name(name() + ".demand_avg_miss_latency") 1916 .desc("average overall miss latency") 1917 .flags(total | nozero | nonan) 1918 ; 1919 demandAvgMissLatency = demandMissLatency / demandMisses; 1920 for (int i = 0; i < system->maxMasters(); i++) { 1921 demandAvgMissLatency.subname(i, system->getMasterName(i)); 1922 } 1923 1924 overallAvgMissLatency 1925 .name(name() + ".overall_avg_miss_latency") 1926 .desc("average overall miss latency") 1927 .flags(total | nozero | nonan) 1928 ; 1929 overallAvgMissLatency = overallMissLatency / overallMisses; 1930 for (int i = 0; i < system->maxMasters(); i++) { 1931 overallAvgMissLatency.subname(i, system->getMasterName(i)); 1932 } 1933 1934 blocked_cycles.init(NUM_BLOCKED_CAUSES); 1935 blocked_cycles 1936 .name(name() + ".blocked_cycles") 1937 .desc("number of cycles access was blocked") 1938 .subname(Blocked_NoMSHRs, "no_mshrs") 1939 .subname(Blocked_NoTargets, "no_targets") 1940 ; 1941 1942 1943 blocked_causes.init(NUM_BLOCKED_CAUSES); 1944 blocked_causes 1945 .name(name() + ".blocked") 1946 .desc("number of cycles access was blocked") 1947 .subname(Blocked_NoMSHRs, "no_mshrs") 1948 .subname(Blocked_NoTargets, "no_targets") 1949 ; 1950 1951 avg_blocked 1952 .name(name() + ".avg_blocked_cycles") 1953 .desc("average number of cycles each access was blocked") 1954 .subname(Blocked_NoMSHRs, "no_mshrs") 1955 .subname(Blocked_NoTargets, "no_targets") 1956 ; 1957 1958 avg_blocked = blocked_cycles / blocked_causes; 1959 1960 unusedPrefetches 1961 .name(name() + ".unused_prefetches") 1962 .desc("number of HardPF blocks evicted w/o reference") 1963 .flags(nozero) 1964 ; 1965 1966 writebacks 1967 .init(system->maxMasters()) 1968 .name(name() + ".writebacks") 1969 .desc("number of writebacks") 1970 .flags(total | nozero | nonan) 1971 ; 1972 for (int i = 0; i < system->maxMasters(); i++) { 1973 writebacks.subname(i, system->getMasterName(i)); 1974 } 1975 1976 // MSHR statistics 1977 
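    // The MSHR statistics below reuse the same demand/non-demand split
    // as the cache statistics above. As a worked example (illustrative
    // numbers, not from any particular run): a master that scored 100
    // ReadReq MSHR hits and 20 WriteReq MSHR hits, and no other demand
    // commands, gets demand_mshr_hits = 120 via SUM_DEMAND; any
    // prefetch (e.g. HardPFReq) hits are then added on top by
    // SUM_NON_DEMAND to form overall_mshr_hits.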
// MSHR hit statistics 1978 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1979 MemCmd cmd(access_idx); 1980 const string &cstr = cmd.toString(); 1981 1982 mshr_hits[access_idx] 1983 .init(system->maxMasters()) 1984 .name(name() + "." + cstr + "_mshr_hits") 1985 .desc("number of " + cstr + " MSHR hits") 1986 .flags(total | nozero | nonan) 1987 ; 1988 for (int i = 0; i < system->maxMasters(); i++) { 1989 mshr_hits[access_idx].subname(i, system->getMasterName(i)); 1990 } 1991 } 1992 1993 demandMshrHits 1994 .name(name() + ".demand_mshr_hits") 1995 .desc("number of demand (read+write) MSHR hits") 1996 .flags(total | nozero | nonan) 1997 ; 1998 demandMshrHits = SUM_DEMAND(mshr_hits); 1999 for (int i = 0; i < system->maxMasters(); i++) { 2000 demandMshrHits.subname(i, system->getMasterName(i)); 2001 } 2002 2003 overallMshrHits 2004 .name(name() + ".overall_mshr_hits") 2005 .desc("number of overall MSHR hits") 2006 .flags(total | nozero | nonan) 2007 ; 2008 overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits); 2009 for (int i = 0; i < system->maxMasters(); i++) { 2010 overallMshrHits.subname(i, system->getMasterName(i)); 2011 } 2012 2013 // MSHR miss statistics 2014 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2015 MemCmd cmd(access_idx); 2016 const string &cstr = cmd.toString(); 2017 2018 mshr_misses[access_idx] 2019 .init(system->maxMasters()) 2020 .name(name() + "." + cstr + "_mshr_misses") 2021 .desc("number of " + cstr + " MSHR misses") 2022 .flags(total | nozero | nonan) 2023 ; 2024 for (int i = 0; i < system->maxMasters(); i++) { 2025 mshr_misses[access_idx].subname(i, system->getMasterName(i)); 2026 } 2027 } 2028 2029 demandMshrMisses 2030 .name(name() + ".demand_mshr_misses") 2031 .desc("number of demand (read+write) MSHR misses") 2032 .flags(total | nozero | nonan) 2033 ; 2034 demandMshrMisses = SUM_DEMAND(mshr_misses); 2035 for (int i = 0; i < system->maxMasters(); i++) { 2036 demandMshrMisses.subname(i, system->getMasterName(i)); 2037 } 2038 2039 overallMshrMisses 2040 .name(name() + ".overall_mshr_misses") 2041 .desc("number of overall MSHR misses") 2042 .flags(total | nozero | nonan) 2043 ; 2044 overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses); 2045 for (int i = 0; i < system->maxMasters(); i++) { 2046 overallMshrMisses.subname(i, system->getMasterName(i)); 2047 } 2048 2049 // MSHR miss latency statistics 2050 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2051 MemCmd cmd(access_idx); 2052 const string &cstr = cmd.toString(); 2053 2054 mshr_miss_latency[access_idx] 2055 .init(system->maxMasters()) 2056 .name(name() + "." 
+ cstr + "_mshr_miss_latency") 2057 .desc("number of " + cstr + " MSHR miss cycles") 2058 .flags(total | nozero | nonan) 2059 ; 2060 for (int i = 0; i < system->maxMasters(); i++) { 2061 mshr_miss_latency[access_idx].subname(i, system->getMasterName(i)); 2062 } 2063 } 2064 2065 demandMshrMissLatency 2066 .name(name() + ".demand_mshr_miss_latency") 2067 .desc("number of demand (read+write) MSHR miss cycles") 2068 .flags(total | nozero | nonan) 2069 ; 2070 demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency); 2071 for (int i = 0; i < system->maxMasters(); i++) { 2072 demandMshrMissLatency.subname(i, system->getMasterName(i)); 2073 } 2074 2075 overallMshrMissLatency 2076 .name(name() + ".overall_mshr_miss_latency") 2077 .desc("number of overall MSHR miss cycles") 2078 .flags(total | nozero | nonan) 2079 ; 2080 overallMshrMissLatency = 2081 demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency); 2082 for (int i = 0; i < system->maxMasters(); i++) { 2083 overallMshrMissLatency.subname(i, system->getMasterName(i)); 2084 } 2085 2086 // MSHR uncacheable statistics 2087 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2088 MemCmd cmd(access_idx); 2089 const string &cstr = cmd.toString(); 2090 2091 mshr_uncacheable[access_idx] 2092 .init(system->maxMasters()) 2093 .name(name() + "." + cstr + "_mshr_uncacheable") 2094 .desc("number of " + cstr + " MSHR uncacheable") 2095 .flags(total | nozero | nonan) 2096 ; 2097 for (int i = 0; i < system->maxMasters(); i++) { 2098 mshr_uncacheable[access_idx].subname(i, system->getMasterName(i)); 2099 } 2100 } 2101 2102 overallMshrUncacheable 2103 .name(name() + ".overall_mshr_uncacheable_misses") 2104 .desc("number of overall MSHR uncacheable misses") 2105 .flags(total | nozero | nonan) 2106 ; 2107 overallMshrUncacheable = 2108 SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable); 2109 for (int i = 0; i < system->maxMasters(); i++) { 2110 overallMshrUncacheable.subname(i, system->getMasterName(i)); 2111 } 2112 2113 // MSHR miss latency statistics 2114 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2115 MemCmd cmd(access_idx); 2116 const string &cstr = cmd.toString(); 2117 2118 mshr_uncacheable_lat[access_idx] 2119 .init(system->maxMasters()) 2120 .name(name() + "." + cstr + "_mshr_uncacheable_latency") 2121 .desc("number of " + cstr + " MSHR uncacheable cycles") 2122 .flags(total | nozero | nonan) 2123 ; 2124 for (int i = 0; i < system->maxMasters(); i++) { 2125 mshr_uncacheable_lat[access_idx].subname( 2126 i, system->getMasterName(i)); 2127 } 2128 } 2129 2130 overallMshrUncacheableLatency 2131 .name(name() + ".overall_mshr_uncacheable_latency") 2132 .desc("number of overall MSHR uncacheable cycles") 2133 .flags(total | nozero | nonan) 2134 ; 2135 overallMshrUncacheableLatency = 2136 SUM_DEMAND(mshr_uncacheable_lat) + 2137 SUM_NON_DEMAND(mshr_uncacheable_lat); 2138 for (int i = 0; i < system->maxMasters(); i++) { 2139 overallMshrUncacheableLatency.subname(i, system->getMasterName(i)); 2140 } 2141 2142#if 0 2143 // MSHR access formulas 2144 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2145 MemCmd cmd(access_idx); 2146 const string &cstr = cmd.toString(); 2147 2148 mshrAccesses[access_idx] 2149 .name(name() + "." 
+ cstr + "_mshr_accesses") 2150 .desc("number of " + cstr + " mshr accesses(hits+misses)") 2151 .flags(total | nozero | nonan) 2152 ; 2153 mshrAccesses[access_idx] = 2154 mshr_hits[access_idx] + mshr_misses[access_idx] 2155 + mshr_uncacheable[access_idx]; 2156 } 2157 2158 demandMshrAccesses 2159 .name(name() + ".demand_mshr_accesses") 2160 .desc("number of demand (read+write) mshr accesses") 2161 .flags(total | nozero | nonan) 2162 ; 2163 demandMshrAccesses = demandMshrHits + demandMshrMisses; 2164 2165 overallMshrAccesses 2166 .name(name() + ".overall_mshr_accesses") 2167 .desc("number of overall (read+write) mshr accesses") 2168 .flags(total | nozero | nonan) 2169 ; 2170 overallMshrAccesses = overallMshrHits + overallMshrMisses 2171 + overallMshrUncacheable; 2172#endif 2173 2174 // MSHR miss rate formulas 2175 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2176 MemCmd cmd(access_idx); 2177 const string &cstr = cmd.toString(); 2178 2179 mshrMissRate[access_idx] 2180 .name(name() + "." + cstr + "_mshr_miss_rate") 2181 .desc("mshr miss rate for " + cstr + " accesses") 2182 .flags(total | nozero | nonan) 2183 ; 2184 mshrMissRate[access_idx] = 2185 mshr_misses[access_idx] / accesses[access_idx]; 2186 2187 for (int i = 0; i < system->maxMasters(); i++) { 2188 mshrMissRate[access_idx].subname(i, system->getMasterName(i)); 2189 } 2190 } 2191 2192 demandMshrMissRate 2193 .name(name() + ".demand_mshr_miss_rate") 2194 .desc("mshr miss rate for demand accesses") 2195 .flags(total | nozero | nonan) 2196 ; 2197 demandMshrMissRate = demandMshrMisses / demandAccesses; 2198 for (int i = 0; i < system->maxMasters(); i++) { 2199 demandMshrMissRate.subname(i, system->getMasterName(i)); 2200 } 2201 2202 overallMshrMissRate 2203 .name(name() + ".overall_mshr_miss_rate") 2204 .desc("mshr miss rate for overall accesses") 2205 .flags(total | nozero | nonan) 2206 ; 2207 overallMshrMissRate = overallMshrMisses / overallAccesses; 2208 for (int i = 0; i < system->maxMasters(); i++) { 2209 overallMshrMissRate.subname(i, system->getMasterName(i)); 2210 } 2211 2212 // mshrMiss latency formulas 2213 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2214 MemCmd cmd(access_idx); 2215 const string &cstr = cmd.toString(); 2216 2217 avgMshrMissLatency[access_idx] 2218 .name(name() + "." 
+ cstr + "_avg_mshr_miss_latency") 2219 .desc("average " + cstr + " mshr miss latency") 2220 .flags(total | nozero | nonan) 2221 ; 2222 avgMshrMissLatency[access_idx] = 2223 mshr_miss_latency[access_idx] / mshr_misses[access_idx]; 2224 2225 for (int i = 0; i < system->maxMasters(); i++) { 2226 avgMshrMissLatency[access_idx].subname( 2227 i, system->getMasterName(i)); 2228 } 2229 } 2230 2231 demandAvgMshrMissLatency 2232 .name(name() + ".demand_avg_mshr_miss_latency") 2233 .desc("average overall mshr miss latency") 2234 .flags(total | nozero | nonan) 2235 ; 2236 demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses; 2237 for (int i = 0; i < system->maxMasters(); i++) { 2238 demandAvgMshrMissLatency.subname(i, system->getMasterName(i)); 2239 } 2240 2241 overallAvgMshrMissLatency 2242 .name(name() + ".overall_avg_mshr_miss_latency") 2243 .desc("average overall mshr miss latency") 2244 .flags(total | nozero | nonan) 2245 ; 2246 overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses; 2247 for (int i = 0; i < system->maxMasters(); i++) { 2248 overallAvgMshrMissLatency.subname(i, system->getMasterName(i)); 2249 } 2250 2251 // mshrUncacheable latency formulas 2252 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2253 MemCmd cmd(access_idx); 2254 const string &cstr = cmd.toString(); 2255 2256 avgMshrUncacheableLatency[access_idx] 2257 .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency") 2258 .desc("average " + cstr + " mshr uncacheable latency") 2259 .flags(total | nozero | nonan) 2260 ; 2261 avgMshrUncacheableLatency[access_idx] = 2262 mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx]; 2263 2264 for (int i = 0; i < system->maxMasters(); i++) { 2265 avgMshrUncacheableLatency[access_idx].subname( 2266 i, system->getMasterName(i)); 2267 } 2268 } 2269 2270 overallAvgMshrUncacheableLatency 2271 .name(name() + ".overall_avg_mshr_uncacheable_latency") 2272 .desc("average overall mshr uncacheable latency") 2273 .flags(total | nozero | nonan) 2274 ; 2275 overallAvgMshrUncacheableLatency = 2276 overallMshrUncacheableLatency / overallMshrUncacheable; 2277 for (int i = 0; i < system->maxMasters(); i++) { 2278 overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i)); 2279 } 2280 2281 replacements 2282 .name(name() + ".replacements") 2283 .desc("number of replacements") 2284 ; 2285} 2286 2287void 2288BaseCache::regProbePoints() 2289{ 2290 ppHit = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Hit"); 2291 ppMiss = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Miss"); 2292 ppFill = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Fill"); 2293} 2294 2295/////////////// 2296// 2297// CpuSidePort 2298// 2299/////////////// 2300bool 2301BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt) 2302{ 2303 // Snoops shouldn't happen when bypassing caches 2304 assert(!cache->system->bypassCaches()); 2305 2306 assert(pkt->isResponse()); 2307 2308 // Express snoop responses from master to slave, e.g., from L1 to L2 2309 cache->recvTimingSnoopResp(pkt); 2310 return true; 2311} 2312 2313 2314bool 2315BaseCache::CpuSidePort::tryTiming(PacketPtr pkt) 2316{ 2317 if (cache->system->bypassCaches() || pkt->isExpressSnoop()) { 2318 // always let express snoop packets through even if blocked 2319 return true; 2320 } else if (blocked || mustSendRetry) { 2321 // either already committed to send a retry, or blocked 2322 mustSendRetry = true; 2323 return false; 2324 } 2325 mustSendRetry = false; 2326 return true; 2327} 
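// To illustrate the handshake above (hypothetical caller, not part of
// this file): a sender is expected to check tryTiming() before handing
// over a packet, and to hold on to the packet for a retry otherwise:
//
//     if (port.tryTiming(pkt)) {
//         // the packet will be accepted
//     } else {
//         // mustSendRetry is now set; the port issues a retry once
//         // clearBlocked()/processSendRetry() runs, and the sender
//         // tries again then
//     }
//
// recvTimingReq() below follows exactly this pattern.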
2328
2329 bool
2330 BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt)
2331 {
2332     assert(pkt->isRequest());
2333
2334     if (cache->system->bypassCaches()) {
2335         // Just forward the packet if caches are disabled.
2336         // @todo This should really enqueue the packet rather than forward it
2337         bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt);
2338         assert(success);
2339         return true;
2340     } else if (tryTiming(pkt)) {
2341         cache->recvTimingReq(pkt);
2342         return true;
2343     }
2344     return false;
2345 }
2346
2347 Tick
2348 BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt)
2349 {
2350     if (cache->system->bypassCaches()) {
2351         // Forward the request if the system is in cache bypass mode.
2352         return cache->memSidePort.sendAtomic(pkt);
2353     } else {
2354         return cache->recvAtomic(pkt);
2355     }
2356 }
2357
2358 void
2359 BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt)
2360 {
2361     if (cache->system->bypassCaches()) {
2362         // The cache should be flushed if we are in cache bypass mode,
2363         // so we don't need to check if we need to update anything.
2364         cache->memSidePort.sendFunctional(pkt);
2365         return;
2366     }
2367
2368     // functional request
2369     cache->functionalAccess(pkt, true);
2370 }
2371
2372 AddrRangeList
2373 BaseCache::CpuSidePort::getAddrRanges() const
2374 {
2375     return cache->getAddrRanges();
2376 }
2377
2378
2379 BaseCache::
2380 CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache,
2381                          const std::string &_label)
2382     : CacheSlavePort(_name, _cache, _label), cache(_cache)
2383 {
2384 }
2385
2386 ///////////////
2387 //
2388 // MemSidePort
2389 //
2390 ///////////////
2391 bool
2392 BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt)
2393 {
2394     cache->recvTimingResp(pkt);
2395     return true;
2396 }
2397
2398 // Express snooping requests to memside port
2399 void
2400 BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
2401 {
2402     // Snoops shouldn't happen when bypassing caches
2403     assert(!cache->system->bypassCaches());
2404
2405     // handle snooping requests
2406     cache->recvTimingSnoopReq(pkt);
2407 }
2408
2409 Tick
2410 BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
2411 {
2412     // Snoops shouldn't happen when bypassing caches
2413     assert(!cache->system->bypassCaches());
2414
2415     return cache->recvAtomicSnoop(pkt);
2416 }
2417
2418 void
2419 BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
2420 {
2421     // Snoops shouldn't happen when bypassing caches
2422     assert(!cache->system->bypassCaches());
2423
2424     // functional snoop (note that in contrast to atomic we don't have
2425     // a specific functionalSnoop method, as they have the same
2426     // behaviour regardless)
2427     cache->functionalAccess(pkt, false);
2428 }
2429
2430 void
2431 BaseCache::CacheReqPacketQueue::sendDeferredPacket()
2432 {
2433     // sanity check
2434     assert(!waitingOnRetry);
2435
2436     // there should never be any deferred request packets in the
2437     // queue, instead we rely on the cache to provide the packets
2438     // from the MSHR queue or write queue
2439     assert(deferredPacketReadyTime() == MaxTick);
2440
2441     // check for request packets (requests & writebacks)
2442     QueueEntry* entry = cache.getNextQueueEntry();
2443
2444     if (!entry) {
2445         // can happen if e.g. we attempt a writeback and fail, but
2446         // before the retry, the writeback is eliminated because
2447         // we snoop another cache's ReadEx.
2448     } else {
2449         // let our snoop responses go first if there are responses to
2450         // the same addresses
2451         if (checkConflictingSnoop(entry->getTarget()->pkt)) {
2452             return;
2453         }
2454         waitingOnRetry = entry->sendPacket(cache);
2455     }
2456
2457     // if we succeeded and are not waiting for a retry, schedule the
2458     // next send considering when the next queue is ready; note that
2459     // snoop responses have their own packet queue and thus schedule
2460     // their own events
2461     if (!waitingOnRetry) {
2462         schedSendEvent(cache.nextQueueReadyTime());
2463     }
2464 }
2465
2466 BaseCache::MemSidePort::MemSidePort(const std::string &_name,
2467                                     BaseCache *_cache,
2468                                     const std::string &_label)
2469     : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
2470       _reqQueue(*_cache, *this, _snoopRespQueue, _label),
2471       _snoopRespQueue(*_cache, *this, true, _label), cache(_cache)
2472 {
2473 }
2474
2475 void
2476 WriteAllocator::updateMode(Addr write_addr, unsigned write_size,
2477                            Addr blk_addr)
2478 {
2479     // check if we are continuing where the last write ended
2480     if (nextAddr == write_addr) {
2481         delayCtr[blk_addr] = delayThreshold;
2482         // stop if we have already saturated
2483         if (mode != WriteMode::NO_ALLOCATE) {
2484             byteCount += write_size;
2485             // switch to write coalescing if we have passed the lower
2486             // threshold
2487             if (mode == WriteMode::ALLOCATE &&
2488                 byteCount > coalesceLimit) {
2489                 mode = WriteMode::COALESCE;
2490                 DPRINTF(Cache, "Switched to write coalescing\n");
2491             } else if (mode == WriteMode::COALESCE &&
2492                        byteCount > noAllocateLimit) {
2493                 // and switch to non-allocating mode if we also
2494                 // pass the upper threshold
2495                 mode = WriteMode::NO_ALLOCATE;
2496                 DPRINTF(Cache, "Switched to write-no-allocate\n");
2497             }
2498         }
2499     } else {
2500         // we did not see a write matching the previous one, start
2501         // over again
2502         byteCount = write_size;
2503         mode = WriteMode::ALLOCATE;
2504         resetDelay(blk_addr);
2505     }
2506     nextAddr = write_addr + write_size;
2507 }
2508
2509 WriteAllocator*
2510 WriteAllocatorParams::create()
2511 {
2512     return new WriteAllocator(this);
2513 }
2514
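// A minimal standalone sketch (illustrative only, not part of gem5) of
// the mode progression implemented by updateMode() above. It assumes a
// 64-byte block, a coalesceLimit of two blocks and a noAllocateLimit of
// three blocks worth of contiguous write data; the real limits come
// from the WriteAllocator parameters.
#if 0
#include <cassert>

enum class Mode { ALLOCATE, COALESCE, NO_ALLOCATE };

int main()
{
    const unsigned blk_size = 64;
    const unsigned coalesce_limit = 2 * blk_size;
    const unsigned no_allocate_limit = 3 * blk_size;

    Mode mode = Mode::ALLOCATE;
    unsigned byte_count = 0;
    unsigned next_addr = 0;

    // issue 64 contiguous 8-byte writes starting at address 0
    for (unsigned addr = 0; addr < 64 * 8; addr += 8) {
        if (addr == next_addr) {
            // contiguous with the previous write, keep counting
            if (mode != Mode::NO_ALLOCATE) {
                byte_count += 8;
                if (mode == Mode::ALLOCATE &&
                    byte_count > coalesce_limit) {
                    mode = Mode::COALESCE;      // crossed after 136 bytes
                } else if (mode == Mode::COALESCE &&
                           byte_count > no_allocate_limit) {
                    mode = Mode::NO_ALLOCATE;   // crossed after 200 bytes
                }
            }
        } else {
            // the stream broke, start over
            byte_count = 8;
            mode = Mode::ALLOCATE;
        }
        next_addr = addr + 8;
    }

    // a long enough contiguous write stream ends up non-allocating
    assert(mode == Mode::NO_ALLOCATE);
    return 0;
}
#endif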