// base.cc, revision 14193:7dd8a6df30e2
1/* 2 * Copyright (c) 2012-2013, 2018-2019 ARM Limited 3 * All rights reserved. 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2003-2005 The Regents of The University of Michigan 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Erik Hallnor 41 * Nikos Nikoleris 42 */ 43 44/** 45 * @file 46 * Definition of BaseCache functions. 
47 */ 48 49#include "mem/cache/base.hh" 50 51#include "base/compiler.hh" 52#include "base/logging.hh" 53#include "debug/Cache.hh" 54#include "debug/CacheComp.hh" 55#include "debug/CachePort.hh" 56#include "debug/CacheRepl.hh" 57#include "debug/CacheVerbose.hh" 58#include "mem/cache/compressors/base.hh" 59#include "mem/cache/mshr.hh" 60#include "mem/cache/prefetch/base.hh" 61#include "mem/cache/queue_entry.hh" 62#include "mem/cache/tags/super_blk.hh" 63#include "params/BaseCache.hh" 64#include "params/WriteAllocator.hh" 65#include "sim/core.hh" 66 67using namespace std; 68 69BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name, 70 BaseCache *_cache, 71 const std::string &_label) 72 : QueuedSlavePort(_name, _cache, queue), 73 queue(*_cache, *this, true, _label), 74 blocked(false), mustSendRetry(false), 75 sendRetryEvent([this]{ processSendRetry(); }, _name) 76{ 77} 78 79BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size) 80 : ClockedObject(p), 81 cpuSidePort (p->name + ".cpu_side", this, "CpuSidePort"), 82 memSidePort(p->name + ".mem_side", this, "MemSidePort"), 83 mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below 84 writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below 85 tags(p->tags), 86 compressor(p->compressor), 87 prefetcher(p->prefetcher), 88 writeAllocator(p->write_allocator), 89 writebackClean(p->writeback_clean), 90 tempBlockWriteback(nullptr), 91 writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); }, 92 name(), false, 93 EventBase::Delayed_Writeback_Pri), 94 blkSize(blk_size), 95 lookupLatency(p->tag_latency), 96 dataLatency(p->data_latency), 97 forwardLatency(p->tag_latency), 98 fillLatency(p->data_latency), 99 responseLatency(p->response_latency), 100 sequentialAccess(p->sequential_access), 101 numTarget(p->tgts_per_mshr), 102 forwardSnoops(true), 103 clusivity(p->clusivity), 104 isReadOnly(p->is_read_only), 105 blocked(0), 106 order(0), 107 noTargetMSHR(nullptr), 108 missCount(p->max_miss_count), 109 addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()), 110 system(p->system) 111{ 112 // the MSHR queue has no reserve entries as we check the MSHR 113 // queue on every single allocation, whereas the write queue has 114 // as many reserve entries as we have MSHRs, since every MSHR may 115 // eventually require a writeback, and we do not check the write 116 // buffer before committing to an MSHR 117 118 // forward snoops is overridden in init() once we can query 119 // whether the connected master is actually snooping or not 120 121 tempBlock = new TempCacheBlk(blkSize); 122 123 tags->tagsInit(); 124 if (prefetcher) 125 prefetcher->setCache(this); 126} 127 128BaseCache::~BaseCache() 129{ 130 delete tempBlock; 131} 132 133void 134BaseCache::CacheSlavePort::setBlocked() 135{ 136 assert(!blocked); 137 DPRINTF(CachePort, "Port is blocking new requests\n"); 138 blocked = true; 139 // if we already scheduled a retry in this cycle, but it has not yet 140 // happened, cancel it 141 if (sendRetryEvent.scheduled()) { 142 owner.deschedule(sendRetryEvent); 143 DPRINTF(CachePort, "Port descheduled retry\n"); 144 mustSendRetry = true; 145 } 146} 147 148void 149BaseCache::CacheSlavePort::clearBlocked() 150{ 151 assert(blocked); 152 DPRINTF(CachePort, "Port is accepting new requests\n"); 153 blocked = false; 154 if (mustSendRetry) { 155 // @TODO: need to find a better time (next cycle?) 
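        // Note (added for clarity): scheduling at curTick() + 1 issues the
        // retry as soon as possible (one tick later) rather than at the next
        // clock edge; processSendRetry() below then clears mustSendRetry and
        // calls sendRetryReq() on the peer port.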
        owner.schedule(sendRetryEvent, curTick() + 1);
    }
}

void
BaseCache::CacheSlavePort::processSendRetry()
{
    DPRINTF(CachePort, "Port is sending retry\n");

    // reset the flag and call retry
    mustSendRetry = false;
    sendRetryReq();
}

Addr
BaseCache::regenerateBlkAddr(CacheBlk* blk)
{
    if (blk != tempBlock) {
        return tags->regenerateBlkAddr(blk);
    } else {
        return tempBlock->getAddr();
    }
}

void
BaseCache::init()
{
    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
        fatal("Cache ports on %s are not connected\n", name());
    cpuSidePort.sendRangeChange();
    forwardSnoops = cpuSidePort.isSnooping();
}

Port &
BaseCache::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_side") {
        return memSidePort;
    } else if (if_name == "cpu_side") {
        return cpuSidePort;
    } else {
        return ClockedObject::getPort(if_name, idx);
    }
}

bool
BaseCache::inRange(Addr addr) const
{
    for (const auto& r : addrRanges) {
        if (r.contains(addr)) {
            return true;
        }
    }
    return false;
}

void
BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    if (pkt->needsResponse()) {
        // These delays should have been consumed by now
        assert(pkt->headerDelay == 0);
        assert(pkt->payloadDelay == 0);

        pkt->makeTimingResponse();

        // In this case we are considering request_time that takes
        // into account the delay of the xbar, if any, and just
        // lat, neglecting responseLatency, modelling hit latency
        // just as the value of lat overridden by access(), which calls
        // the calculateAccessLatency() function.
        cpuSidePort.schedTimingResp(pkt, request_time);
    } else {
        DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
                pkt->print());

        // queue the packet for deletion, as the sending cache is
        // still relying on it; if the block is found in access(),
        // CleanEvict and Writeback messages will be deleted
        // here as well
        pendingDelete.reset(pkt);
    }
}

void
BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                               Tick forward_time, Tick request_time)
{
    if (writeAllocator &&
        pkt && pkt->isWrite() && !pkt->req->isUncacheable()) {
        writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
                                   pkt->getBlockAddr(blkSize));
    }

    if (mshr) {
        /// MSHR hit
        /// @note writebacks will be checked in getNextMSHR()
        /// for any conflicting requests to the same block

        //@todo remove hw_pf here

        // Coalesce unless it was a software prefetch (see above).
        if (pkt) {
            assert(!pkt->isWriteback());
            // CleanEvicts corresponding to blocks which have
            // outstanding requests in MSHRs are simply sunk here
            if (pkt->cmd == MemCmd::CleanEvict) {
                pendingDelete.reset(pkt);
            } else if (pkt->cmd == MemCmd::WriteClean) {
                // A WriteClean should never coalesce with any
                // outstanding cache maintenance requests.

                // We use forward_time here because there is an
                // uncached memory write, forwarded to WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
                        pkt->print());

                assert(pkt->req->masterId() < system->maxMasters());
                mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // We use forward_time here because it is the same
                // considering new targets. We have multiple
                // requests for the same address here. It
                // specifies the latency to allocate an internal
                // buffer and to schedule an event to the queued
                // port and also takes into account the additional
                // delay of the xbar.
                mshr->allocateTarget(pkt, forward_time, order++,
                                     allocOnFill(pkt->cmd));
                if (mshr->getNumTargets() == numTarget) {
                    noTargetMSHR = mshr;
                    setBlocked(Blocked_NoTargets);
                    // need to be careful with this... if this mshr isn't
                    // ready yet (i.e. time > curTick()), we don't want to
                    // move it ahead of mshrs that are ready
                    // mshrQueue.moveToFront(mshr);
                }
            }
        }
    } else {
        // no MSHR
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

        if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
            // We use forward_time here because there is a
            // writeback or writeclean, forwarded to WriteBuffer.
            allocateWriteBuffer(pkt, forward_time);
        } else {
            if (blk && blk->isValid()) {
                // If we have a write miss to a valid block, we
                // need to mark the block non-readable. Otherwise
                // if we allow reads while there's an outstanding
                // write miss, the read could return stale data
                // out of the cache block... a more aggressive
                // system could detect the overlap (if any) and
                // forward data out of the MSHRs, but we don't do
                // that yet. Note that we do need to leave the
                // block valid so that it stays in the cache, in
                // case we get an upgrade response (and hence no
                // new data) when the write miss completes.
                // As long as CPUs do proper store/load forwarding
                // internally, and have a sufficiently weak memory
                // model, this is probably unnecessary, but at some
                // point it must have seemed like we needed it...
                assert((pkt->needsWritable() && !blk->isWritable()) ||
                       pkt->req->isCacheMaintenance());
                blk->status &= ~BlkReadable;
            }
            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }
    }
}

void
BaseCache::recvTimingReq(PacketPtr pkt)
{
    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    Cycles lat;
    CacheBlk *blk = nullptr;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() will set the lat value.
        satisfied = access(pkt, blk, lat, writebacks);

        // After the evicted blocks are selected, they must be forwarded
        // to the write buffer to ensure they logically precede anything
        // happening below
        doWritebacks(writebacks, clockEdge(lat + forwardLatency));
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is just the value set by the access() function.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat);
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;

    if (satisfied) {
        // notify before anything else as later handleTimingReqHit might
        // turn the packet into a response
        ppHit->notify(pkt);

        if (prefetcher && blk && blk->wasPrefetched()) {
            blk->status &= ~BlkHWPrefetched;
        }

        handleTimingReqHit(pkt, blk, request_time);
    } else {
        handleTimingReqMiss(pkt, blk, forward_time, request_time);

        ppMiss->notify(pkt);
    }

    if (prefetcher) {
        // track time of availability of next prefetch, if any
        Tick next_pf_time = prefetcher->nextPrefetchReadyTime();
        if (next_pf_time != MaxTick) {
            schedMemSideSendEvent(next_pf_time);
        }
    }
}

void
BaseCache::handleUncacheableWriteResp(PacketPtr pkt)
{
    Tick completion_time = clockEdge(responseLatency) +
        pkt->headerDelay + pkt->payloadDelay;

    // Reset the bus additional time as it is now accounted for
    pkt->headerDelay = pkt->payloadDelay = 0;

    cpuSidePort.schedTimingResp(pkt, completion_time);
}

void
BaseCache::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    // all header delay should be paid for by the crossbar, unless
    // this is a prefetch response from above
    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
             "%s saw a non-zero packet delay\n", name());

    const bool is_error = pkt->isError();

    if (is_error) {
        DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
                pkt->print());
    }

    DPRINTF(Cache, "%s: Handling response %s\n", __func__,
            pkt->print());

    // if this is a write, we should be looking at an uncacheable
    // write
    if (pkt->isWrite()) {
        assert(pkt->req->isUncacheable());
        handleUncacheableWriteResp(pkt);
        return;
    }

    // we have dealt with any (uncacheable) writes above, from here on
    // we know we are dealing with an MSHR due to a miss or a prefetch
    MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
    assert(mshr);

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = nullptr;
    }

    // Initial target is used just for stats
    QueueEntry::Target *initial_tgt = mshr->getTarget();
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick() - initial_tgt->recvTime;

    if (pkt->req->isUncacheable()) {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    } else {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    }

    PacketList writebacks;

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp ||
         mshr->wasWholeLineWrite);

    // make sure that if the mshr was due to a whole line write then
    // the response is an invalidation
    assert(!mshr->wasWholeLineWrite || pkt->isInvalidate());

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                pkt->getAddr());

        const bool allocate = (writeAllocator && mshr->wasWholeLineWrite) ?
            writeAllocator->allocate() : mshr->allocOnFill();
        blk = handleFill(pkt, blk, writebacks, allocate);
        assert(blk != nullptr);
        ppFill->notify(pkt);
    }

    if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
        // The block was marked not readable while there was a pending
        // cache maintenance operation, restore its flag.
        blk->status |= BlkReadable;

        // This was a cache clean operation (without invalidate)
        // and we have a copy of the block already. Since there
        // is no invalidation, we can promote targets that don't
        // require a writable copy
        mshr->promoteReadable();
    }

    if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) {
        // If at this point the referenced block is writable and the
        // response is not a cache invalidate, we promote targets that
        // were deferred as we couldn't guarantee a writable copy
        mshr->promoteWritable();
    }

    serviceMSHRTargets(mshr, pkt, blk);

    if (mshr->promoteDeferredTargets()) {
        // avoid later read getting stale data while write miss is
        // outstanding.. see comment in timingAccess()
        if (blk) {
            blk->status &= ~BlkReadable;
        }
        mshrQueue.markPending(mshr);
        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
    } else {
        // while we deallocate an mshr from the queue we still have to
        // check the isFull condition before and after as we might
        // have been using the reserved entries already
        const bool was_full = mshrQueue.isFull();
        mshrQueue.deallocate(mshr);
        if (was_full && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }

        // Request the bus for a prefetch if this deallocation freed enough
        // MSHRs for a prefetch to take place
        if (prefetcher && mshrQueue.canPrefetch()) {
            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
                                         clockEdge());
            if (next_pf_time != MaxTick)
                schedMemSideSendEvent(next_pf_time);
        }
    }

    // if we used temp block, check to see if it's valid and then clear it out
    if (blk == tempBlock && tempBlock->isValid()) {
        evictBlock(blk, writebacks);
    }

    const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // copy writebacks to write buffer
    doWritebacks(writebacks, forward_time);

    DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
    delete pkt;
}


Tick
BaseCache::recvAtomic(PacketPtr pkt)
{
    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    // We use lookupLatency here because it is used to specify the latency
    // to access.
    Cycles lat = lookupLatency;

    CacheBlk *blk = nullptr;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty
        // block. If a dirty block is encountered a WriteClean
        // will update any copies on the path to the memory
        // until the point of reference.
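        // Illustrative scenario: a clean request (e.g. CleanShared) that
        // finds this block dirty is marked satisfied here, indicating to
        // the caches and crossbars below that the dirty copy has been
        // handled, while the WriteClean created just below carries the
        // dirty data towards the point of reference and is issued by
        // doWritebacksAtomic().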
        DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                __func__, pkt->print(), blk->print());
        PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
        writebacks.push_back(wb_pkt);
        pkt->setSatisfied();
    }

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);
    assert(writebacks.empty());

    if (!satisfied) {
        lat += handleAtomicReqMiss(pkt, blk, writebacks);
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention. If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used temp block, check to see if it's valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter), first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = evictBlock(blk);
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat * clockPeriod();
}

void
BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
{
    Addr blk_addr = pkt->getBlockAddr(blkSize);
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found. As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.
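    // In other words, have_data below only says that this cache can supply
    // the bytes; have_dirty is what allows the access to be declared
    // complete, since only the owner is guaranteed to hold the
    // authoritative copy of the line.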
639 640 // see if we have data at all (owned or otherwise) 641 bool have_data = blk && blk->isValid() 642 && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize, 643 blk->data); 644 645 // data we have is dirty if marked as such or if we have an 646 // in-service MSHR that is pending a modified line 647 bool have_dirty = 648 have_data && (blk->isDirty() || 649 (mshr && mshr->inService && mshr->isPendingModified())); 650 651 bool done = have_dirty || 652 cpuSidePort.trySatisfyFunctional(pkt) || 653 mshrQueue.trySatisfyFunctional(pkt) || 654 writeBuffer.trySatisfyFunctional(pkt) || 655 memSidePort.trySatisfyFunctional(pkt); 656 657 DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(), 658 (blk && blk->isValid()) ? "valid " : "", 659 have_data ? "data " : "", done ? "done " : ""); 660 661 // We're leaving the cache, so pop cache->name() label 662 pkt->popLabel(); 663 664 if (done) { 665 pkt->makeResponse(); 666 } else { 667 // if it came as a request from the CPU side then make sure it 668 // continues towards the memory side 669 if (from_cpu_side) { 670 memSidePort.sendFunctional(pkt); 671 } else if (cpuSidePort.isSnooping()) { 672 // if it came from the memory side, it must be a snoop request 673 // and we should only forward it if we are forwarding snoops 674 cpuSidePort.sendFunctionalSnoop(pkt); 675 } 676 } 677} 678 679 680void 681BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt) 682{ 683 assert(pkt->isRequest()); 684 685 uint64_t overwrite_val; 686 bool overwrite_mem; 687 uint64_t condition_val64; 688 uint32_t condition_val32; 689 690 int offset = pkt->getOffset(blkSize); 691 uint8_t *blk_data = blk->data + offset; 692 693 assert(sizeof(uint64_t) >= pkt->getSize()); 694 695 overwrite_mem = true; 696 // keep a copy of our possible write value, and copy what is at the 697 // memory address into the packet 698 pkt->writeData((uint8_t *)&overwrite_val); 699 pkt->setData(blk_data); 700 701 if (pkt->req->isCondSwap()) { 702 if (pkt->getSize() == sizeof(uint64_t)) { 703 condition_val64 = pkt->req->getExtraData(); 704 overwrite_mem = !std::memcmp(&condition_val64, blk_data, 705 sizeof(uint64_t)); 706 } else if (pkt->getSize() == sizeof(uint32_t)) { 707 condition_val32 = (uint32_t)pkt->req->getExtraData(); 708 overwrite_mem = !std::memcmp(&condition_val32, blk_data, 709 sizeof(uint32_t)); 710 } else 711 panic("Invalid size for conditional read/write\n"); 712 } 713 714 if (overwrite_mem) { 715 std::memcpy(blk_data, &overwrite_val, pkt->getSize()); 716 blk->status |= BlkDirty; 717 } 718} 719 720QueueEntry* 721BaseCache::getNextQueueEntry() 722{ 723 // Check both MSHR queue and write buffer for potential requests, 724 // note that null does not mean there is no request, it could 725 // simply be that it is not ready 726 MSHR *miss_mshr = mshrQueue.getNext(); 727 WriteQueueEntry *wq_entry = writeBuffer.getNext(); 728 729 // If we got a write buffer request ready, first priority is a 730 // full write buffer, otherwise we favour the miss requests 731 if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) { 732 // need to search MSHR queue for conflicting earlier miss. 733 MSHR *conflict_mshr = mshrQueue.findPending(wq_entry); 734 735 if (conflict_mshr && conflict_mshr->order < wq_entry->order) { 736 // Service misses in order until conflict is cleared. 
737 return conflict_mshr; 738 739 // @todo Note that we ignore the ready time of the conflict here 740 } 741 742 // No conflicts; issue write 743 return wq_entry; 744 } else if (miss_mshr) { 745 // need to check for conflicting earlier writeback 746 WriteQueueEntry *conflict_mshr = writeBuffer.findPending(miss_mshr); 747 if (conflict_mshr) { 748 // not sure why we don't check order here... it was in the 749 // original code but commented out. 750 751 // The only way this happens is if we are 752 // doing a write and we didn't have permissions 753 // then subsequently saw a writeback (owned got evicted) 754 // We need to make sure to perform the writeback first 755 // To preserve the dirty data, then we can issue the write 756 757 // should we return wq_entry here instead? I.e. do we 758 // have to flush writes in order? I don't think so... not 759 // for Alpha anyway. Maybe for x86? 760 return conflict_mshr; 761 762 // @todo Note that we ignore the ready time of the conflict here 763 } 764 765 // No conflicts; issue read 766 return miss_mshr; 767 } 768 769 // fall through... no pending requests. Try a prefetch. 770 assert(!miss_mshr && !wq_entry); 771 if (prefetcher && mshrQueue.canPrefetch()) { 772 // If we have a miss queue slot, we can try a prefetch 773 PacketPtr pkt = prefetcher->getPacket(); 774 if (pkt) { 775 Addr pf_addr = pkt->getBlockAddr(blkSize); 776 if (!tags->findBlock(pf_addr, pkt->isSecure()) && 777 !mshrQueue.findMatch(pf_addr, pkt->isSecure()) && 778 !writeBuffer.findMatch(pf_addr, pkt->isSecure())) { 779 // Update statistic on number of prefetches issued 780 // (hwpf_mshr_misses) 781 assert(pkt->req->masterId() < system->maxMasters()); 782 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++; 783 784 // allocate an MSHR and return it, note 785 // that we send the packet straight away, so do not 786 // schedule the send 787 return allocateMissBuffer(pkt, curTick(), false); 788 } else { 789 // free the request and packet 790 delete pkt; 791 } 792 } 793 } 794 795 return nullptr; 796} 797 798bool 799BaseCache::updateCompressionData(CacheBlk *blk, const uint64_t* data, 800 PacketList &writebacks) 801{ 802 // tempBlock does not exist in the tags, so don't do anything for it. 803 if (blk == tempBlock) { 804 return true; 805 } 806 807 // Get superblock of the given block 808 CompressionBlk* compression_blk = static_cast<CompressionBlk*>(blk); 809 const SuperBlk* superblock = static_cast<const SuperBlk*>( 810 compression_blk->getSectorBlock()); 811 812 // The compressor is called to compress the updated data, so that its 813 // metadata can be updated. 814 std::size_t compression_size = 0; 815 Cycles compression_lat = Cycles(0); 816 Cycles decompression_lat = Cycles(0); 817 compressor->compress(data, compression_lat, decompression_lat, 818 compression_size); 819 820 // If block's compression factor increased, it may not be co-allocatable 821 // anymore. If so, some blocks might need to be evicted to make room for 822 // the bigger block 823 824 // Get previous compressed size 825 const std::size_t M5_VAR_USED prev_size = compression_blk->getSizeBits(); 826 827 // Check if new data is co-allocatable 828 const bool is_co_allocatable = superblock->isCompressed(compression_blk) && 829 superblock->canCoAllocate(compression_size); 830 831 // If block was compressed, possibly co-allocated with other blocks, and 832 // cannot be co-allocated anymore, one or more blocks must be evicted to 833 // make room for the expanded block. 
As of now we decide to evict the co- 834 // allocated blocks to make room for the expansion, but other approaches 835 // that take the replacement data of the superblock into account may 836 // generate better results 837 std::vector<CacheBlk*> evict_blks; 838 const bool was_compressed = compression_blk->isCompressed(); 839 if (was_compressed && !is_co_allocatable) { 840 // Get all co-allocated blocks 841 for (const auto& sub_blk : superblock->blks) { 842 if (sub_blk->isValid() && (compression_blk != sub_blk)) { 843 // Check for transient state allocations. If any of the 844 // entries listed for eviction has a transient state, the 845 // allocation fails 846 const Addr repl_addr = regenerateBlkAddr(sub_blk); 847 const MSHR *repl_mshr = 848 mshrQueue.findMatch(repl_addr, sub_blk->isSecure()); 849 if (repl_mshr) { 850 DPRINTF(CacheRepl, "Aborting data expansion of %s due " \ 851 "to replacement of block in transient state: %s\n", 852 compression_blk->print(), sub_blk->print()); 853 // Too hard to replace block with transient state, so it 854 // cannot be evicted. Mark the update as failed and expect 855 // the caller to evict this block. Since this is called 856 // only when writebacks arrive, and packets do not contain 857 // compressed data, there is no need to decompress 858 compression_blk->setSizeBits(blkSize * 8); 859 compression_blk->setDecompressionLatency(Cycles(0)); 860 compression_blk->setUncompressed(); 861 return false; 862 } 863 864 evict_blks.push_back(sub_blk); 865 } 866 } 867 868 // Update the number of data expansions 869 dataExpansions++; 870 871 DPRINTF(CacheComp, "Data expansion: expanding [%s] from %d to %d bits" 872 "\n", blk->print(), prev_size, compression_size); 873 } 874 875 // We always store compressed blocks when possible 876 if (is_co_allocatable) { 877 compression_blk->setCompressed(); 878 } else { 879 compression_blk->setUncompressed(); 880 } 881 compression_blk->setSizeBits(compression_size); 882 compression_blk->setDecompressionLatency(decompression_lat); 883 884 // Evict valid blocks 885 for (const auto& evict_blk : evict_blks) { 886 if (evict_blk->isValid()) { 887 if (evict_blk->wasPrefetched()) { 888 unusedPrefetches++; 889 } 890 evictBlock(evict_blk, writebacks); 891 } 892 } 893 894 return true; 895} 896 897void 898BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool) 899{ 900 assert(pkt->isRequest()); 901 902 assert(blk && blk->isValid()); 903 // Occasionally this is not true... if we are a lower-level cache 904 // satisfying a string of Read and ReadEx requests from 905 // upper-level caches, a Read will mark the block as shared but we 906 // can satisfy a following ReadEx anyway since we can rely on the 907 // Read requester(s) to have buffered the ReadEx snoop and to 908 // invalidate their blocks after receiving them. 
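    // Example of the situation described above: core A's Read leaves the
    // block Shared here, and core B's ReadEx arrives before A's cache has
    // applied the corresponding invalidating snoop; we still satisfy B's
    // request and rely on A to invalidate its copy using the snoop it has
    // already buffered.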
909 // assert(!pkt->needsWritable() || blk->isWritable()); 910 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize); 911 912 // Check RMW operations first since both isRead() and 913 // isWrite() will be true for them 914 if (pkt->cmd == MemCmd::SwapReq) { 915 if (pkt->isAtomicOp()) { 916 // extract data from cache and save it into the data field in 917 // the packet as a return value from this atomic op 918 int offset = tags->extractBlkOffset(pkt->getAddr()); 919 uint8_t *blk_data = blk->data + offset; 920 pkt->setData(blk_data); 921 922 // execute AMO operation 923 (*(pkt->getAtomicOp()))(blk_data); 924 925 // set block status to dirty 926 blk->status |= BlkDirty; 927 } else { 928 cmpAndSwap(blk, pkt); 929 } 930 } else if (pkt->isWrite()) { 931 // we have the block in a writable state and can go ahead, 932 // note that the line may be also be considered writable in 933 // downstream caches along the path to memory, but always 934 // Exclusive, and never Modified 935 assert(blk->isWritable()); 936 // Write or WriteLine at the first cache with block in writable state 937 if (blk->checkWrite(pkt)) { 938 pkt->writeDataToBlock(blk->data, blkSize); 939 } 940 // Always mark the line as dirty (and thus transition to the 941 // Modified state) even if we are a failed StoreCond so we 942 // supply data to any snoops that have appended themselves to 943 // this cache before knowing the store will fail. 944 blk->status |= BlkDirty; 945 DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print()); 946 } else if (pkt->isRead()) { 947 if (pkt->isLLSC()) { 948 blk->trackLoadLocked(pkt); 949 } 950 951 // all read responses have a data payload 952 assert(pkt->hasRespData()); 953 pkt->setDataFromBlock(blk->data, blkSize); 954 } else if (pkt->isUpgrade()) { 955 // sanity check 956 assert(!pkt->hasSharers()); 957 958 if (blk->isDirty()) { 959 // we were in the Owned state, and a cache above us that 960 // has the line in Shared state needs to be made aware 961 // that the data it already has is in fact dirty 962 pkt->setCacheResponding(); 963 blk->status &= ~BlkDirty; 964 } 965 } else if (pkt->isClean()) { 966 blk->status &= ~BlkDirty; 967 } else { 968 assert(pkt->isInvalidate()); 969 invalidateBlock(blk); 970 DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__, 971 pkt->print()); 972 } 973} 974 975///////////////////////////////////////////////////// 976// 977// Access path: requests coming in from the CPU side 978// 979///////////////////////////////////////////////////// 980Cycles 981BaseCache::calculateTagOnlyLatency(const uint32_t delay, 982 const Cycles lookup_lat) const 983{ 984 // A tag-only access has to wait for the packet to arrive in order to 985 // perform the tag lookup. 986 return ticksToCycles(delay) + lookup_lat; 987} 988 989Cycles 990BaseCache::calculateAccessLatency(const CacheBlk* blk, const uint32_t delay, 991 const Cycles lookup_lat) const 992{ 993 Cycles lat(0); 994 995 if (blk != nullptr) { 996 // As soon as the access arrives, for sequential accesses first access 997 // tags, then the data entry. In the case of parallel accesses the 998 // latency is dictated by the slowest of tag and data latencies. 999 if (sequentialAccess) { 1000 lat = ticksToCycles(delay) + lookup_lat + dataLatency; 1001 } else { 1002 lat = ticksToCycles(delay) + std::max(lookup_lat, dataLatency); 1003 } 1004 1005 // Check if the block to be accessed is available. If not, apply the 1006 // access latency on top of when the block is ready to be accessed. 
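        // Worked example (illustrative numbers only): with no xbar delay,
        // lookup_lat = 2 cycles and dataLatency = 4 cycles, a sequential
        // hit costs 2 + 4 = 6 cycles and a parallel hit max(2, 4) = 4
        // cycles. If the block only becomes ready 10 cycles from now
        // (e.g. its fill is still in flight), that wait exceeds the access
        // latency and is added on top, giving 4 + 10 = 14 cycles in the
        // parallel case.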
1007 const Tick tick = curTick() + delay; 1008 const Tick when_ready = blk->getWhenReady(); 1009 if (when_ready > tick && 1010 ticksToCycles(when_ready - tick) > lat) { 1011 lat += ticksToCycles(when_ready - tick); 1012 } 1013 } else { 1014 // In case of a miss, we neglect the data access in a parallel 1015 // configuration (i.e., the data access will be stopped as soon as 1016 // we find out it is a miss), and use the tag-only latency. 1017 lat = calculateTagOnlyLatency(delay, lookup_lat); 1018 } 1019 1020 return lat; 1021} 1022 1023bool 1024BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, 1025 PacketList &writebacks) 1026{ 1027 // sanity check 1028 assert(pkt->isRequest()); 1029 1030 chatty_assert(!(isReadOnly && pkt->isWrite()), 1031 "Should never see a write in a read-only cache %s\n", 1032 name()); 1033 1034 // Access block in the tags 1035 Cycles tag_latency(0); 1036 blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), tag_latency); 1037 1038 DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(), 1039 blk ? "hit " + blk->print() : "miss"); 1040 1041 if (pkt->req->isCacheMaintenance()) { 1042 // A cache maintenance operation is always forwarded to the 1043 // memory below even if the block is found in dirty state. 1044 1045 // We defer any changes to the state of the block until we 1046 // create and mark as in service the mshr for the downstream 1047 // packet. 1048 1049 // Calculate access latency on top of when the packet arrives. This 1050 // takes into account the bus delay. 1051 lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 1052 1053 return false; 1054 } 1055 1056 if (pkt->isEviction()) { 1057 // We check for presence of block in above caches before issuing 1058 // Writeback or CleanEvict to write buffer. Therefore the only 1059 // possible cases can be of a CleanEvict packet coming from above 1060 // encountering a Writeback generated in this cache peer cache and 1061 // waiting in the write buffer. Cases of upper level peer caches 1062 // generating CleanEvict and Writeback or simply CleanEvict and 1063 // CleanEvict almost simultaneously will be caught by snoops sent out 1064 // by crossbar. 1065 WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(), 1066 pkt->isSecure()); 1067 if (wb_entry) { 1068 assert(wb_entry->getNumTargets() == 1); 1069 PacketPtr wbPkt = wb_entry->getTarget()->pkt; 1070 assert(wbPkt->isWriteback()); 1071 1072 if (pkt->isCleanEviction()) { 1073 // The CleanEvict and WritebackClean snoops into other 1074 // peer caches of the same level while traversing the 1075 // crossbar. If a copy of the block is found, the 1076 // packet is deleted in the crossbar. Hence, none of 1077 // the other upper level caches connected to this 1078 // cache have the block, so we can clear the 1079 // BLOCK_CACHED flag in the Writeback if set and 1080 // discard the CleanEvict by returning true. 1081 wbPkt->clearBlockCached(); 1082 1083 // A clean evict does not need to access the data array 1084 lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 1085 1086 return true; 1087 } else { 1088 assert(pkt->cmd == MemCmd::WritebackDirty); 1089 // Dirty writeback from above trumps our clean 1090 // writeback... discard here 1091 // Note: markInService will remove entry from writeback buffer. 1092 markInService(wb_entry); 1093 delete wbPkt; 1094 } 1095 } 1096 } 1097 1098 // Writeback handling is special case. We can write the block into 1099 // the cache without having a writeable copy (or any copy at all). 
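    // The cases handled below therefore fall into three groups: writebacks
    // and WriteCleans, which may allocate and complete locally; CleanEvicts,
    // which only need a tag lookup; and ordinary reads/writes, which hit
    // only if the block is present with sufficient permissions (readable,
    // or writable for writes).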
1100 if (pkt->isWriteback()) { 1101 assert(blkSize == pkt->getSize()); 1102 1103 // we could get a clean writeback while we are having 1104 // outstanding accesses to a block, do the simple thing for 1105 // now and drop the clean writeback so that we do not upset 1106 // any ordering/decisions about ownership already taken 1107 if (pkt->cmd == MemCmd::WritebackClean && 1108 mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) { 1109 DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, " 1110 "dropping\n", pkt->getAddr()); 1111 1112 // A writeback searches for the block, then writes the data. 1113 // As the writeback is being dropped, the data is not touched, 1114 // and we just had to wait for the time to find a match in the 1115 // MSHR. As of now assume a mshr queue search takes as long as 1116 // a tag lookup for simplicity. 1117 lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 1118 1119 return true; 1120 } 1121 1122 if (!blk) { 1123 // need to do a replacement 1124 blk = allocateBlock(pkt, writebacks); 1125 if (!blk) { 1126 // no replaceable block available: give up, fwd to next level. 1127 incMissCount(pkt); 1128 1129 // A writeback searches for the block, then writes the data. 1130 // As the block could not be found, it was a tag-only access. 1131 lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 1132 1133 return false; 1134 } 1135 1136 blk->status |= BlkReadable; 1137 } else if (compressor) { 1138 // This is an overwrite to an existing block, therefore we need 1139 // to check for data expansion (i.e., block was compressed with 1140 // a smaller size, and now it doesn't fit the entry anymore). 1141 // If that is the case we might need to evict blocks. 1142 if (!updateCompressionData(blk, pkt->getConstPtr<uint64_t>(), 1143 writebacks)) { 1144 // This is a failed data expansion (write), which happened 1145 // after finding the replacement entries and accessing the 1146 // block's data. There were no replaceable entries available 1147 // to make room for the expanded block, and since it does not 1148 // fit anymore and it has been properly updated to contain 1149 // the new data, forward it to the next level 1150 lat = calculateAccessLatency(blk, pkt->headerDelay, 1151 tag_latency); 1152 invalidateBlock(blk); 1153 return false; 1154 } 1155 } 1156 1157 // only mark the block dirty if we got a writeback command, 1158 // and leave it as is for a clean writeback 1159 if (pkt->cmd == MemCmd::WritebackDirty) { 1160 // TODO: the coherent cache can assert(!blk->isDirty()); 1161 blk->status |= BlkDirty; 1162 } 1163 // if the packet does not have sharers, it is passing 1164 // writable, and we got the writeback in Modified or Exclusive 1165 // state, if not we are in the Owned or Shared state 1166 if (!pkt->hasSharers()) { 1167 blk->status |= BlkWritable; 1168 } 1169 // nothing else to do; writeback doesn't expect response 1170 assert(!pkt->needsResponse()); 1171 pkt->writeDataToBlock(blk->data, blkSize); 1172 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print()); 1173 incHitCount(pkt); 1174 1175 // A writeback searches for the block, then writes the data 1176 lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency); 1177 1178 // When the packet metadata arrives, the tag lookup will be done while 1179 // the payload is arriving. 
Then the block will be ready to access as 1180 // soon as the fill is done 1181 blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay + 1182 std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay)); 1183 1184 return true; 1185 } else if (pkt->cmd == MemCmd::CleanEvict) { 1186 // A CleanEvict does not need to access the data array 1187 lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 1188 1189 if (blk) { 1190 // Found the block in the tags, need to stop CleanEvict from 1191 // propagating further down the hierarchy. Returning true will 1192 // treat the CleanEvict like a satisfied write request and delete 1193 // it. 1194 return true; 1195 } 1196 // We didn't find the block here, propagate the CleanEvict further 1197 // down the memory hierarchy. Returning false will treat the CleanEvict 1198 // like a Writeback which could not find a replaceable block so has to 1199 // go to next level. 1200 return false; 1201 } else if (pkt->cmd == MemCmd::WriteClean) { 1202 // WriteClean handling is a special case. We can allocate a 1203 // block directly if it doesn't exist and we can update the 1204 // block immediately. The WriteClean transfers the ownership 1205 // of the block as well. 1206 assert(blkSize == pkt->getSize()); 1207 1208 if (!blk) { 1209 if (pkt->writeThrough()) { 1210 // A writeback searches for the block, then writes the data. 1211 // As the block could not be found, it was a tag-only access. 1212 lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 1213 1214 // if this is a write through packet, we don't try to 1215 // allocate if the block is not present 1216 return false; 1217 } else { 1218 // a writeback that misses needs to allocate a new block 1219 blk = allocateBlock(pkt, writebacks); 1220 if (!blk) { 1221 // no replaceable block available: give up, fwd to 1222 // next level. 1223 incMissCount(pkt); 1224 1225 // A writeback searches for the block, then writes the 1226 // data. As the block could not be found, it was a tag-only 1227 // access. 1228 lat = calculateTagOnlyLatency(pkt->headerDelay, 1229 tag_latency); 1230 1231 return false; 1232 } 1233 1234 blk->status |= BlkReadable; 1235 } 1236 } else if (compressor) { 1237 // This is an overwrite to an existing block, therefore we need 1238 // to check for data expansion (i.e., block was compressed with 1239 // a smaller size, and now it doesn't fit the entry anymore). 1240 // If that is the case we might need to evict blocks. 1241 if (!updateCompressionData(blk, pkt->getConstPtr<uint64_t>(), 1242 writebacks)) { 1243 // This is a failed data expansion (write), which happened 1244 // after finding the replacement entries and accessing the 1245 // block's data. 
There were no replaceable entries available 1246 // to make room for the expanded block, and since it does not 1247 // fit anymore and it has been properly updated to contain 1248 // the new data, forward it to the next level 1249 lat = calculateAccessLatency(blk, pkt->headerDelay, 1250 tag_latency); 1251 invalidateBlock(blk); 1252 return false; 1253 } 1254 } 1255 1256 // at this point either this is a writeback or a write-through 1257 // write clean operation and the block is already in this 1258 // cache, we need to update the data and the block flags 1259 assert(blk); 1260 // TODO: the coherent cache can assert(!blk->isDirty()); 1261 if (!pkt->writeThrough()) { 1262 blk->status |= BlkDirty; 1263 } 1264 // nothing else to do; writeback doesn't expect response 1265 assert(!pkt->needsResponse()); 1266 pkt->writeDataToBlock(blk->data, blkSize); 1267 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print()); 1268 1269 incHitCount(pkt); 1270 1271 // A writeback searches for the block, then writes the data 1272 lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency); 1273 1274 // When the packet metadata arrives, the tag lookup will be done while 1275 // the payload is arriving. Then the block will be ready to access as 1276 // soon as the fill is done 1277 blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay + 1278 std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay)); 1279 1280 // If this a write-through packet it will be sent to cache below 1281 return !pkt->writeThrough(); 1282 } else if (blk && (pkt->needsWritable() ? blk->isWritable() : 1283 blk->isReadable())) { 1284 // OK to satisfy access 1285 incHitCount(pkt); 1286 1287 // Calculate access latency based on the need to access the data array 1288 if (pkt->isRead() || pkt->isWrite()) { 1289 lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency); 1290 1291 // When a block is compressed, it must first be decompressed 1292 // before being read. This adds to the access latency. 1293 if (compressor && pkt->isRead()) { 1294 lat += compressor->getDecompressionLatency(blk); 1295 } 1296 } else { 1297 lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 1298 } 1299 1300 satisfyRequest(pkt, blk); 1301 maintainClusivity(pkt->fromCache(), blk); 1302 1303 return true; 1304 } 1305 1306 // Can't satisfy access normally... either no block (blk == nullptr) 1307 // or have block but need writable 1308 1309 incMissCount(pkt); 1310 1311 lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency); 1312 1313 if (!blk && pkt->isLLSC() && pkt->isWrite()) { 1314 // complete miss on store conditional... just give up now 1315 pkt->req->setExtraData(0); 1316 return true; 1317 } 1318 1319 return false; 1320} 1321 1322void 1323BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk) 1324{ 1325 if (from_cache && blk && blk->isValid() && !blk->isDirty() && 1326 clusivity == Enums::mostly_excl) { 1327 // if we have responded to a cache, and our block is still 1328 // valid, but not dirty, and this cache is mostly exclusive 1329 // with respect to the cache above, drop the block 1330 invalidateBlock(blk); 1331 } 1332} 1333 1334CacheBlk* 1335BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks, 1336 bool allocate) 1337{ 1338 assert(pkt->isResponse()); 1339 Addr addr = pkt->getAddr(); 1340 bool is_secure = pkt->isSecure(); 1341#if TRACING_ON 1342 CacheBlk::State old_state = blk ? blk->status : 0; 1343#endif 1344 1345 // When handling a fill, we should have no writes to this line. 
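    // (ordinary stores to a missing line are held as MSHR targets and only
    // applied by serviceMSHRTargets() after this fill, and any writeback
    // for the same line would have had to be ordered before the miss was
    // issued, hence the write buffer check below)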
1346 assert(addr == pkt->getBlockAddr(blkSize)); 1347 assert(!writeBuffer.findMatch(addr, is_secure)); 1348 1349 if (!blk) { 1350 // better have read new data... 1351 assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp); 1352 1353 // need to do a replacement if allocating, otherwise we stick 1354 // with the temporary storage 1355 blk = allocate ? allocateBlock(pkt, writebacks) : nullptr; 1356 1357 if (!blk) { 1358 // No replaceable block or a mostly exclusive 1359 // cache... just use temporary storage to complete the 1360 // current request and then get rid of it 1361 blk = tempBlock; 1362 tempBlock->insert(addr, is_secure); 1363 DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr, 1364 is_secure ? "s" : "ns"); 1365 } 1366 } else { 1367 // existing block... probably an upgrade 1368 // don't clear block status... if block is already dirty we 1369 // don't want to lose that 1370 } 1371 1372 // Block is guaranteed to be valid at this point 1373 assert(blk->isValid()); 1374 assert(blk->isSecure() == is_secure); 1375 assert(regenerateBlkAddr(blk) == addr); 1376 1377 blk->status |= BlkReadable; 1378 1379 // sanity check for whole-line writes, which should always be 1380 // marked as writable as part of the fill, and then later marked 1381 // dirty as part of satisfyRequest 1382 if (pkt->cmd == MemCmd::InvalidateResp) { 1383 assert(!pkt->hasSharers()); 1384 } 1385 1386 // here we deal with setting the appropriate state of the line, 1387 // and we start by looking at the hasSharers flag, and ignore the 1388 // cacheResponding flag (normally signalling dirty data) if the 1389 // packet has sharers, thus the line is never allocated as Owned 1390 // (dirty but not writable), and always ends up being either 1391 // Shared, Exclusive or Modified, see Packet::setCacheResponding 1392 // for more details 1393 if (!pkt->hasSharers()) { 1394 // we could get a writable line from memory (rather than a 1395 // cache) even in a read-only cache, note that we set this bit 1396 // even for a read-only cache, possibly revisit this decision 1397 blk->status |= BlkWritable; 1398 1399 // check if we got this via cache-to-cache transfer (i.e., from a 1400 // cache that had the block in Modified or Owned state) 1401 if (pkt->cacheResponding()) { 1402 // we got the block in Modified state, and invalidated the 1403 // owners copy 1404 blk->status |= BlkDirty; 1405 1406 chatty_assert(!isReadOnly, "Should never see dirty snoop response " 1407 "in read-only cache %s\n", name()); 1408 1409 } 1410 } 1411 1412 DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n", 1413 addr, is_secure ? "s" : "ns", old_state, blk->print()); 1414 1415 // if we got new data, copy it in (checking for a read response 1416 // and a response that has data is the same in the end) 1417 if (pkt->isRead()) { 1418 // sanity checks 1419 assert(pkt->hasData()); 1420 assert(pkt->getSize() == blkSize); 1421 1422 pkt->writeDataToBlock(blk->data, blkSize); 1423 } 1424 // The block will be ready when the payload arrives and the fill is done 1425 blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay + 1426 pkt->payloadDelay); 1427 1428 return blk; 1429} 1430 1431CacheBlk* 1432BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks) 1433{ 1434 // Get address 1435 const Addr addr = pkt->getAddr(); 1436 1437 // Get secure bit 1438 const bool is_secure = pkt->isSecure(); 1439 1440 // Block size and compression related access latency. 
Only relevant if 1441 // using a compressor, otherwise there is no extra delay, and the block 1442 // is fully sized 1443 std::size_t blk_size_bits = blkSize*8; 1444 Cycles compression_lat = Cycles(0); 1445 Cycles decompression_lat = Cycles(0); 1446 1447 // If a compressor is being used, it is called to compress data before 1448 // insertion. Although in Gem5 the data is stored uncompressed, even if a 1449 // compressor is used, the compression/decompression methods are called to 1450 // calculate the amount of extra cycles needed to read or write compressed 1451 // blocks. 1452 if (compressor) { 1453 compressor->compress(pkt->getConstPtr<uint64_t>(), compression_lat, 1454 decompression_lat, blk_size_bits); 1455 } 1456 1457 // Find replacement victim 1458 std::vector<CacheBlk*> evict_blks; 1459 CacheBlk *victim = tags->findVictim(addr, is_secure, blk_size_bits, 1460 evict_blks); 1461 1462 // It is valid to return nullptr if there is no victim 1463 if (!victim) 1464 return nullptr; 1465 1466 // Print victim block's information 1467 DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print()); 1468 1469 // Check for transient state allocations. If any of the entries listed 1470 // for eviction has a transient state, the allocation fails 1471 bool replacement = false; 1472 for (const auto& blk : evict_blks) { 1473 if (blk->isValid()) { 1474 replacement = true; 1475 1476 Addr repl_addr = regenerateBlkAddr(blk); 1477 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure()); 1478 if (repl_mshr) { 1479 // must be an outstanding upgrade or clean request 1480 // on a block we're about to replace... 1481 assert((!blk->isWritable() && repl_mshr->needsWritable()) || 1482 repl_mshr->isCleaning()); 1483 1484 // too hard to replace block with transient state 1485 // allocation failed, block not inserted 1486 return nullptr; 1487 } 1488 } 1489 } 1490 1491 // The victim will be replaced by a new entry, so increase the replacement 1492 // counter if a valid block is being replaced 1493 if (replacement) { 1494 // Evict valid blocks associated to this victim block 1495 for (const auto& blk : evict_blks) { 1496 if (blk->isValid()) { 1497 DPRINTF(CacheRepl, "Evicting %s (%#llx) to make room for " \ 1498 "%#llx (%s)\n", blk->print(), regenerateBlkAddr(blk), 1499 addr, is_secure); 1500 1501 if (blk->wasPrefetched()) { 1502 unusedPrefetches++; 1503 } 1504 1505 evictBlock(blk, writebacks); 1506 } 1507 } 1508 1509 replacements++; 1510 } 1511 1512 // If using a compressor, set compression data. This must be done before 1513 // block insertion, as compressed tags use this information. 
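    // This mirrors what updateCompressionData() does on writes: the size in
    // bits and the decompression latency are stored in the block's metadata
    // so that the compressed (superblock) tags can later decide whether
    // other blocks may be co-allocated in the same entry.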
1514 if (compressor) { 1515 compressor->setSizeBits(victim, blk_size_bits); 1516 compressor->setDecompressionLatency(victim, decompression_lat); 1517 } 1518 1519 // Insert new block at victimized entry 1520 tags->insertBlock(pkt, victim); 1521 1522 return victim; 1523} 1524 1525void 1526BaseCache::invalidateBlock(CacheBlk *blk) 1527{ 1528 // If handling a block present in the Tags, let it do its invalidation 1529 // process, which will update stats and invalidate the block itself 1530 if (blk != tempBlock) { 1531 tags->invalidate(blk); 1532 } else { 1533 tempBlock->invalidate(); 1534 } 1535} 1536 1537void 1538BaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks) 1539{ 1540 PacketPtr pkt = evictBlock(blk); 1541 if (pkt) { 1542 writebacks.push_back(pkt); 1543 } 1544} 1545 1546PacketPtr 1547BaseCache::writebackBlk(CacheBlk *blk) 1548{ 1549 chatty_assert(!isReadOnly || writebackClean, 1550 "Writeback from read-only cache"); 1551 assert(blk && blk->isValid() && (blk->isDirty() || writebackClean)); 1552 1553 writebacks[Request::wbMasterId]++; 1554 1555 RequestPtr req = std::make_shared<Request>( 1556 regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId); 1557 1558 if (blk->isSecure()) 1559 req->setFlags(Request::SECURE); 1560 1561 req->taskId(blk->task_id); 1562 1563 PacketPtr pkt = 1564 new Packet(req, blk->isDirty() ? 1565 MemCmd::WritebackDirty : MemCmd::WritebackClean); 1566 1567 DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n", 1568 pkt->print(), blk->isWritable(), blk->isDirty()); 1569 1570 if (blk->isWritable()) { 1571 // not asserting shared means we pass the block in modified 1572 // state, mark our own block non-writeable 1573 blk->status &= ~BlkWritable; 1574 } else { 1575 // we are in the Owned state, tell the receiver 1576 pkt->setHasSharers(); 1577 } 1578 1579 // make sure the block is not marked dirty 1580 blk->status &= ~BlkDirty; 1581 1582 pkt->allocate(); 1583 pkt->setDataFromBlock(blk->data, blkSize); 1584 1585 // When a block is compressed, it must first be decompressed before being 1586 // sent for writeback. 1587 if (compressor) { 1588 pkt->payloadDelay = compressor->getDecompressionLatency(blk); 1589 } 1590 1591 return pkt; 1592} 1593 1594PacketPtr 1595BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id) 1596{ 1597 RequestPtr req = std::make_shared<Request>( 1598 regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId); 1599 1600 if (blk->isSecure()) { 1601 req->setFlags(Request::SECURE); 1602 } 1603 req->taskId(blk->task_id); 1604 1605 PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id); 1606 1607 if (dest) { 1608 req->setFlags(dest); 1609 pkt->setWriteThrough(); 1610 } 1611 1612 DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(), 1613 blk->isWritable(), blk->isDirty()); 1614 1615 if (blk->isWritable()) { 1616 // not asserting shared means we pass the block in modified 1617 // state, mark our own block non-writeable 1618 blk->status &= ~BlkWritable; 1619 } else { 1620 // we are in the Owned state, tell the receiver 1621 pkt->setHasSharers(); 1622 } 1623 1624 // make sure the block is not marked dirty 1625 blk->status &= ~BlkDirty; 1626 1627 pkt->allocate(); 1628 pkt->setDataFromBlock(blk->data, blkSize); 1629 1630 // When a block is compressed, it must first be decompressed before being 1631 // sent for writeback. 
    if (compressor) {
        pkt->payloadDelay = compressor->getDecompressionLatency(blk);
    }

    return pkt;
}


void
BaseCache::memWriteback()
{
    tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
}

void
BaseCache::memInvalidate()
{
    tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
}

bool
BaseCache::isDirty() const
{
    return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); });
}

bool
BaseCache::coalesce() const
{
    return writeAllocator && writeAllocator->coalesce();
}

void
BaseCache::writebackVisitor(CacheBlk &blk)
{
    if (blk.isDirty()) {
        assert(blk.isValid());

        RequestPtr request = std::make_shared<Request>(
            regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId);

        request->taskId(blk.task_id);
        if (blk.isSecure()) {
            request->setFlags(Request::SECURE);
        }

        Packet packet(request, MemCmd::WriteReq);
        packet.dataStatic(blk.data);

        memSidePort.sendFunctional(&packet);

        blk.status &= ~BlkDirty;
    }
}

void
BaseCache::invalidateVisitor(CacheBlk &blk)
{
    if (blk.isDirty())
        warn_once("Invalidating dirty cache lines. " \
                  "Expect things to break.\n");

    if (blk.isValid()) {
        assert(!blk.isDirty());
        invalidateBlock(&blk);
    }
}

Tick
BaseCache::nextQueueReadyTime() const
{
    Tick nextReady = std::min(mshrQueue.nextReadyTime(),
                              writeBuffer.nextReadyTime());

    // Don't signal prefetch ready time if no MSHRs available
    // Will signal once enough MSHRs are deallocated
    if (prefetcher && mshrQueue.canPrefetch()) {
        nextReady = std::min(nextReady,
                             prefetcher->nextPrefetchReadyTime());
    }

    return nextReady;
}


bool
BaseCache::sendMSHRQueuePacket(MSHR* mshr)
{
    assert(mshr);

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;

    DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());

    // if the cache is in write coalescing mode or (additionally) in
    // no allocation mode, and we have a write packet with an MSHR
    // that is not a whole-line write (due to incompatible flags etc),
    // then reset the write mode
    if (writeAllocator && writeAllocator->coalesce() && tgt_pkt->isWrite()) {
        if (!mshr->isWholeLineWrite()) {
            // if we are currently write coalescing, hold on the
            // MSHR as many cycles extra as we need to completely
            // write a cache line
            if (writeAllocator->delay(mshr->blkAddr)) {
                Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod();
                DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow "
                        "for write coalescing\n", tgt_pkt->print(), delay);
                mshrQueue.delay(mshr, delay);
                return false;
            } else {
                writeAllocator->reset();
            }
        } else {
            writeAllocator->resetDelay(mshr->blkAddr);
        }
    }

    CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);

    // either a prefetch that is not present upstream, or a normal
    // MSHR request, proceed to get the packet to send downstream
    PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable(),
                                     mshr->isWholeLineWrite());

    mshr->isForward = (pkt == nullptr);

    if (mshr->isForward) {
        // not a cache block request, but a response is expected
        // make copy of current packet to forward, keep current
        // copy for response handling
        pkt = new Packet(tgt_pkt, false, true);
        assert(!pkt->isWrite());
    }

    // play it safe and append (rather than set) the sender state,
    // as forwarded packets may already have existing state
    pkt->pushSenderState(mshr);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty block. Mark
        // the packet so that the destination xbar can determine that
        // there will be a follow-up write packet as well.
        pkt->setSatisfied();
    }

    if (!memSidePort.sendTimingReq(pkt)) {
        // we are awaiting a retry, but we
        // delete the packet and will be creating a new packet
        // when we get the opportunity
        delete pkt;

        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        // As part of the call to sendTimingReq the packet is
        // forwarded to all neighbouring caches (and any caches
        // above them) as a snoop. Thus at this point we know if
        // any of the neighbouring caches are responding, and if
        // so, we know it is dirty, and we can determine if it is
        // being passed as Modified, making our MSHR the ordering
        // point
        bool pending_modified_resp = !pkt->hasSharers() &&
            pkt->cacheResponding();
        markInService(mshr, pending_modified_resp);

        if (pkt->isClean() && blk && blk->isDirty()) {
            // A cache clean operation is looking for a dirty
            // block. If a dirty block is encountered, a WriteClean
            // will update any copies on the path to memory up to
            // the point of reference.
            DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
                                             pkt->id);
            PacketList writebacks;
            writebacks.push_back(wb_pkt);
            doWritebacks(writebacks, 0);
        }

        return false;
    }
}

bool
BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
{
    assert(wq_entry);

    // always a single target for write queue entries
    PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;

    DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());

    // forward as is, both for evictions and uncacheable writes
    if (!memSidePort.sendTimingReq(tgt_pkt)) {
        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        markInService(wq_entry);
        return false;
    }
}

void
BaseCache::serialize(CheckpointOut &cp) const
{
    bool dirty(isDirty());

    if (dirty) {
        warn("*** The cache still contains dirty data. ***\n");
        warn("    Make sure to drain the system using the correct flags.\n");
        warn("    This checkpoint will not restore correctly " \
             "and dirty data in the cache will be lost!\n");
    }

    // Since we don't checkpoint the data in the cache, any dirty data
    // will be lost when restoring from a checkpoint of a system that
    // wasn't drained properly.
Flag the checkpoint as invalid if the 1859 // cache contains dirty data. 1860 bool bad_checkpoint(dirty); 1861 SERIALIZE_SCALAR(bad_checkpoint); 1862} 1863 1864void 1865BaseCache::unserialize(CheckpointIn &cp) 1866{ 1867 bool bad_checkpoint; 1868 UNSERIALIZE_SCALAR(bad_checkpoint); 1869 if (bad_checkpoint) { 1870 fatal("Restoring from checkpoints with dirty caches is not " 1871 "supported in the classic memory system. Please remove any " 1872 "caches or drain them properly before taking checkpoints.\n"); 1873 } 1874} 1875 1876void 1877BaseCache::regStats() 1878{ 1879 ClockedObject::regStats(); 1880 1881 using namespace Stats; 1882 1883 // Hit statistics 1884 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1885 MemCmd cmd(access_idx); 1886 const string &cstr = cmd.toString(); 1887 1888 hits[access_idx] 1889 .init(system->maxMasters()) 1890 .name(name() + "." + cstr + "_hits") 1891 .desc("number of " + cstr + " hits") 1892 .flags(total | nozero | nonan) 1893 ; 1894 for (int i = 0; i < system->maxMasters(); i++) { 1895 hits[access_idx].subname(i, system->getMasterName(i)); 1896 } 1897 } 1898 1899// These macros make it easier to sum the right subset of commands and 1900// to change the subset of commands that are considered "demand" vs 1901// "non-demand" 1902#define SUM_DEMAND(s) \ 1903 (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \ 1904 s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq]) 1905 1906// should writebacks be included here? prior code was inconsistent... 1907#define SUM_NON_DEMAND(s) \ 1908 (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq] + s[MemCmd::SoftPFExReq]) 1909 1910 demandHits 1911 .name(name() + ".demand_hits") 1912 .desc("number of demand (read+write) hits") 1913 .flags(total | nozero | nonan) 1914 ; 1915 demandHits = SUM_DEMAND(hits); 1916 for (int i = 0; i < system->maxMasters(); i++) { 1917 demandHits.subname(i, system->getMasterName(i)); 1918 } 1919 1920 overallHits 1921 .name(name() + ".overall_hits") 1922 .desc("number of overall hits") 1923 .flags(total | nozero | nonan) 1924 ; 1925 overallHits = demandHits + SUM_NON_DEMAND(hits); 1926 for (int i = 0; i < system->maxMasters(); i++) { 1927 overallHits.subname(i, system->getMasterName(i)); 1928 } 1929 1930 // Miss statistics 1931 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1932 MemCmd cmd(access_idx); 1933 const string &cstr = cmd.toString(); 1934 1935 misses[access_idx] 1936 .init(system->maxMasters()) 1937 .name(name() + "." 
+ cstr + "_misses") 1938 .desc("number of " + cstr + " misses") 1939 .flags(total | nozero | nonan) 1940 ; 1941 for (int i = 0; i < system->maxMasters(); i++) { 1942 misses[access_idx].subname(i, system->getMasterName(i)); 1943 } 1944 } 1945 1946 demandMisses 1947 .name(name() + ".demand_misses") 1948 .desc("number of demand (read+write) misses") 1949 .flags(total | nozero | nonan) 1950 ; 1951 demandMisses = SUM_DEMAND(misses); 1952 for (int i = 0; i < system->maxMasters(); i++) { 1953 demandMisses.subname(i, system->getMasterName(i)); 1954 } 1955 1956 overallMisses 1957 .name(name() + ".overall_misses") 1958 .desc("number of overall misses") 1959 .flags(total | nozero | nonan) 1960 ; 1961 overallMisses = demandMisses + SUM_NON_DEMAND(misses); 1962 for (int i = 0; i < system->maxMasters(); i++) { 1963 overallMisses.subname(i, system->getMasterName(i)); 1964 } 1965 1966 // Miss latency statistics 1967 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1968 MemCmd cmd(access_idx); 1969 const string &cstr = cmd.toString(); 1970 1971 missLatency[access_idx] 1972 .init(system->maxMasters()) 1973 .name(name() + "." + cstr + "_miss_latency") 1974 .desc("number of " + cstr + " miss cycles") 1975 .flags(total | nozero | nonan) 1976 ; 1977 for (int i = 0; i < system->maxMasters(); i++) { 1978 missLatency[access_idx].subname(i, system->getMasterName(i)); 1979 } 1980 } 1981 1982 demandMissLatency 1983 .name(name() + ".demand_miss_latency") 1984 .desc("number of demand (read+write) miss cycles") 1985 .flags(total | nozero | nonan) 1986 ; 1987 demandMissLatency = SUM_DEMAND(missLatency); 1988 for (int i = 0; i < system->maxMasters(); i++) { 1989 demandMissLatency.subname(i, system->getMasterName(i)); 1990 } 1991 1992 overallMissLatency 1993 .name(name() + ".overall_miss_latency") 1994 .desc("number of overall miss cycles") 1995 .flags(total | nozero | nonan) 1996 ; 1997 overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency); 1998 for (int i = 0; i < system->maxMasters(); i++) { 1999 overallMissLatency.subname(i, system->getMasterName(i)); 2000 } 2001 2002 // access formulas 2003 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2004 MemCmd cmd(access_idx); 2005 const string &cstr = cmd.toString(); 2006 2007 accesses[access_idx] 2008 .name(name() + "." 
+ cstr + "_accesses") 2009 .desc("number of " + cstr + " accesses(hits+misses)") 2010 .flags(total | nozero | nonan) 2011 ; 2012 accesses[access_idx] = hits[access_idx] + misses[access_idx]; 2013 2014 for (int i = 0; i < system->maxMasters(); i++) { 2015 accesses[access_idx].subname(i, system->getMasterName(i)); 2016 } 2017 } 2018 2019 demandAccesses 2020 .name(name() + ".demand_accesses") 2021 .desc("number of demand (read+write) accesses") 2022 .flags(total | nozero | nonan) 2023 ; 2024 demandAccesses = demandHits + demandMisses; 2025 for (int i = 0; i < system->maxMasters(); i++) { 2026 demandAccesses.subname(i, system->getMasterName(i)); 2027 } 2028 2029 overallAccesses 2030 .name(name() + ".overall_accesses") 2031 .desc("number of overall (read+write) accesses") 2032 .flags(total | nozero | nonan) 2033 ; 2034 overallAccesses = overallHits + overallMisses; 2035 for (int i = 0; i < system->maxMasters(); i++) { 2036 overallAccesses.subname(i, system->getMasterName(i)); 2037 } 2038 2039 // miss rate formulas 2040 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2041 MemCmd cmd(access_idx); 2042 const string &cstr = cmd.toString(); 2043 2044 missRate[access_idx] 2045 .name(name() + "." + cstr + "_miss_rate") 2046 .desc("miss rate for " + cstr + " accesses") 2047 .flags(total | nozero | nonan) 2048 ; 2049 missRate[access_idx] = misses[access_idx] / accesses[access_idx]; 2050 2051 for (int i = 0; i < system->maxMasters(); i++) { 2052 missRate[access_idx].subname(i, system->getMasterName(i)); 2053 } 2054 } 2055 2056 demandMissRate 2057 .name(name() + ".demand_miss_rate") 2058 .desc("miss rate for demand accesses") 2059 .flags(total | nozero | nonan) 2060 ; 2061 demandMissRate = demandMisses / demandAccesses; 2062 for (int i = 0; i < system->maxMasters(); i++) { 2063 demandMissRate.subname(i, system->getMasterName(i)); 2064 } 2065 2066 overallMissRate 2067 .name(name() + ".overall_miss_rate") 2068 .desc("miss rate for overall accesses") 2069 .flags(total | nozero | nonan) 2070 ; 2071 overallMissRate = overallMisses / overallAccesses; 2072 for (int i = 0; i < system->maxMasters(); i++) { 2073 overallMissRate.subname(i, system->getMasterName(i)); 2074 } 2075 2076 // miss latency formulas 2077 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2078 MemCmd cmd(access_idx); 2079 const string &cstr = cmd.toString(); 2080 2081 avgMissLatency[access_idx] 2082 .name(name() + "." 
+ cstr + "_avg_miss_latency") 2083 .desc("average " + cstr + " miss latency") 2084 .flags(total | nozero | nonan) 2085 ; 2086 avgMissLatency[access_idx] = 2087 missLatency[access_idx] / misses[access_idx]; 2088 2089 for (int i = 0; i < system->maxMasters(); i++) { 2090 avgMissLatency[access_idx].subname(i, system->getMasterName(i)); 2091 } 2092 } 2093 2094 demandAvgMissLatency 2095 .name(name() + ".demand_avg_miss_latency") 2096 .desc("average overall miss latency") 2097 .flags(total | nozero | nonan) 2098 ; 2099 demandAvgMissLatency = demandMissLatency / demandMisses; 2100 for (int i = 0; i < system->maxMasters(); i++) { 2101 demandAvgMissLatency.subname(i, system->getMasterName(i)); 2102 } 2103 2104 overallAvgMissLatency 2105 .name(name() + ".overall_avg_miss_latency") 2106 .desc("average overall miss latency") 2107 .flags(total | nozero | nonan) 2108 ; 2109 overallAvgMissLatency = overallMissLatency / overallMisses; 2110 for (int i = 0; i < system->maxMasters(); i++) { 2111 overallAvgMissLatency.subname(i, system->getMasterName(i)); 2112 } 2113 2114 blocked_cycles.init(NUM_BLOCKED_CAUSES); 2115 blocked_cycles 2116 .name(name() + ".blocked_cycles") 2117 .desc("number of cycles access was blocked") 2118 .subname(Blocked_NoMSHRs, "no_mshrs") 2119 .subname(Blocked_NoTargets, "no_targets") 2120 ; 2121 2122 2123 blocked_causes.init(NUM_BLOCKED_CAUSES); 2124 blocked_causes 2125 .name(name() + ".blocked") 2126 .desc("number of cycles access was blocked") 2127 .subname(Blocked_NoMSHRs, "no_mshrs") 2128 .subname(Blocked_NoTargets, "no_targets") 2129 ; 2130 2131 avg_blocked 2132 .name(name() + ".avg_blocked_cycles") 2133 .desc("average number of cycles each access was blocked") 2134 .subname(Blocked_NoMSHRs, "no_mshrs") 2135 .subname(Blocked_NoTargets, "no_targets") 2136 ; 2137 2138 avg_blocked = blocked_cycles / blocked_causes; 2139 2140 unusedPrefetches 2141 .name(name() + ".unused_prefetches") 2142 .desc("number of HardPF blocks evicted w/o reference") 2143 .flags(nozero) 2144 ; 2145 2146 writebacks 2147 .init(system->maxMasters()) 2148 .name(name() + ".writebacks") 2149 .desc("number of writebacks") 2150 .flags(total | nozero | nonan) 2151 ; 2152 for (int i = 0; i < system->maxMasters(); i++) { 2153 writebacks.subname(i, system->getMasterName(i)); 2154 } 2155 2156 // MSHR statistics 2157 // MSHR hit statistics 2158 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2159 MemCmd cmd(access_idx); 2160 const string &cstr = cmd.toString(); 2161 2162 mshr_hits[access_idx] 2163 .init(system->maxMasters()) 2164 .name(name() + "." 
+ cstr + "_mshr_hits") 2165 .desc("number of " + cstr + " MSHR hits") 2166 .flags(total | nozero | nonan) 2167 ; 2168 for (int i = 0; i < system->maxMasters(); i++) { 2169 mshr_hits[access_idx].subname(i, system->getMasterName(i)); 2170 } 2171 } 2172 2173 demandMshrHits 2174 .name(name() + ".demand_mshr_hits") 2175 .desc("number of demand (read+write) MSHR hits") 2176 .flags(total | nozero | nonan) 2177 ; 2178 demandMshrHits = SUM_DEMAND(mshr_hits); 2179 for (int i = 0; i < system->maxMasters(); i++) { 2180 demandMshrHits.subname(i, system->getMasterName(i)); 2181 } 2182 2183 overallMshrHits 2184 .name(name() + ".overall_mshr_hits") 2185 .desc("number of overall MSHR hits") 2186 .flags(total | nozero | nonan) 2187 ; 2188 overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits); 2189 for (int i = 0; i < system->maxMasters(); i++) { 2190 overallMshrHits.subname(i, system->getMasterName(i)); 2191 } 2192 2193 // MSHR miss statistics 2194 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2195 MemCmd cmd(access_idx); 2196 const string &cstr = cmd.toString(); 2197 2198 mshr_misses[access_idx] 2199 .init(system->maxMasters()) 2200 .name(name() + "." + cstr + "_mshr_misses") 2201 .desc("number of " + cstr + " MSHR misses") 2202 .flags(total | nozero | nonan) 2203 ; 2204 for (int i = 0; i < system->maxMasters(); i++) { 2205 mshr_misses[access_idx].subname(i, system->getMasterName(i)); 2206 } 2207 } 2208 2209 demandMshrMisses 2210 .name(name() + ".demand_mshr_misses") 2211 .desc("number of demand (read+write) MSHR misses") 2212 .flags(total | nozero | nonan) 2213 ; 2214 demandMshrMisses = SUM_DEMAND(mshr_misses); 2215 for (int i = 0; i < system->maxMasters(); i++) { 2216 demandMshrMisses.subname(i, system->getMasterName(i)); 2217 } 2218 2219 overallMshrMisses 2220 .name(name() + ".overall_mshr_misses") 2221 .desc("number of overall MSHR misses") 2222 .flags(total | nozero | nonan) 2223 ; 2224 overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses); 2225 for (int i = 0; i < system->maxMasters(); i++) { 2226 overallMshrMisses.subname(i, system->getMasterName(i)); 2227 } 2228 2229 // MSHR miss latency statistics 2230 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2231 MemCmd cmd(access_idx); 2232 const string &cstr = cmd.toString(); 2233 2234 mshr_miss_latency[access_idx] 2235 .init(system->maxMasters()) 2236 .name(name() + "." 
+ cstr + "_mshr_miss_latency") 2237 .desc("number of " + cstr + " MSHR miss cycles") 2238 .flags(total | nozero | nonan) 2239 ; 2240 for (int i = 0; i < system->maxMasters(); i++) { 2241 mshr_miss_latency[access_idx].subname(i, system->getMasterName(i)); 2242 } 2243 } 2244 2245 demandMshrMissLatency 2246 .name(name() + ".demand_mshr_miss_latency") 2247 .desc("number of demand (read+write) MSHR miss cycles") 2248 .flags(total | nozero | nonan) 2249 ; 2250 demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency); 2251 for (int i = 0; i < system->maxMasters(); i++) { 2252 demandMshrMissLatency.subname(i, system->getMasterName(i)); 2253 } 2254 2255 overallMshrMissLatency 2256 .name(name() + ".overall_mshr_miss_latency") 2257 .desc("number of overall MSHR miss cycles") 2258 .flags(total | nozero | nonan) 2259 ; 2260 overallMshrMissLatency = 2261 demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency); 2262 for (int i = 0; i < system->maxMasters(); i++) { 2263 overallMshrMissLatency.subname(i, system->getMasterName(i)); 2264 } 2265 2266 // MSHR uncacheable statistics 2267 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2268 MemCmd cmd(access_idx); 2269 const string &cstr = cmd.toString(); 2270 2271 mshr_uncacheable[access_idx] 2272 .init(system->maxMasters()) 2273 .name(name() + "." + cstr + "_mshr_uncacheable") 2274 .desc("number of " + cstr + " MSHR uncacheable") 2275 .flags(total | nozero | nonan) 2276 ; 2277 for (int i = 0; i < system->maxMasters(); i++) { 2278 mshr_uncacheable[access_idx].subname(i, system->getMasterName(i)); 2279 } 2280 } 2281 2282 overallMshrUncacheable 2283 .name(name() + ".overall_mshr_uncacheable_misses") 2284 .desc("number of overall MSHR uncacheable misses") 2285 .flags(total | nozero | nonan) 2286 ; 2287 overallMshrUncacheable = 2288 SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable); 2289 for (int i = 0; i < system->maxMasters(); i++) { 2290 overallMshrUncacheable.subname(i, system->getMasterName(i)); 2291 } 2292 2293 // MSHR miss latency statistics 2294 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2295 MemCmd cmd(access_idx); 2296 const string &cstr = cmd.toString(); 2297 2298 mshr_uncacheable_lat[access_idx] 2299 .init(system->maxMasters()) 2300 .name(name() + "." + cstr + "_mshr_uncacheable_latency") 2301 .desc("number of " + cstr + " MSHR uncacheable cycles") 2302 .flags(total | nozero | nonan) 2303 ; 2304 for (int i = 0; i < system->maxMasters(); i++) { 2305 mshr_uncacheable_lat[access_idx].subname( 2306 i, system->getMasterName(i)); 2307 } 2308 } 2309 2310 overallMshrUncacheableLatency 2311 .name(name() + ".overall_mshr_uncacheable_latency") 2312 .desc("number of overall MSHR uncacheable cycles") 2313 .flags(total | nozero | nonan) 2314 ; 2315 overallMshrUncacheableLatency = 2316 SUM_DEMAND(mshr_uncacheable_lat) + 2317 SUM_NON_DEMAND(mshr_uncacheable_lat); 2318 for (int i = 0; i < system->maxMasters(); i++) { 2319 overallMshrUncacheableLatency.subname(i, system->getMasterName(i)); 2320 } 2321 2322 // MSHR miss rate formulas 2323 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2324 MemCmd cmd(access_idx); 2325 const string &cstr = cmd.toString(); 2326 2327 mshrMissRate[access_idx] 2328 .name(name() + "." 
+ cstr + "_mshr_miss_rate") 2329 .desc("mshr miss rate for " + cstr + " accesses") 2330 .flags(total | nozero | nonan) 2331 ; 2332 mshrMissRate[access_idx] = 2333 mshr_misses[access_idx] / accesses[access_idx]; 2334 2335 for (int i = 0; i < system->maxMasters(); i++) { 2336 mshrMissRate[access_idx].subname(i, system->getMasterName(i)); 2337 } 2338 } 2339 2340 demandMshrMissRate 2341 .name(name() + ".demand_mshr_miss_rate") 2342 .desc("mshr miss rate for demand accesses") 2343 .flags(total | nozero | nonan) 2344 ; 2345 demandMshrMissRate = demandMshrMisses / demandAccesses; 2346 for (int i = 0; i < system->maxMasters(); i++) { 2347 demandMshrMissRate.subname(i, system->getMasterName(i)); 2348 } 2349 2350 overallMshrMissRate 2351 .name(name() + ".overall_mshr_miss_rate") 2352 .desc("mshr miss rate for overall accesses") 2353 .flags(total | nozero | nonan) 2354 ; 2355 overallMshrMissRate = overallMshrMisses / overallAccesses; 2356 for (int i = 0; i < system->maxMasters(); i++) { 2357 overallMshrMissRate.subname(i, system->getMasterName(i)); 2358 } 2359 2360 // mshrMiss latency formulas 2361 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2362 MemCmd cmd(access_idx); 2363 const string &cstr = cmd.toString(); 2364 2365 avgMshrMissLatency[access_idx] 2366 .name(name() + "." + cstr + "_avg_mshr_miss_latency") 2367 .desc("average " + cstr + " mshr miss latency") 2368 .flags(total | nozero | nonan) 2369 ; 2370 avgMshrMissLatency[access_idx] = 2371 mshr_miss_latency[access_idx] / mshr_misses[access_idx]; 2372 2373 for (int i = 0; i < system->maxMasters(); i++) { 2374 avgMshrMissLatency[access_idx].subname( 2375 i, system->getMasterName(i)); 2376 } 2377 } 2378 2379 demandAvgMshrMissLatency 2380 .name(name() + ".demand_avg_mshr_miss_latency") 2381 .desc("average overall mshr miss latency") 2382 .flags(total | nozero | nonan) 2383 ; 2384 demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses; 2385 for (int i = 0; i < system->maxMasters(); i++) { 2386 demandAvgMshrMissLatency.subname(i, system->getMasterName(i)); 2387 } 2388 2389 overallAvgMshrMissLatency 2390 .name(name() + ".overall_avg_mshr_miss_latency") 2391 .desc("average overall mshr miss latency") 2392 .flags(total | nozero | nonan) 2393 ; 2394 overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses; 2395 for (int i = 0; i < system->maxMasters(); i++) { 2396 overallAvgMshrMissLatency.subname(i, system->getMasterName(i)); 2397 } 2398 2399 // mshrUncacheable latency formulas 2400 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2401 MemCmd cmd(access_idx); 2402 const string &cstr = cmd.toString(); 2403 2404 avgMshrUncacheableLatency[access_idx] 2405 .name(name() + "." 
+ cstr + "_avg_mshr_uncacheable_latency") 2406 .desc("average " + cstr + " mshr uncacheable latency") 2407 .flags(total | nozero | nonan) 2408 ; 2409 avgMshrUncacheableLatency[access_idx] = 2410 mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx]; 2411 2412 for (int i = 0; i < system->maxMasters(); i++) { 2413 avgMshrUncacheableLatency[access_idx].subname( 2414 i, system->getMasterName(i)); 2415 } 2416 } 2417 2418 overallAvgMshrUncacheableLatency 2419 .name(name() + ".overall_avg_mshr_uncacheable_latency") 2420 .desc("average overall mshr uncacheable latency") 2421 .flags(total | nozero | nonan) 2422 ; 2423 overallAvgMshrUncacheableLatency = 2424 overallMshrUncacheableLatency / overallMshrUncacheable; 2425 for (int i = 0; i < system->maxMasters(); i++) { 2426 overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i)); 2427 } 2428 2429 replacements 2430 .name(name() + ".replacements") 2431 .desc("number of replacements") 2432 ; 2433 2434 dataExpansions 2435 .name(name() + ".data_expansions") 2436 .desc("number of data expansions") 2437 .flags(nozero | nonan) 2438 ; 2439} 2440 2441void 2442BaseCache::regProbePoints() 2443{ 2444 ppHit = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Hit"); 2445 ppMiss = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Miss"); 2446 ppFill = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Fill"); 2447} 2448 2449/////////////// 2450// 2451// CpuSidePort 2452// 2453/////////////// 2454bool 2455BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt) 2456{ 2457 // Snoops shouldn't happen when bypassing caches 2458 assert(!cache->system->bypassCaches()); 2459 2460 assert(pkt->isResponse()); 2461 2462 // Express snoop responses from master to slave, e.g., from L1 to L2 2463 cache->recvTimingSnoopResp(pkt); 2464 return true; 2465} 2466 2467 2468bool 2469BaseCache::CpuSidePort::tryTiming(PacketPtr pkt) 2470{ 2471 if (cache->system->bypassCaches() || pkt->isExpressSnoop()) { 2472 // always let express snoop packets through even if blocked 2473 return true; 2474 } else if (blocked || mustSendRetry) { 2475 // either already committed to send a retry, or blocked 2476 mustSendRetry = true; 2477 return false; 2478 } 2479 mustSendRetry = false; 2480 return true; 2481} 2482 2483bool 2484BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt) 2485{ 2486 assert(pkt->isRequest()); 2487 2488 if (cache->system->bypassCaches()) { 2489 // Just forward the packet if caches are disabled. 2490 // @todo This should really enqueue the packet rather 2491 bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt); 2492 assert(success); 2493 return true; 2494 } else if (tryTiming(pkt)) { 2495 cache->recvTimingReq(pkt); 2496 return true; 2497 } 2498 return false; 2499} 2500 2501Tick 2502BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt) 2503{ 2504 if (cache->system->bypassCaches()) { 2505 // Forward the request if the system is in cache bypass mode. 2506 return cache->memSidePort.sendAtomic(pkt); 2507 } else { 2508 return cache->recvAtomic(pkt); 2509 } 2510} 2511 2512void 2513BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt) 2514{ 2515 if (cache->system->bypassCaches()) { 2516 // The cache should be flushed if we are in cache bypass mode, 2517 // so we don't need to check if we need to update anything. 
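        // In bypass mode this port pair effectively acts as a wire:
        // recvTimingReq and recvAtomic above forward straight to
        // memSidePort in the same way, so there is no cache state that a
        // functional access would need to inspect or update here.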
        cache->memSidePort.sendFunctional(pkt);
        return;
    }

    // functional request
    cache->functionalAccess(pkt, true);
}

AddrRangeList
BaseCache::CpuSidePort::getAddrRanges() const
{
    return cache->getAddrRanges();
}


BaseCache::
CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache,
                         const std::string &_label)
    : CacheSlavePort(_name, _cache, _label), cache(_cache)
{
}

///////////////
//
// MemSidePort
//
///////////////
bool
BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt)
{
    cache->recvTimingResp(pkt);
    return true;
}

// Express snooping requests to memside port
void
BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    // handle snooping requests
    cache->recvTimingSnoopReq(pkt);
}

Tick
BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    return cache->recvAtomicSnoop(pkt);
}

void
BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    // functional snoop (note that in contrast to atomic we don't have
    // a specific functionalSnoop method, as they have the same
    // behaviour regardless)
    cache->functionalAccess(pkt, false);
}

void
BaseCache::CacheReqPacketQueue::sendDeferredPacket()
{
    // sanity check
    assert(!waitingOnRetry);

    // there should never be any deferred request packets in the
    // queue, instead we rely on the cache to provide the packets
    // from the MSHR queue or write queue
    assert(deferredPacketReadyTime() == MaxTick);

    // check for request packets (requests & writebacks)
    QueueEntry* entry = cache.getNextQueueEntry();

    if (!entry) {
        // can happen if e.g. we attempt a writeback and fail, but
        // before the retry, the writeback is eliminated because
        // we snoop another cache's ReadEx.
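        // Nothing to send in that case; the send event is simply
        // rescheduled below based on nextQueueReadyTime() once either the
        // MSHR queue or the write buffer has an entry ready again.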
2602 } else { 2603 // let our snoop responses go first if there are responses to 2604 // the same addresses 2605 if (checkConflictingSnoop(entry->getTarget()->pkt)) { 2606 return; 2607 } 2608 waitingOnRetry = entry->sendPacket(cache); 2609 } 2610 2611 // if we succeeded and are not waiting for a retry, schedule the 2612 // next send considering when the next queue is ready, note that 2613 // snoop responses have their own packet queue and thus schedule 2614 // their own events 2615 if (!waitingOnRetry) { 2616 schedSendEvent(cache.nextQueueReadyTime()); 2617 } 2618} 2619 2620BaseCache::MemSidePort::MemSidePort(const std::string &_name, 2621 BaseCache *_cache, 2622 const std::string &_label) 2623 : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue), 2624 _reqQueue(*_cache, *this, _snoopRespQueue, _label), 2625 _snoopRespQueue(*_cache, *this, true, _label), cache(_cache) 2626{ 2627} 2628 2629void 2630WriteAllocator::updateMode(Addr write_addr, unsigned write_size, 2631 Addr blk_addr) 2632{ 2633 // check if we are continuing where the last write ended 2634 if (nextAddr == write_addr) { 2635 delayCtr[blk_addr] = delayThreshold; 2636 // stop if we have already saturated 2637 if (mode != WriteMode::NO_ALLOCATE) { 2638 byteCount += write_size; 2639 // switch to streaming mode if we have passed the lower 2640 // threshold 2641 if (mode == WriteMode::ALLOCATE && 2642 byteCount > coalesceLimit) { 2643 mode = WriteMode::COALESCE; 2644 DPRINTF(Cache, "Switched to write coalescing\n"); 2645 } else if (mode == WriteMode::COALESCE && 2646 byteCount > noAllocateLimit) { 2647 // and continue and switch to non-allocating mode if we 2648 // pass the upper threshold 2649 mode = WriteMode::NO_ALLOCATE; 2650 DPRINTF(Cache, "Switched to write-no-allocate\n"); 2651 } 2652 } 2653 } else { 2654 // we did not see a write matching the previous one, start 2655 // over again 2656 byteCount = write_size; 2657 mode = WriteMode::ALLOCATE; 2658 resetDelay(blk_addr); 2659 } 2660 nextAddr = write_addr + write_size; 2661} 2662 2663WriteAllocator* 2664WriteAllocatorParams::create() 2665{ 2666 return new WriteAllocator(this); 2667} 2668