base.cc revision 13945
/*
 * Copyright (c) 2012-2013, 2018-2019 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Definition of BaseCache functions.
 */

#include "mem/cache/base.hh"

#include "base/compiler.hh"
#include "base/logging.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheRepl.hh"
#include "debug/CacheVerbose.hh"
#include "mem/cache/compressors/base.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "mem/cache/queue_entry.hh"
#include "params/BaseCache.hh"
#include "params/WriteAllocator.hh"
#include "sim/core.hh"

class BaseMasterPort;
class BaseSlavePort;

using namespace std;

BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
                                          BaseCache *_cache,
                                          const std::string &_label)
    : QueuedSlavePort(_name, _cache, queue),
      queue(*_cache, *this, true, _label),
      blocked(false), mustSendRetry(false),
      sendRetryEvent([this]{ processSendRetry(); }, _name)
{
}

BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
    : ClockedObject(p),
      cpuSidePort (p->name + ".cpu_side", this, "CpuSidePort"),
      memSidePort(p->name + ".mem_side", this, "MemSidePort"),
      mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
      writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
      tags(p->tags),
      compressor(p->compressor),
      prefetcher(p->prefetcher),
      writeAllocator(p->write_allocator),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
                                    name(), false,
                                    EventBase::Delayed_Writeback_Pri),
      blkSize(blk_size),
      lookupLatency(p->tag_latency),
      dataLatency(p->data_latency),
      forwardLatency(p->tag_latency),
      fillLatency(p->data_latency),
      responseLatency(p->response_latency),
      sequentialAccess(p->sequential_access),
      numTarget(p->tgts_per_mshr),
      forwardSnoops(true),
      clusivity(p->clusivity),
      isReadOnly(p->is_read_only),
      blocked(0),
      order(0),
      noTargetMSHR(nullptr),
      missCount(p->max_miss_count),
      addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()),
      system(p->system)
{
    // the MSHR queue has no reserve entries as we check the MSHR
    // queue on every single allocation, whereas the write queue has
    // as many reserve entries as we have MSHRs, since every MSHR may
    // eventually require a writeback, and we do not check the write
    // buffer before committing to an MSHR

    // forward snoops is overridden in init() once we can query
    // whether the connected master is actually snooping or not

    tempBlock = new TempCacheBlk(blkSize);

    tags->tagsInit();
    if (prefetcher)
        prefetcher->setCache(this);
}

BaseCache::~BaseCache()
{
    delete tempBlock;
}

void
BaseCache::CacheSlavePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(CachePort, "Port is blocking new requests\n");
    blocked = true;
    // if we already scheduled a retry in this cycle, but it has not yet
    // happened, cancel it
    if (sendRetryEvent.scheduled()) {
        owner.deschedule(sendRetryEvent);
        DPRINTF(CachePort, "Port descheduled retry\n");
        mustSendRetry = true;
    }
}

void
BaseCache::CacheSlavePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(CachePort, "Port is accepting new requests\n");
    blocked = false;
    if (mustSendRetry) {
        // @TODO: need to find a better time (next cycle?)
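        // schedule the retry a single tick in the future; per the
        // @TODO above, aligning this to the next clock edge would
        // arguably be cleaner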
157 owner.schedule(sendRetryEvent, curTick() + 1); 158 } 159} 160 161void 162BaseCache::CacheSlavePort::processSendRetry() 163{ 164 DPRINTF(CachePort, "Port is sending retry\n"); 165 166 // reset the flag and call retry 167 mustSendRetry = false; 168 sendRetryReq(); 169} 170 171Addr 172BaseCache::regenerateBlkAddr(CacheBlk* blk) 173{ 174 if (blk != tempBlock) { 175 return tags->regenerateBlkAddr(blk); 176 } else { 177 return tempBlock->getAddr(); 178 } 179} 180 181void 182BaseCache::init() 183{ 184 if (!cpuSidePort.isConnected() || !memSidePort.isConnected()) 185 fatal("Cache ports on %s are not connected\n", name()); 186 cpuSidePort.sendRangeChange(); 187 forwardSnoops = cpuSidePort.isSnooping(); 188} 189 190Port & 191BaseCache::getPort(const std::string &if_name, PortID idx) 192{ 193 if (if_name == "mem_side") { 194 return memSidePort; 195 } else if (if_name == "cpu_side") { 196 return cpuSidePort; 197 } else { 198 return ClockedObject::getPort(if_name, idx); 199 } 200} 201 202bool 203BaseCache::inRange(Addr addr) const 204{ 205 for (const auto& r : addrRanges) { 206 if (r.contains(addr)) { 207 return true; 208 } 209 } 210 return false; 211} 212 213void 214BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time) 215{ 216 if (pkt->needsResponse()) { 217 // These delays should have been consumed by now 218 assert(pkt->headerDelay == 0); 219 assert(pkt->payloadDelay == 0); 220 221 pkt->makeTimingResponse(); 222 223 // In this case we are considering request_time that takes 224 // into account the delay of the xbar, if any, and just 225 // lat, neglecting responseLatency, modelling hit latency 226 // just as the value of lat overriden by access(), which calls 227 // the calculateAccessLatency() function. 228 cpuSidePort.schedTimingResp(pkt, request_time); 229 } else { 230 DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__, 231 pkt->print()); 232 233 // queue the packet for deletion, as the sending cache is 234 // still relying on it; if the block is found in access(), 235 // CleanEvict and Writeback messages will be deleted 236 // here as well 237 pendingDelete.reset(pkt); 238 } 239} 240 241void 242BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk, 243 Tick forward_time, Tick request_time) 244{ 245 if (writeAllocator && 246 pkt && pkt->isWrite() && !pkt->req->isUncacheable()) { 247 writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(), 248 pkt->getBlockAddr(blkSize)); 249 } 250 251 if (mshr) { 252 /// MSHR hit 253 /// @note writebacks will be checked in getNextMSHR() 254 /// for any conflicting requests to the same block 255 256 //@todo remove hw_pf here 257 258 // Coalesce unless it was a software prefetch (see above). 259 if (pkt) { 260 assert(!pkt->isWriteback()); 261 // CleanEvicts corresponding to blocks which have 262 // outstanding requests in MSHRs are simply sunk here 263 if (pkt->cmd == MemCmd::CleanEvict) { 264 pendingDelete.reset(pkt); 265 } else if (pkt->cmd == MemCmd::WriteClean) { 266 // A WriteClean should never coalesce with any 267 // outstanding cache maintenance requests. 268 269 // We use forward_time here because there is an 270 // uncached memory write, forwarded to WriteBuffer. 
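                // the WriteClean therefore gets its own write buffer
                // entry rather than being added as a target to the
                // existing MSHR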
271 allocateWriteBuffer(pkt, forward_time); 272 } else { 273 DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__, 274 pkt->print()); 275 276 assert(pkt->req->masterId() < system->maxMasters()); 277 mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++; 278 279 // We use forward_time here because it is the same 280 // considering new targets. We have multiple 281 // requests for the same address here. It 282 // specifies the latency to allocate an internal 283 // buffer and to schedule an event to the queued 284 // port and also takes into account the additional 285 // delay of the xbar. 286 mshr->allocateTarget(pkt, forward_time, order++, 287 allocOnFill(pkt->cmd)); 288 if (mshr->getNumTargets() == numTarget) { 289 noTargetMSHR = mshr; 290 setBlocked(Blocked_NoTargets); 291 // need to be careful with this... if this mshr isn't 292 // ready yet (i.e. time > curTick()), we don't want to 293 // move it ahead of mshrs that are ready 294 // mshrQueue.moveToFront(mshr); 295 } 296 } 297 } 298 } else { 299 // no MSHR 300 assert(pkt->req->masterId() < system->maxMasters()); 301 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++; 302 303 if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) { 304 // We use forward_time here because there is an 305 // writeback or writeclean, forwarded to WriteBuffer. 306 allocateWriteBuffer(pkt, forward_time); 307 } else { 308 if (blk && blk->isValid()) { 309 // If we have a write miss to a valid block, we 310 // need to mark the block non-readable. Otherwise 311 // if we allow reads while there's an outstanding 312 // write miss, the read could return stale data 313 // out of the cache block... a more aggressive 314 // system could detect the overlap (if any) and 315 // forward data out of the MSHRs, but we don't do 316 // that yet. Note that we do need to leave the 317 // block valid so that it stays in the cache, in 318 // case we get an upgrade response (and hence no 319 // new data) when the write miss completes. 320 // As long as CPUs do proper store/load forwarding 321 // internally, and have a sufficiently weak memory 322 // model, this is probably unnecessary, but at some 323 // point it must have seemed like we needed it... 324 assert((pkt->needsWritable() && !blk->isWritable()) || 325 pkt->req->isCacheMaintenance()); 326 blk->status &= ~BlkReadable; 327 } 328 // Here we are using forward_time, modelling the latency of 329 // a miss (outbound) just as forwardLatency, neglecting the 330 // lookupLatency component. 331 allocateMissBuffer(pkt, forward_time); 332 } 333 } 334} 335 336void 337BaseCache::recvTimingReq(PacketPtr pkt) 338{ 339 // anything that is merely forwarded pays for the forward latency and 340 // the delay provided by the crossbar 341 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 342 343 Cycles lat; 344 CacheBlk *blk = nullptr; 345 bool satisfied = false; 346 { 347 PacketList writebacks; 348 // Note that lat is passed by reference here. The function 349 // access() will set the lat value. 350 satisfied = access(pkt, blk, lat, writebacks); 351 352 // After the evicted blocks are selected, they must be forwarded 353 // to the write buffer to ensure they logically precede anything 354 // happening below 355 doWritebacks(writebacks, clockEdge(lat + forwardLatency)); 356 } 357 358 // Here we charge the headerDelay that takes into account the latencies 359 // of the bus, if the packet comes from it. 360 // The latency charged is just the value set by the access() function. 
361 // In case of a hit we are neglecting response latency. 362 // In case of a miss we are neglecting forward latency. 363 Tick request_time = clockEdge(lat); 364 // Here we reset the timing of the packet. 365 pkt->headerDelay = pkt->payloadDelay = 0; 366 367 if (satisfied) { 368 // notify before anything else as later handleTimingReqHit might turn 369 // the packet in a response 370 ppHit->notify(pkt); 371 372 if (prefetcher && blk && blk->wasPrefetched()) { 373 blk->status &= ~BlkHWPrefetched; 374 } 375 376 handleTimingReqHit(pkt, blk, request_time); 377 } else { 378 handleTimingReqMiss(pkt, blk, forward_time, request_time); 379 380 ppMiss->notify(pkt); 381 } 382 383 if (prefetcher) { 384 // track time of availability of next prefetch, if any 385 Tick next_pf_time = prefetcher->nextPrefetchReadyTime(); 386 if (next_pf_time != MaxTick) { 387 schedMemSideSendEvent(next_pf_time); 388 } 389 } 390} 391 392void 393BaseCache::handleUncacheableWriteResp(PacketPtr pkt) 394{ 395 Tick completion_time = clockEdge(responseLatency) + 396 pkt->headerDelay + pkt->payloadDelay; 397 398 // Reset the bus additional time as it is now accounted for 399 pkt->headerDelay = pkt->payloadDelay = 0; 400 401 cpuSidePort.schedTimingResp(pkt, completion_time); 402} 403 404void 405BaseCache::recvTimingResp(PacketPtr pkt) 406{ 407 assert(pkt->isResponse()); 408 409 // all header delay should be paid for by the crossbar, unless 410 // this is a prefetch response from above 411 panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp, 412 "%s saw a non-zero packet delay\n", name()); 413 414 const bool is_error = pkt->isError(); 415 416 if (is_error) { 417 DPRINTF(Cache, "%s: Cache received %s with error\n", __func__, 418 pkt->print()); 419 } 420 421 DPRINTF(Cache, "%s: Handling response %s\n", __func__, 422 pkt->print()); 423 424 // if this is a write, we should be looking at an uncacheable 425 // write 426 if (pkt->isWrite()) { 427 assert(pkt->req->isUncacheable()); 428 handleUncacheableWriteResp(pkt); 429 return; 430 } 431 432 // we have dealt with any (uncacheable) writes above, from here on 433 // we know we are dealing with an MSHR due to a miss or a prefetch 434 MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState()); 435 assert(mshr); 436 437 if (mshr == noTargetMSHR) { 438 // we always clear at least one target 439 clearBlocked(Blocked_NoTargets); 440 noTargetMSHR = nullptr; 441 } 442 443 // Initial target is used just for stats 444 QueueEntry::Target *initial_tgt = mshr->getTarget(); 445 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex(); 446 Tick miss_latency = curTick() - initial_tgt->recvTime; 447 448 if (pkt->req->isUncacheable()) { 449 assert(pkt->req->masterId() < system->maxMasters()); 450 mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] += 451 miss_latency; 452 } else { 453 assert(pkt->req->masterId() < system->maxMasters()); 454 mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] += 455 miss_latency; 456 } 457 458 PacketList writebacks; 459 460 bool is_fill = !mshr->isForward && 461 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp || 462 mshr->wasWholeLineWrite); 463 464 // make sure that if the mshr was due to a whole line write then 465 // the response is an invalidation 466 assert(!mshr->wasWholeLineWrite || pkt->isInvalidate()); 467 468 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure()); 469 470 if (is_fill && !is_error) { 471 DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n", 472 pkt->getAddr()); 473 474 const bool allocate = (writeAllocator && 
            mshr->wasWholeLineWrite) ?
            writeAllocator->allocate() : mshr->allocOnFill();
        blk = handleFill(pkt, blk, writebacks, allocate);
        assert(blk != nullptr);
        ppFill->notify(pkt);
    }

    if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
        // The block was marked not readable while there was a pending
        // cache maintenance operation, restore its flag.
        blk->status |= BlkReadable;

        // This was a cache clean operation (without invalidate)
        // and we have a copy of the block already. Since there
        // is no invalidation, we can promote targets that don't
        // require a writable copy
        mshr->promoteReadable();
    }

    if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) {
        // If at this point the referenced block is writable and the
        // response is not a cache invalidate, we promote targets that
        // were deferred as we couldn't guarantee a writable copy
        mshr->promoteWritable();
    }

    serviceMSHRTargets(mshr, pkt, blk);

    if (mshr->promoteDeferredTargets()) {
        // avoid a later read getting stale data while the write miss is
        // outstanding... see comment in timingAccess()
        if (blk) {
            blk->status &= ~BlkReadable;
        }
        mshrQueue.markPending(mshr);
        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
    } else {
        // while we deallocate an mshr from the queue we still have to
        // check the isFull condition before and after as we might
        // have been using the reserved entries already
        const bool was_full = mshrQueue.isFull();
        mshrQueue.deallocate(mshr);
        if (was_full && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }

        // Request the bus for a prefetch if this deallocation freed enough
        // MSHRs for a prefetch to take place
        if (prefetcher && mshrQueue.canPrefetch()) {
            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
                                         clockEdge());
            if (next_pf_time != MaxTick)
                schedMemSideSendEvent(next_pf_time);
        }
    }

    // if we used the temp block, check to see if it is valid and then
    // clear it out
    if (blk == tempBlock && tempBlock->isValid()) {
        evictBlock(blk, writebacks);
    }

    const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // copy writebacks to write buffer
    doWritebacks(writebacks, forward_time);

    DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
    delete pkt;
}


Tick
BaseCache::recvAtomic(PacketPtr pkt)
{
    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    // We use lookupLatency here because it specifies the latency of
    // the tag lookup.
    Cycles lat = lookupLatency;

    CacheBlk *blk = nullptr;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty block. If a
        // dirty block is encountered, a WriteClean will update any
        // copies on the path to memory up to the point of reference.
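        // Queue the WriteClean as a writeback and mark the original
        // packet as satisfied so that downstream observers know a
        // follow-up write will handle the dirty copy.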
        DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                __func__, pkt->print(), blk->print());
        PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
        writebacks.push_back(wb_pkt);
        pkt->setSatisfied();
    }

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);
    assert(writebacks.empty());

    if (!satisfied) {
        lat += handleAtomicReqMiss(pkt, blk, writebacks);
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention. If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used the temp block, check to see if it is valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter) first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = evictBlock(blk);
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat * clockPeriod();
}

void
BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
{
    Addr blk_addr = pkt->getBlockAddr(blkSize);
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found. As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.
640 641 // see if we have data at all (owned or otherwise) 642 bool have_data = blk && blk->isValid() 643 && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize, 644 blk->data); 645 646 // data we have is dirty if marked as such or if we have an 647 // in-service MSHR that is pending a modified line 648 bool have_dirty = 649 have_data && (blk->isDirty() || 650 (mshr && mshr->inService && mshr->isPendingModified())); 651 652 bool done = have_dirty || 653 cpuSidePort.trySatisfyFunctional(pkt) || 654 mshrQueue.trySatisfyFunctional(pkt) || 655 writeBuffer.trySatisfyFunctional(pkt) || 656 memSidePort.trySatisfyFunctional(pkt); 657 658 DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(), 659 (blk && blk->isValid()) ? "valid " : "", 660 have_data ? "data " : "", done ? "done " : ""); 661 662 // We're leaving the cache, so pop cache->name() label 663 pkt->popLabel(); 664 665 if (done) { 666 pkt->makeResponse(); 667 } else { 668 // if it came as a request from the CPU side then make sure it 669 // continues towards the memory side 670 if (from_cpu_side) { 671 memSidePort.sendFunctional(pkt); 672 } else if (cpuSidePort.isSnooping()) { 673 // if it came from the memory side, it must be a snoop request 674 // and we should only forward it if we are forwarding snoops 675 cpuSidePort.sendFunctionalSnoop(pkt); 676 } 677 } 678} 679 680 681void 682BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt) 683{ 684 assert(pkt->isRequest()); 685 686 uint64_t overwrite_val; 687 bool overwrite_mem; 688 uint64_t condition_val64; 689 uint32_t condition_val32; 690 691 int offset = pkt->getOffset(blkSize); 692 uint8_t *blk_data = blk->data + offset; 693 694 assert(sizeof(uint64_t) >= pkt->getSize()); 695 696 overwrite_mem = true; 697 // keep a copy of our possible write value, and copy what is at the 698 // memory address into the packet 699 pkt->writeData((uint8_t *)&overwrite_val); 700 pkt->setData(blk_data); 701 702 if (pkt->req->isCondSwap()) { 703 if (pkt->getSize() == sizeof(uint64_t)) { 704 condition_val64 = pkt->req->getExtraData(); 705 overwrite_mem = !std::memcmp(&condition_val64, blk_data, 706 sizeof(uint64_t)); 707 } else if (pkt->getSize() == sizeof(uint32_t)) { 708 condition_val32 = (uint32_t)pkt->req->getExtraData(); 709 overwrite_mem = !std::memcmp(&condition_val32, blk_data, 710 sizeof(uint32_t)); 711 } else 712 panic("Invalid size for conditional read/write\n"); 713 } 714 715 if (overwrite_mem) { 716 std::memcpy(blk_data, &overwrite_val, pkt->getSize()); 717 blk->status |= BlkDirty; 718 } 719} 720 721QueueEntry* 722BaseCache::getNextQueueEntry() 723{ 724 // Check both MSHR queue and write buffer for potential requests, 725 // note that null does not mean there is no request, it could 726 // simply be that it is not ready 727 MSHR *miss_mshr = mshrQueue.getNext(); 728 WriteQueueEntry *wq_entry = writeBuffer.getNext(); 729 730 // If we got a write buffer request ready, first priority is a 731 // full write buffer, otherwise we favour the miss requests 732 if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) { 733 // need to search MSHR queue for conflicting earlier miss. 734 MSHR *conflict_mshr = mshrQueue.findPending(wq_entry); 735 736 if (conflict_mshr && conflict_mshr->order < wq_entry->order) { 737 // Service misses in order until conflict is cleared. 
738 return conflict_mshr; 739 740 // @todo Note that we ignore the ready time of the conflict here 741 } 742 743 // No conflicts; issue write 744 return wq_entry; 745 } else if (miss_mshr) { 746 // need to check for conflicting earlier writeback 747 WriteQueueEntry *conflict_mshr = writeBuffer.findPending(miss_mshr); 748 if (conflict_mshr) { 749 // not sure why we don't check order here... it was in the 750 // original code but commented out. 751 752 // The only way this happens is if we are 753 // doing a write and we didn't have permissions 754 // then subsequently saw a writeback (owned got evicted) 755 // We need to make sure to perform the writeback first 756 // To preserve the dirty data, then we can issue the write 757 758 // should we return wq_entry here instead? I.e. do we 759 // have to flush writes in order? I don't think so... not 760 // for Alpha anyway. Maybe for x86? 761 return conflict_mshr; 762 763 // @todo Note that we ignore the ready time of the conflict here 764 } 765 766 // No conflicts; issue read 767 return miss_mshr; 768 } 769 770 // fall through... no pending requests. Try a prefetch. 771 assert(!miss_mshr && !wq_entry); 772 if (prefetcher && mshrQueue.canPrefetch()) { 773 // If we have a miss queue slot, we can try a prefetch 774 PacketPtr pkt = prefetcher->getPacket(); 775 if (pkt) { 776 Addr pf_addr = pkt->getBlockAddr(blkSize); 777 if (!tags->findBlock(pf_addr, pkt->isSecure()) && 778 !mshrQueue.findMatch(pf_addr, pkt->isSecure()) && 779 !writeBuffer.findMatch(pf_addr, pkt->isSecure())) { 780 // Update statistic on number of prefetches issued 781 // (hwpf_mshr_misses) 782 assert(pkt->req->masterId() < system->maxMasters()); 783 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++; 784 785 // allocate an MSHR and return it, note 786 // that we send the packet straight away, so do not 787 // schedule the send 788 return allocateMissBuffer(pkt, curTick(), false); 789 } else { 790 // free the request and packet 791 delete pkt; 792 } 793 } 794 } 795 796 return nullptr; 797} 798 799void 800BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool) 801{ 802 assert(pkt->isRequest()); 803 804 assert(blk && blk->isValid()); 805 // Occasionally this is not true... if we are a lower-level cache 806 // satisfying a string of Read and ReadEx requests from 807 // upper-level caches, a Read will mark the block as shared but we 808 // can satisfy a following ReadEx anyway since we can rely on the 809 // Read requester(s) to have buffered the ReadEx snoop and to 810 // invalidate their blocks after receiving them. 
811 // assert(!pkt->needsWritable() || blk->isWritable()); 812 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize); 813 814 // Check RMW operations first since both isRead() and 815 // isWrite() will be true for them 816 if (pkt->cmd == MemCmd::SwapReq) { 817 if (pkt->isAtomicOp()) { 818 // extract data from cache and save it into the data field in 819 // the packet as a return value from this atomic op 820 int offset = tags->extractBlkOffset(pkt->getAddr()); 821 uint8_t *blk_data = blk->data + offset; 822 pkt->setData(blk_data); 823 824 // execute AMO operation 825 (*(pkt->getAtomicOp()))(blk_data); 826 827 // set block status to dirty 828 blk->status |= BlkDirty; 829 } else { 830 cmpAndSwap(blk, pkt); 831 } 832 } else if (pkt->isWrite()) { 833 // we have the block in a writable state and can go ahead, 834 // note that the line may be also be considered writable in 835 // downstream caches along the path to memory, but always 836 // Exclusive, and never Modified 837 assert(blk->isWritable()); 838 // Write or WriteLine at the first cache with block in writable state 839 if (blk->checkWrite(pkt)) { 840 pkt->writeDataToBlock(blk->data, blkSize); 841 } 842 // Always mark the line as dirty (and thus transition to the 843 // Modified state) even if we are a failed StoreCond so we 844 // supply data to any snoops that have appended themselves to 845 // this cache before knowing the store will fail. 846 blk->status |= BlkDirty; 847 DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print()); 848 } else if (pkt->isRead()) { 849 if (pkt->isLLSC()) { 850 blk->trackLoadLocked(pkt); 851 } 852 853 // all read responses have a data payload 854 assert(pkt->hasRespData()); 855 pkt->setDataFromBlock(blk->data, blkSize); 856 } else if (pkt->isUpgrade()) { 857 // sanity check 858 assert(!pkt->hasSharers()); 859 860 if (blk->isDirty()) { 861 // we were in the Owned state, and a cache above us that 862 // has the line in Shared state needs to be made aware 863 // that the data it already has is in fact dirty 864 pkt->setCacheResponding(); 865 blk->status &= ~BlkDirty; 866 } 867 } else if (pkt->isClean()) { 868 blk->status &= ~BlkDirty; 869 } else { 870 assert(pkt->isInvalidate()); 871 invalidateBlock(blk); 872 DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__, 873 pkt->print()); 874 } 875} 876 877///////////////////////////////////////////////////// 878// 879// Access path: requests coming in from the CPU side 880// 881///////////////////////////////////////////////////// 882Cycles 883BaseCache::calculateTagOnlyLatency(const uint32_t delay, 884 const Cycles lookup_lat) const 885{ 886 // A tag-only access has to wait for the packet to arrive in order to 887 // perform the tag lookup. 888 return ticksToCycles(delay) + lookup_lat; 889} 890 891Cycles 892BaseCache::calculateAccessLatency(const CacheBlk* blk, const uint32_t delay, 893 const Cycles lookup_lat) const 894{ 895 Cycles lat(0); 896 897 if (blk != nullptr) { 898 // As soon as the access arrives, for sequential accesses first access 899 // tags, then the data entry. In the case of parallel accesses the 900 // latency is dictated by the slowest of tag and data latencies. 901 if (sequentialAccess) { 902 lat = ticksToCycles(delay) + lookup_lat + dataLatency; 903 } else { 904 lat = ticksToCycles(delay) + std::max(lookup_lat, dataLatency); 905 } 906 907 // Check if the block to be accessed is available. If not, apply the 908 // access latency on top of when the block is ready to be accessed. 
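        // The extra wait for the block is only charged when it exceeds
        // the access latency computed above.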
909 const Tick tick = curTick() + delay; 910 const Tick when_ready = blk->getWhenReady(); 911 if (when_ready > tick && 912 ticksToCycles(when_ready - tick) > lat) { 913 lat += ticksToCycles(when_ready - tick); 914 } 915 } else { 916 // In case of a miss, we neglect the data access in a parallel 917 // configuration (i.e., the data access will be stopped as soon as 918 // we find out it is a miss), and use the tag-only latency. 919 lat = calculateTagOnlyLatency(delay, lookup_lat); 920 } 921 922 return lat; 923} 924 925bool 926BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, 927 PacketList &writebacks) 928{ 929 // sanity check 930 assert(pkt->isRequest()); 931 932 chatty_assert(!(isReadOnly && pkt->isWrite()), 933 "Should never see a write in a read-only cache %s\n", 934 name()); 935 936 // Access block in the tags 937 Cycles tag_latency(0); 938 blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), tag_latency); 939 940 DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(), 941 blk ? "hit " + blk->print() : "miss"); 942 943 if (pkt->req->isCacheMaintenance()) { 944 // A cache maintenance operation is always forwarded to the 945 // memory below even if the block is found in dirty state. 946 947 // We defer any changes to the state of the block until we 948 // create and mark as in service the mshr for the downstream 949 // packet. 950 951 // Calculate access latency on top of when the packet arrives. This 952 // takes into account the bus delay. 953 lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 954 955 return false; 956 } 957 958 if (pkt->isEviction()) { 959 // We check for presence of block in above caches before issuing 960 // Writeback or CleanEvict to write buffer. Therefore the only 961 // possible cases can be of a CleanEvict packet coming from above 962 // encountering a Writeback generated in this cache peer cache and 963 // waiting in the write buffer. Cases of upper level peer caches 964 // generating CleanEvict and Writeback or simply CleanEvict and 965 // CleanEvict almost simultaneously will be caught by snoops sent out 966 // by crossbar. 967 WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(), 968 pkt->isSecure()); 969 if (wb_entry) { 970 assert(wb_entry->getNumTargets() == 1); 971 PacketPtr wbPkt = wb_entry->getTarget()->pkt; 972 assert(wbPkt->isWriteback()); 973 974 if (pkt->isCleanEviction()) { 975 // The CleanEvict and WritebackClean snoops into other 976 // peer caches of the same level while traversing the 977 // crossbar. If a copy of the block is found, the 978 // packet is deleted in the crossbar. Hence, none of 979 // the other upper level caches connected to this 980 // cache have the block, so we can clear the 981 // BLOCK_CACHED flag in the Writeback if set and 982 // discard the CleanEvict by returning true. 983 wbPkt->clearBlockCached(); 984 985 // A clean evict does not need to access the data array 986 lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 987 988 return true; 989 } else { 990 assert(pkt->cmd == MemCmd::WritebackDirty); 991 // Dirty writeback from above trumps our clean 992 // writeback... discard here 993 // Note: markInService will remove entry from writeback buffer. 994 markInService(wb_entry); 995 delete wbPkt; 996 } 997 } 998 } 999 1000 // Writeback handling is special case. We can write the block into 1001 // the cache without having a writeable copy (or any copy at all). 
1002 if (pkt->isWriteback()) { 1003 assert(blkSize == pkt->getSize()); 1004 1005 // we could get a clean writeback while we are having 1006 // outstanding accesses to a block, do the simple thing for 1007 // now and drop the clean writeback so that we do not upset 1008 // any ordering/decisions about ownership already taken 1009 if (pkt->cmd == MemCmd::WritebackClean && 1010 mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) { 1011 DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, " 1012 "dropping\n", pkt->getAddr()); 1013 1014 // A writeback searches for the block, then writes the data. 1015 // As the writeback is being dropped, the data is not touched, 1016 // and we just had to wait for the time to find a match in the 1017 // MSHR. As of now assume a mshr queue search takes as long as 1018 // a tag lookup for simplicity. 1019 lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 1020 1021 return true; 1022 } 1023 1024 if (!blk) { 1025 // need to do a replacement 1026 blk = allocateBlock(pkt, writebacks); 1027 if (!blk) { 1028 // no replaceable block available: give up, fwd to next level. 1029 incMissCount(pkt); 1030 1031 // A writeback searches for the block, then writes the data. 1032 // As the block could not be found, it was a tag-only access. 1033 lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 1034 1035 return false; 1036 } 1037 1038 blk->status |= BlkReadable; 1039 } else { 1040 if (compressor) { 1041 // This is an overwrite to an existing block, therefore we need 1042 // to check for data expansion (i.e., block was compressed with 1043 // a smaller size, and now it doesn't fit the entry anymore). 1044 // If that is the case we might need to evict blocks. 1045 // @todo Update compression data 1046 } 1047 } 1048 1049 // only mark the block dirty if we got a writeback command, 1050 // and leave it as is for a clean writeback 1051 if (pkt->cmd == MemCmd::WritebackDirty) { 1052 // TODO: the coherent cache can assert(!blk->isDirty()); 1053 blk->status |= BlkDirty; 1054 } 1055 // if the packet does not have sharers, it is passing 1056 // writable, and we got the writeback in Modified or Exclusive 1057 // state, if not we are in the Owned or Shared state 1058 if (!pkt->hasSharers()) { 1059 blk->status |= BlkWritable; 1060 } 1061 // nothing else to do; writeback doesn't expect response 1062 assert(!pkt->needsResponse()); 1063 pkt->writeDataToBlock(blk->data, blkSize); 1064 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print()); 1065 incHitCount(pkt); 1066 1067 // A writeback searches for the block, then writes the data 1068 lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency); 1069 1070 // When the packet metadata arrives, the tag lookup will be done while 1071 // the payload is arriving. Then the block will be ready to access as 1072 // soon as the fill is done 1073 blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay + 1074 std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay)); 1075 1076 return true; 1077 } else if (pkt->cmd == MemCmd::CleanEvict) { 1078 // A CleanEvict does not need to access the data array 1079 lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 1080 1081 if (blk) { 1082 // Found the block in the tags, need to stop CleanEvict from 1083 // propagating further down the hierarchy. Returning true will 1084 // treat the CleanEvict like a satisfied write request and delete 1085 // it. 
1086 return true; 1087 } 1088 // We didn't find the block here, propagate the CleanEvict further 1089 // down the memory hierarchy. Returning false will treat the CleanEvict 1090 // like a Writeback which could not find a replaceable block so has to 1091 // go to next level. 1092 return false; 1093 } else if (pkt->cmd == MemCmd::WriteClean) { 1094 // WriteClean handling is a special case. We can allocate a 1095 // block directly if it doesn't exist and we can update the 1096 // block immediately. The WriteClean transfers the ownership 1097 // of the block as well. 1098 assert(blkSize == pkt->getSize()); 1099 1100 if (!blk) { 1101 if (pkt->writeThrough()) { 1102 // A writeback searches for the block, then writes the data. 1103 // As the block could not be found, it was a tag-only access. 1104 lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 1105 1106 // if this is a write through packet, we don't try to 1107 // allocate if the block is not present 1108 return false; 1109 } else { 1110 // a writeback that misses needs to allocate a new block 1111 blk = allocateBlock(pkt, writebacks); 1112 if (!blk) { 1113 // no replaceable block available: give up, fwd to 1114 // next level. 1115 incMissCount(pkt); 1116 1117 // A writeback searches for the block, then writes the 1118 // data. As the block could not be found, it was a tag-only 1119 // access. 1120 lat = calculateTagOnlyLatency(pkt->headerDelay, 1121 tag_latency); 1122 1123 return false; 1124 } 1125 1126 blk->status |= BlkReadable; 1127 } 1128 } else { 1129 if (compressor) { 1130 // @todo Update compression data 1131 } 1132 } 1133 1134 // at this point either this is a writeback or a write-through 1135 // write clean operation and the block is already in this 1136 // cache, we need to update the data and the block flags 1137 assert(blk); 1138 // TODO: the coherent cache can assert(!blk->isDirty()); 1139 if (!pkt->writeThrough()) { 1140 blk->status |= BlkDirty; 1141 } 1142 // nothing else to do; writeback doesn't expect response 1143 assert(!pkt->needsResponse()); 1144 pkt->writeDataToBlock(blk->data, blkSize); 1145 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print()); 1146 1147 incHitCount(pkt); 1148 1149 // A writeback searches for the block, then writes the data 1150 lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency); 1151 1152 // When the packet metadata arrives, the tag lookup will be done while 1153 // the payload is arriving. Then the block will be ready to access as 1154 // soon as the fill is done 1155 blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay + 1156 std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay)); 1157 1158 // if this a write-through packet it will be sent to cache 1159 // below 1160 return !pkt->writeThrough(); 1161 } else if (blk && (pkt->needsWritable() ? blk->isWritable() : 1162 blk->isReadable())) { 1163 // OK to satisfy access 1164 incHitCount(pkt); 1165 1166 // Calculate access latency based on the need to access the data array 1167 if (pkt->isRead() || pkt->isWrite()) { 1168 lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency); 1169 1170 // When a block is compressed, it must first be decompressed 1171 // before being read. This adds to the access latency. 
1172 if (compressor && pkt->isRead()) { 1173 lat += compressor->getDecompressionLatency(blk); 1174 } 1175 } else { 1176 lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 1177 } 1178 1179 satisfyRequest(pkt, blk); 1180 maintainClusivity(pkt->fromCache(), blk); 1181 1182 return true; 1183 } 1184 1185 // Can't satisfy access normally... either no block (blk == nullptr) 1186 // or have block but need writable 1187 1188 incMissCount(pkt); 1189 1190 lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency); 1191 1192 if (!blk && pkt->isLLSC() && pkt->isWrite()) { 1193 // complete miss on store conditional... just give up now 1194 pkt->req->setExtraData(0); 1195 return true; 1196 } 1197 1198 return false; 1199} 1200 1201void 1202BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk) 1203{ 1204 if (from_cache && blk && blk->isValid() && !blk->isDirty() && 1205 clusivity == Enums::mostly_excl) { 1206 // if we have responded to a cache, and our block is still 1207 // valid, but not dirty, and this cache is mostly exclusive 1208 // with respect to the cache above, drop the block 1209 invalidateBlock(blk); 1210 } 1211} 1212 1213CacheBlk* 1214BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks, 1215 bool allocate) 1216{ 1217 assert(pkt->isResponse()); 1218 Addr addr = pkt->getAddr(); 1219 bool is_secure = pkt->isSecure(); 1220#if TRACING_ON 1221 CacheBlk::State old_state = blk ? blk->status : 0; 1222#endif 1223 1224 // When handling a fill, we should have no writes to this line. 1225 assert(addr == pkt->getBlockAddr(blkSize)); 1226 assert(!writeBuffer.findMatch(addr, is_secure)); 1227 1228 if (!blk) { 1229 // better have read new data... 1230 assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp); 1231 1232 // need to do a replacement if allocating, otherwise we stick 1233 // with the temporary storage 1234 blk = allocate ? allocateBlock(pkt, writebacks) : nullptr; 1235 1236 if (!blk) { 1237 // No replaceable block or a mostly exclusive 1238 // cache... just use temporary storage to complete the 1239 // current request and then get rid of it 1240 blk = tempBlock; 1241 tempBlock->insert(addr, is_secure); 1242 DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr, 1243 is_secure ? "s" : "ns"); 1244 } 1245 } else { 1246 // existing block... probably an upgrade 1247 // don't clear block status... 
if block is already dirty we 1248 // don't want to lose that 1249 } 1250 1251 // Block is guaranteed to be valid at this point 1252 assert(blk->isValid()); 1253 assert(blk->isSecure() == is_secure); 1254 assert(regenerateBlkAddr(blk) == addr); 1255 1256 blk->status |= BlkReadable; 1257 1258 // sanity check for whole-line writes, which should always be 1259 // marked as writable as part of the fill, and then later marked 1260 // dirty as part of satisfyRequest 1261 if (pkt->cmd == MemCmd::InvalidateResp) { 1262 assert(!pkt->hasSharers()); 1263 } 1264 1265 // here we deal with setting the appropriate state of the line, 1266 // and we start by looking at the hasSharers flag, and ignore the 1267 // cacheResponding flag (normally signalling dirty data) if the 1268 // packet has sharers, thus the line is never allocated as Owned 1269 // (dirty but not writable), and always ends up being either 1270 // Shared, Exclusive or Modified, see Packet::setCacheResponding 1271 // for more details 1272 if (!pkt->hasSharers()) { 1273 // we could get a writable line from memory (rather than a 1274 // cache) even in a read-only cache, note that we set this bit 1275 // even for a read-only cache, possibly revisit this decision 1276 blk->status |= BlkWritable; 1277 1278 // check if we got this via cache-to-cache transfer (i.e., from a 1279 // cache that had the block in Modified or Owned state) 1280 if (pkt->cacheResponding()) { 1281 // we got the block in Modified state, and invalidated the 1282 // owners copy 1283 blk->status |= BlkDirty; 1284 1285 chatty_assert(!isReadOnly, "Should never see dirty snoop response " 1286 "in read-only cache %s\n", name()); 1287 1288 } else if (pkt->cmd.isSWPrefetch() && pkt->needsWritable()) { 1289 // All other copies of the block were invalidated and we 1290 // have an exclusive copy. 1291 1292 // The coherence protocol assumes that if we fetched an 1293 // exclusive copy of the block, we have the intention to 1294 // modify it. Therefore the MSHR for the PrefetchExReq has 1295 // been the point of ordering and this cache has commited 1296 // to respond to snoops for the block. 1297 // 1298 // In most cases this is true anyway - a PrefetchExReq 1299 // will be followed by a WriteReq. However, if that 1300 // doesn't happen, the block is not marked as dirty and 1301 // the cache doesn't respond to snoops that has committed 1302 // to do so. 1303 // 1304 // To avoid deadlocks in cases where there is a snoop 1305 // between the PrefetchExReq and the expected WriteReq, we 1306 // proactively mark the block as Dirty. 1307 1308 blk->status |= BlkDirty; 1309 1310 panic_if(!isReadOnly, "Prefetch exclusive requests from read-only " 1311 "cache %s\n", name()); 1312 } 1313 } 1314 1315 DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n", 1316 addr, is_secure ? 
"s" : "ns", old_state, blk->print()); 1317 1318 // if we got new data, copy it in (checking for a read response 1319 // and a response that has data is the same in the end) 1320 if (pkt->isRead()) { 1321 // sanity checks 1322 assert(pkt->hasData()); 1323 assert(pkt->getSize() == blkSize); 1324 1325 pkt->writeDataToBlock(blk->data, blkSize); 1326 } 1327 // The block will be ready when the payload arrives and the fill is done 1328 blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay + 1329 pkt->payloadDelay); 1330 1331 return blk; 1332} 1333 1334CacheBlk* 1335BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks) 1336{ 1337 // Get address 1338 const Addr addr = pkt->getAddr(); 1339 1340 // Get secure bit 1341 const bool is_secure = pkt->isSecure(); 1342 1343 // Block size and compression related access latency. Only relevant if 1344 // using a compressor, otherwise there is no extra delay, and the block 1345 // is fully sized 1346 std::size_t blk_size_bits = blkSize*8; 1347 Cycles compression_lat = Cycles(0); 1348 Cycles decompression_lat = Cycles(0); 1349 1350 // If a compressor is being used, it is called to compress data before 1351 // insertion. Although in Gem5 the data is stored uncompressed, even if a 1352 // compressor is used, the compression/decompression methods are called to 1353 // calculate the amount of extra cycles needed to read or write compressed 1354 // blocks. 1355 if (compressor) { 1356 compressor->compress(pkt->getConstPtr<uint64_t>(), compression_lat, 1357 decompression_lat, blk_size_bits); 1358 } 1359 1360 // Find replacement victim 1361 std::vector<CacheBlk*> evict_blks; 1362 CacheBlk *victim = tags->findVictim(addr, is_secure, blk_size_bits, 1363 evict_blks); 1364 1365 // It is valid to return nullptr if there is no victim 1366 if (!victim) 1367 return nullptr; 1368 1369 // Print victim block's information 1370 DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print()); 1371 1372 // Check for transient state allocations. If any of the entries listed 1373 // for eviction has a transient state, the allocation fails 1374 bool replacement = false; 1375 for (const auto& blk : evict_blks) { 1376 if (blk->isValid()) { 1377 replacement = true; 1378 1379 Addr repl_addr = regenerateBlkAddr(blk); 1380 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure()); 1381 if (repl_mshr) { 1382 // must be an outstanding upgrade or clean request 1383 // on a block we're about to replace... 1384 assert((!blk->isWritable() && repl_mshr->needsWritable()) || 1385 repl_mshr->isCleaning()); 1386 1387 // too hard to replace block with transient state 1388 // allocation failed, block not inserted 1389 return nullptr; 1390 } 1391 } 1392 } 1393 1394 // The victim will be replaced by a new entry, so increase the replacement 1395 // counter if a valid block is being replaced 1396 if (replacement) { 1397 // Evict valid blocks associated to this victim block 1398 for (const auto& blk : evict_blks) { 1399 if (blk->isValid()) { 1400 DPRINTF(CacheRepl, "Evicting %s (%#llx) to make room for " \ 1401 "%#llx (%s)\n", blk->print(), regenerateBlkAddr(blk), 1402 addr, is_secure); 1403 1404 if (blk->wasPrefetched()) { 1405 unusedPrefetches++; 1406 } 1407 1408 evictBlock(blk, writebacks); 1409 } 1410 } 1411 1412 replacements++; 1413 } 1414 1415 // If using a compressor, set compression data. This must be done before 1416 // block insertion, as compressed tags use this information. 
1417 if (compressor) { 1418 compressor->setSizeBits(victim, blk_size_bits); 1419 compressor->setDecompressionLatency(victim, decompression_lat); 1420 } 1421 1422 // Insert new block at victimized entry 1423 tags->insertBlock(pkt, victim); 1424 1425 return victim; 1426} 1427 1428void 1429BaseCache::invalidateBlock(CacheBlk *blk) 1430{ 1431 // If handling a block present in the Tags, let it do its invalidation 1432 // process, which will update stats and invalidate the block itself 1433 if (blk != tempBlock) { 1434 tags->invalidate(blk); 1435 } else { 1436 tempBlock->invalidate(); 1437 } 1438} 1439 1440void 1441BaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks) 1442{ 1443 PacketPtr pkt = evictBlock(blk); 1444 if (pkt) { 1445 writebacks.push_back(pkt); 1446 } 1447} 1448 1449PacketPtr 1450BaseCache::writebackBlk(CacheBlk *blk) 1451{ 1452 chatty_assert(!isReadOnly || writebackClean, 1453 "Writeback from read-only cache"); 1454 assert(blk && blk->isValid() && (blk->isDirty() || writebackClean)); 1455 1456 writebacks[Request::wbMasterId]++; 1457 1458 RequestPtr req = std::make_shared<Request>( 1459 regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId); 1460 1461 if (blk->isSecure()) 1462 req->setFlags(Request::SECURE); 1463 1464 req->taskId(blk->task_id); 1465 1466 PacketPtr pkt = 1467 new Packet(req, blk->isDirty() ? 1468 MemCmd::WritebackDirty : MemCmd::WritebackClean); 1469 1470 DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n", 1471 pkt->print(), blk->isWritable(), blk->isDirty()); 1472 1473 if (blk->isWritable()) { 1474 // not asserting shared means we pass the block in modified 1475 // state, mark our own block non-writeable 1476 blk->status &= ~BlkWritable; 1477 } else { 1478 // we are in the Owned state, tell the receiver 1479 pkt->setHasSharers(); 1480 } 1481 1482 // make sure the block is not marked dirty 1483 blk->status &= ~BlkDirty; 1484 1485 pkt->allocate(); 1486 pkt->setDataFromBlock(blk->data, blkSize); 1487 1488 // When a block is compressed, it must first be decompressed before being 1489 // sent for writeback. 1490 if (compressor) { 1491 pkt->payloadDelay = compressor->getDecompressionLatency(blk); 1492 } 1493 1494 return pkt; 1495} 1496 1497PacketPtr 1498BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id) 1499{ 1500 RequestPtr req = std::make_shared<Request>( 1501 regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId); 1502 1503 if (blk->isSecure()) { 1504 req->setFlags(Request::SECURE); 1505 } 1506 req->taskId(blk->task_id); 1507 1508 PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id); 1509 1510 if (dest) { 1511 req->setFlags(dest); 1512 pkt->setWriteThrough(); 1513 } 1514 1515 DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(), 1516 blk->isWritable(), blk->isDirty()); 1517 1518 if (blk->isWritable()) { 1519 // not asserting shared means we pass the block in modified 1520 // state, mark our own block non-writeable 1521 blk->status &= ~BlkWritable; 1522 } else { 1523 // we are in the Owned state, tell the receiver 1524 pkt->setHasSharers(); 1525 } 1526 1527 // make sure the block is not marked dirty 1528 blk->status &= ~BlkDirty; 1529 1530 pkt->allocate(); 1531 pkt->setDataFromBlock(blk->data, blkSize); 1532 1533 // When a block is compressed, it must first be decompressed before being 1534 // sent for writeback. 
1535 if (compressor) { 1536 pkt->payloadDelay = compressor->getDecompressionLatency(blk); 1537 } 1538 1539 return pkt; 1540} 1541 1542 1543void 1544BaseCache::memWriteback() 1545{ 1546 tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); }); 1547} 1548 1549void 1550BaseCache::memInvalidate() 1551{ 1552 tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); }); 1553} 1554 1555bool 1556BaseCache::isDirty() const 1557{ 1558 return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); }); 1559} 1560 1561bool 1562BaseCache::coalesce() const 1563{ 1564 return writeAllocator && writeAllocator->coalesce(); 1565} 1566 1567void 1568BaseCache::writebackVisitor(CacheBlk &blk) 1569{ 1570 if (blk.isDirty()) { 1571 assert(blk.isValid()); 1572 1573 RequestPtr request = std::make_shared<Request>( 1574 regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId); 1575 1576 request->taskId(blk.task_id); 1577 if (blk.isSecure()) { 1578 request->setFlags(Request::SECURE); 1579 } 1580 1581 Packet packet(request, MemCmd::WriteReq); 1582 packet.dataStatic(blk.data); 1583 1584 memSidePort.sendFunctional(&packet); 1585 1586 blk.status &= ~BlkDirty; 1587 } 1588} 1589 1590void 1591BaseCache::invalidateVisitor(CacheBlk &blk) 1592{ 1593 if (blk.isDirty()) 1594 warn_once("Invalidating dirty cache lines. " \ 1595 "Expect things to break.\n"); 1596 1597 if (blk.isValid()) { 1598 assert(!blk.isDirty()); 1599 invalidateBlock(&blk); 1600 } 1601} 1602 1603Tick 1604BaseCache::nextQueueReadyTime() const 1605{ 1606 Tick nextReady = std::min(mshrQueue.nextReadyTime(), 1607 writeBuffer.nextReadyTime()); 1608 1609 // Don't signal prefetch ready time if no MSHRs available 1610 // Will signal once enoguh MSHRs are deallocated 1611 if (prefetcher && mshrQueue.canPrefetch()) { 1612 nextReady = std::min(nextReady, 1613 prefetcher->nextPrefetchReadyTime()); 1614 } 1615 1616 return nextReady; 1617} 1618 1619 1620bool 1621BaseCache::sendMSHRQueuePacket(MSHR* mshr) 1622{ 1623 assert(mshr); 1624 1625 // use request from 1st target 1626 PacketPtr tgt_pkt = mshr->getTarget()->pkt; 1627 1628 DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print()); 1629 1630 // if the cache is in write coalescing mode or (additionally) in 1631 // no allocation mode, and we have a write packet with an MSHR 1632 // that is not a whole-line write (due to incompatible flags etc), 1633 // then reset the write mode 1634 if (writeAllocator && writeAllocator->coalesce() && tgt_pkt->isWrite()) { 1635 if (!mshr->isWholeLineWrite()) { 1636 // if we are currently write coalescing, hold on the 1637 // MSHR as many cycles extra as we need to completely 1638 // write a cache line 1639 if (writeAllocator->delay(mshr->blkAddr)) { 1640 Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod(); 1641 DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow " 1642 "for write coalescing\n", tgt_pkt->print(), delay); 1643 mshrQueue.delay(mshr, delay); 1644 return false; 1645 } else { 1646 writeAllocator->reset(); 1647 } 1648 } else { 1649 writeAllocator->resetDelay(mshr->blkAddr); 1650 } 1651 } 1652 1653 CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure); 1654 1655 // either a prefetch that is not present upstream, or a normal 1656 // MSHR request, proceed to get the packet to send downstream 1657 PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable(), 1658 mshr->isWholeLineWrite()); 1659 1660 mshr->isForward = (pkt == nullptr); 1661 1662 if (mshr->isForward) { 1663 // not a cache block request, but a response is expected 1664 // 
make copy of current packet to forward, keep current 1665        // copy for response handling 1666        pkt = new Packet(tgt_pkt, false, true); 1667        assert(!pkt->isWrite()); 1668    } 1669 1670    // play it safe and append (rather than set) the sender state, 1671    // as forwarded packets may already have existing state 1672    pkt->pushSenderState(mshr); 1673 1674    if (pkt->isClean() && blk && blk->isDirty()) { 1675        // A cache clean operation is looking for a dirty block. Mark 1676        // the packet so that the destination xbar can determine that 1677        // there will be a follow-up write packet as well. 1678        pkt->setSatisfied(); 1679    } 1680 1681    if (!memSidePort.sendTimingReq(pkt)) { 1682        // we are awaiting a retry, but we 1683        // delete the packet and will be creating a new packet 1684        // when we get the opportunity 1685        delete pkt; 1686 1687        // note that we have now masked any requestBus and 1688        // schedSendEvent (we will wait for a retry before 1689        // doing anything), and this is so even if we do not 1690        // care about this packet and might override it before 1691        // it gets retried 1692        return true; 1693    } else { 1694        // As part of the call to sendTimingReq the packet is 1695        // forwarded to all neighbouring caches (and any caches 1696        // above them) as a snoop. Thus at this point we know if 1697        // any of the neighbouring caches are responding, and if 1698        // so, we know it is dirty, and we can determine if it is 1699        // being passed as Modified, making our MSHR the ordering 1700        // point 1701        bool pending_modified_resp = !pkt->hasSharers() && 1702            pkt->cacheResponding(); 1703        markInService(mshr, pending_modified_resp); 1704 1705        if (pkt->isClean() && blk && blk->isDirty()) { 1706            // A cache clean operation is looking for a dirty 1707            // block. If a dirty block is encountered a WriteClean 1708            // will update any copies on the path to memory 1709            // down to the point of reference. 1710            DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n", 1711                    __func__, pkt->print(), blk->print()); 1712            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), 1713                                             pkt->id); 1714            PacketList writebacks; 1715            writebacks.push_back(wb_pkt); 1716            doWritebacks(writebacks, 0); 1717        } 1718 1719        return false; 1720    } 1721} 1722 1723bool 1724BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry) 1725{ 1726    assert(wq_entry); 1727 1728    // always a single target for write queue entries 1729    PacketPtr tgt_pkt = wq_entry->getTarget()->pkt; 1730 1731    DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print()); 1732 1733    // forward as is, both for evictions and uncacheable writes 1734    if (!memSidePort.sendTimingReq(tgt_pkt)) { 1735        // note that we have now masked any requestBus and 1736        // schedSendEvent (we will wait for a retry before 1737        // doing anything), and this is so even if we do not 1738        // care about this packet and might override it before 1739        // it gets retried 1740        return true; 1741    } else { 1742        markInService(wq_entry); 1743        return false; 1744    } 1745} 1746 1747void 1748BaseCache::serialize(CheckpointOut &cp) const 1749{ 1750    bool dirty(isDirty()); 1751 1752    if (dirty) { 1753        warn("*** The cache still contains dirty data. ***\n"); 1754        warn("    Make sure to drain the system using the correct flags.\n"); 1755        warn("    This checkpoint will not restore correctly " \ 1756             "and dirty data in the cache will be lost!\n"); 1757    } 1758 1759    // Since we don't checkpoint the data in the cache, any dirty data 1760    // will be lost when restoring from a checkpoint of a system that 1761    // wasn't drained properly.
Flag the checkpoint as invalid if the 1762 // cache contains dirty data. 1763 bool bad_checkpoint(dirty); 1764 SERIALIZE_SCALAR(bad_checkpoint); 1765} 1766 1767void 1768BaseCache::unserialize(CheckpointIn &cp) 1769{ 1770 bool bad_checkpoint; 1771 UNSERIALIZE_SCALAR(bad_checkpoint); 1772 if (bad_checkpoint) { 1773 fatal("Restoring from checkpoints with dirty caches is not " 1774 "supported in the classic memory system. Please remove any " 1775 "caches or drain them properly before taking checkpoints.\n"); 1776 } 1777} 1778 1779void 1780BaseCache::regStats() 1781{ 1782 ClockedObject::regStats(); 1783 1784 using namespace Stats; 1785 1786 // Hit statistics 1787 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1788 MemCmd cmd(access_idx); 1789 const string &cstr = cmd.toString(); 1790 1791 hits[access_idx] 1792 .init(system->maxMasters()) 1793 .name(name() + "." + cstr + "_hits") 1794 .desc("number of " + cstr + " hits") 1795 .flags(total | nozero | nonan) 1796 ; 1797 for (int i = 0; i < system->maxMasters(); i++) { 1798 hits[access_idx].subname(i, system->getMasterName(i)); 1799 } 1800 } 1801 1802// These macros make it easier to sum the right subset of commands and 1803// to change the subset of commands that are considered "demand" vs 1804// "non-demand" 1805#define SUM_DEMAND(s) \ 1806 (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \ 1807 s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq]) 1808 1809// should writebacks be included here? prior code was inconsistent... 1810#define SUM_NON_DEMAND(s) \ 1811 (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq] + s[MemCmd::SoftPFExReq]) 1812 1813 demandHits 1814 .name(name() + ".demand_hits") 1815 .desc("number of demand (read+write) hits") 1816 .flags(total | nozero | nonan) 1817 ; 1818 demandHits = SUM_DEMAND(hits); 1819 for (int i = 0; i < system->maxMasters(); i++) { 1820 demandHits.subname(i, system->getMasterName(i)); 1821 } 1822 1823 overallHits 1824 .name(name() + ".overall_hits") 1825 .desc("number of overall hits") 1826 .flags(total | nozero | nonan) 1827 ; 1828 overallHits = demandHits + SUM_NON_DEMAND(hits); 1829 for (int i = 0; i < system->maxMasters(); i++) { 1830 overallHits.subname(i, system->getMasterName(i)); 1831 } 1832 1833 // Miss statistics 1834 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1835 MemCmd cmd(access_idx); 1836 const string &cstr = cmd.toString(); 1837 1838 misses[access_idx] 1839 .init(system->maxMasters()) 1840 .name(name() + "." 
+ cstr + "_misses") 1841 .desc("number of " + cstr + " misses") 1842 .flags(total | nozero | nonan) 1843 ; 1844 for (int i = 0; i < system->maxMasters(); i++) { 1845 misses[access_idx].subname(i, system->getMasterName(i)); 1846 } 1847 } 1848 1849 demandMisses 1850 .name(name() + ".demand_misses") 1851 .desc("number of demand (read+write) misses") 1852 .flags(total | nozero | nonan) 1853 ; 1854 demandMisses = SUM_DEMAND(misses); 1855 for (int i = 0; i < system->maxMasters(); i++) { 1856 demandMisses.subname(i, system->getMasterName(i)); 1857 } 1858 1859 overallMisses 1860 .name(name() + ".overall_misses") 1861 .desc("number of overall misses") 1862 .flags(total | nozero | nonan) 1863 ; 1864 overallMisses = demandMisses + SUM_NON_DEMAND(misses); 1865 for (int i = 0; i < system->maxMasters(); i++) { 1866 overallMisses.subname(i, system->getMasterName(i)); 1867 } 1868 1869 // Miss latency statistics 1870 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1871 MemCmd cmd(access_idx); 1872 const string &cstr = cmd.toString(); 1873 1874 missLatency[access_idx] 1875 .init(system->maxMasters()) 1876 .name(name() + "." + cstr + "_miss_latency") 1877 .desc("number of " + cstr + " miss cycles") 1878 .flags(total | nozero | nonan) 1879 ; 1880 for (int i = 0; i < system->maxMasters(); i++) { 1881 missLatency[access_idx].subname(i, system->getMasterName(i)); 1882 } 1883 } 1884 1885 demandMissLatency 1886 .name(name() + ".demand_miss_latency") 1887 .desc("number of demand (read+write) miss cycles") 1888 .flags(total | nozero | nonan) 1889 ; 1890 demandMissLatency = SUM_DEMAND(missLatency); 1891 for (int i = 0; i < system->maxMasters(); i++) { 1892 demandMissLatency.subname(i, system->getMasterName(i)); 1893 } 1894 1895 overallMissLatency 1896 .name(name() + ".overall_miss_latency") 1897 .desc("number of overall miss cycles") 1898 .flags(total | nozero | nonan) 1899 ; 1900 overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency); 1901 for (int i = 0; i < system->maxMasters(); i++) { 1902 overallMissLatency.subname(i, system->getMasterName(i)); 1903 } 1904 1905 // access formulas 1906 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1907 MemCmd cmd(access_idx); 1908 const string &cstr = cmd.toString(); 1909 1910 accesses[access_idx] 1911 .name(name() + "." 
+ cstr + "_accesses") 1912 .desc("number of " + cstr + " accesses(hits+misses)") 1913 .flags(total | nozero | nonan) 1914 ; 1915 accesses[access_idx] = hits[access_idx] + misses[access_idx]; 1916 1917 for (int i = 0; i < system->maxMasters(); i++) { 1918 accesses[access_idx].subname(i, system->getMasterName(i)); 1919 } 1920 } 1921 1922 demandAccesses 1923 .name(name() + ".demand_accesses") 1924 .desc("number of demand (read+write) accesses") 1925 .flags(total | nozero | nonan) 1926 ; 1927 demandAccesses = demandHits + demandMisses; 1928 for (int i = 0; i < system->maxMasters(); i++) { 1929 demandAccesses.subname(i, system->getMasterName(i)); 1930 } 1931 1932 overallAccesses 1933 .name(name() + ".overall_accesses") 1934 .desc("number of overall (read+write) accesses") 1935 .flags(total | nozero | nonan) 1936 ; 1937 overallAccesses = overallHits + overallMisses; 1938 for (int i = 0; i < system->maxMasters(); i++) { 1939 overallAccesses.subname(i, system->getMasterName(i)); 1940 } 1941 1942 // miss rate formulas 1943 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1944 MemCmd cmd(access_idx); 1945 const string &cstr = cmd.toString(); 1946 1947 missRate[access_idx] 1948 .name(name() + "." + cstr + "_miss_rate") 1949 .desc("miss rate for " + cstr + " accesses") 1950 .flags(total | nozero | nonan) 1951 ; 1952 missRate[access_idx] = misses[access_idx] / accesses[access_idx]; 1953 1954 for (int i = 0; i < system->maxMasters(); i++) { 1955 missRate[access_idx].subname(i, system->getMasterName(i)); 1956 } 1957 } 1958 1959 demandMissRate 1960 .name(name() + ".demand_miss_rate") 1961 .desc("miss rate for demand accesses") 1962 .flags(total | nozero | nonan) 1963 ; 1964 demandMissRate = demandMisses / demandAccesses; 1965 for (int i = 0; i < system->maxMasters(); i++) { 1966 demandMissRate.subname(i, system->getMasterName(i)); 1967 } 1968 1969 overallMissRate 1970 .name(name() + ".overall_miss_rate") 1971 .desc("miss rate for overall accesses") 1972 .flags(total | nozero | nonan) 1973 ; 1974 overallMissRate = overallMisses / overallAccesses; 1975 for (int i = 0; i < system->maxMasters(); i++) { 1976 overallMissRate.subname(i, system->getMasterName(i)); 1977 } 1978 1979 // miss latency formulas 1980 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1981 MemCmd cmd(access_idx); 1982 const string &cstr = cmd.toString(); 1983 1984 avgMissLatency[access_idx] 1985 .name(name() + "." 
+ cstr + "_avg_miss_latency") 1986 .desc("average " + cstr + " miss latency") 1987 .flags(total | nozero | nonan) 1988 ; 1989 avgMissLatency[access_idx] = 1990 missLatency[access_idx] / misses[access_idx]; 1991 1992 for (int i = 0; i < system->maxMasters(); i++) { 1993 avgMissLatency[access_idx].subname(i, system->getMasterName(i)); 1994 } 1995 } 1996 1997 demandAvgMissLatency 1998 .name(name() + ".demand_avg_miss_latency") 1999 .desc("average overall miss latency") 2000 .flags(total | nozero | nonan) 2001 ; 2002 demandAvgMissLatency = demandMissLatency / demandMisses; 2003 for (int i = 0; i < system->maxMasters(); i++) { 2004 demandAvgMissLatency.subname(i, system->getMasterName(i)); 2005 } 2006 2007 overallAvgMissLatency 2008 .name(name() + ".overall_avg_miss_latency") 2009 .desc("average overall miss latency") 2010 .flags(total | nozero | nonan) 2011 ; 2012 overallAvgMissLatency = overallMissLatency / overallMisses; 2013 for (int i = 0; i < system->maxMasters(); i++) { 2014 overallAvgMissLatency.subname(i, system->getMasterName(i)); 2015 } 2016 2017 blocked_cycles.init(NUM_BLOCKED_CAUSES); 2018 blocked_cycles 2019 .name(name() + ".blocked_cycles") 2020 .desc("number of cycles access was blocked") 2021 .subname(Blocked_NoMSHRs, "no_mshrs") 2022 .subname(Blocked_NoTargets, "no_targets") 2023 ; 2024 2025 2026 blocked_causes.init(NUM_BLOCKED_CAUSES); 2027 blocked_causes 2028 .name(name() + ".blocked") 2029 .desc("number of cycles access was blocked") 2030 .subname(Blocked_NoMSHRs, "no_mshrs") 2031 .subname(Blocked_NoTargets, "no_targets") 2032 ; 2033 2034 avg_blocked 2035 .name(name() + ".avg_blocked_cycles") 2036 .desc("average number of cycles each access was blocked") 2037 .subname(Blocked_NoMSHRs, "no_mshrs") 2038 .subname(Blocked_NoTargets, "no_targets") 2039 ; 2040 2041 avg_blocked = blocked_cycles / blocked_causes; 2042 2043 unusedPrefetches 2044 .name(name() + ".unused_prefetches") 2045 .desc("number of HardPF blocks evicted w/o reference") 2046 .flags(nozero) 2047 ; 2048 2049 writebacks 2050 .init(system->maxMasters()) 2051 .name(name() + ".writebacks") 2052 .desc("number of writebacks") 2053 .flags(total | nozero | nonan) 2054 ; 2055 for (int i = 0; i < system->maxMasters(); i++) { 2056 writebacks.subname(i, system->getMasterName(i)); 2057 } 2058 2059 // MSHR statistics 2060 // MSHR hit statistics 2061 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2062 MemCmd cmd(access_idx); 2063 const string &cstr = cmd.toString(); 2064 2065 mshr_hits[access_idx] 2066 .init(system->maxMasters()) 2067 .name(name() + "." 
+ cstr + "_mshr_hits") 2068 .desc("number of " + cstr + " MSHR hits") 2069 .flags(total | nozero | nonan) 2070 ; 2071 for (int i = 0; i < system->maxMasters(); i++) { 2072 mshr_hits[access_idx].subname(i, system->getMasterName(i)); 2073 } 2074 } 2075 2076 demandMshrHits 2077 .name(name() + ".demand_mshr_hits") 2078 .desc("number of demand (read+write) MSHR hits") 2079 .flags(total | nozero | nonan) 2080 ; 2081 demandMshrHits = SUM_DEMAND(mshr_hits); 2082 for (int i = 0; i < system->maxMasters(); i++) { 2083 demandMshrHits.subname(i, system->getMasterName(i)); 2084 } 2085 2086 overallMshrHits 2087 .name(name() + ".overall_mshr_hits") 2088 .desc("number of overall MSHR hits") 2089 .flags(total | nozero | nonan) 2090 ; 2091 overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits); 2092 for (int i = 0; i < system->maxMasters(); i++) { 2093 overallMshrHits.subname(i, system->getMasterName(i)); 2094 } 2095 2096 // MSHR miss statistics 2097 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2098 MemCmd cmd(access_idx); 2099 const string &cstr = cmd.toString(); 2100 2101 mshr_misses[access_idx] 2102 .init(system->maxMasters()) 2103 .name(name() + "." + cstr + "_mshr_misses") 2104 .desc("number of " + cstr + " MSHR misses") 2105 .flags(total | nozero | nonan) 2106 ; 2107 for (int i = 0; i < system->maxMasters(); i++) { 2108 mshr_misses[access_idx].subname(i, system->getMasterName(i)); 2109 } 2110 } 2111 2112 demandMshrMisses 2113 .name(name() + ".demand_mshr_misses") 2114 .desc("number of demand (read+write) MSHR misses") 2115 .flags(total | nozero | nonan) 2116 ; 2117 demandMshrMisses = SUM_DEMAND(mshr_misses); 2118 for (int i = 0; i < system->maxMasters(); i++) { 2119 demandMshrMisses.subname(i, system->getMasterName(i)); 2120 } 2121 2122 overallMshrMisses 2123 .name(name() + ".overall_mshr_misses") 2124 .desc("number of overall MSHR misses") 2125 .flags(total | nozero | nonan) 2126 ; 2127 overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses); 2128 for (int i = 0; i < system->maxMasters(); i++) { 2129 overallMshrMisses.subname(i, system->getMasterName(i)); 2130 } 2131 2132 // MSHR miss latency statistics 2133 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2134 MemCmd cmd(access_idx); 2135 const string &cstr = cmd.toString(); 2136 2137 mshr_miss_latency[access_idx] 2138 .init(system->maxMasters()) 2139 .name(name() + "." 
+ cstr + "_mshr_miss_latency") 2140 .desc("number of " + cstr + " MSHR miss cycles") 2141 .flags(total | nozero | nonan) 2142 ; 2143 for (int i = 0; i < system->maxMasters(); i++) { 2144 mshr_miss_latency[access_idx].subname(i, system->getMasterName(i)); 2145 } 2146 } 2147 2148 demandMshrMissLatency 2149 .name(name() + ".demand_mshr_miss_latency") 2150 .desc("number of demand (read+write) MSHR miss cycles") 2151 .flags(total | nozero | nonan) 2152 ; 2153 demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency); 2154 for (int i = 0; i < system->maxMasters(); i++) { 2155 demandMshrMissLatency.subname(i, system->getMasterName(i)); 2156 } 2157 2158 overallMshrMissLatency 2159 .name(name() + ".overall_mshr_miss_latency") 2160 .desc("number of overall MSHR miss cycles") 2161 .flags(total | nozero | nonan) 2162 ; 2163 overallMshrMissLatency = 2164 demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency); 2165 for (int i = 0; i < system->maxMasters(); i++) { 2166 overallMshrMissLatency.subname(i, system->getMasterName(i)); 2167 } 2168 2169 // MSHR uncacheable statistics 2170 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2171 MemCmd cmd(access_idx); 2172 const string &cstr = cmd.toString(); 2173 2174 mshr_uncacheable[access_idx] 2175 .init(system->maxMasters()) 2176 .name(name() + "." + cstr + "_mshr_uncacheable") 2177 .desc("number of " + cstr + " MSHR uncacheable") 2178 .flags(total | nozero | nonan) 2179 ; 2180 for (int i = 0; i < system->maxMasters(); i++) { 2181 mshr_uncacheable[access_idx].subname(i, system->getMasterName(i)); 2182 } 2183 } 2184 2185 overallMshrUncacheable 2186 .name(name() + ".overall_mshr_uncacheable_misses") 2187 .desc("number of overall MSHR uncacheable misses") 2188 .flags(total | nozero | nonan) 2189 ; 2190 overallMshrUncacheable = 2191 SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable); 2192 for (int i = 0; i < system->maxMasters(); i++) { 2193 overallMshrUncacheable.subname(i, system->getMasterName(i)); 2194 } 2195 2196 // MSHR miss latency statistics 2197 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2198 MemCmd cmd(access_idx); 2199 const string &cstr = cmd.toString(); 2200 2201 mshr_uncacheable_lat[access_idx] 2202 .init(system->maxMasters()) 2203 .name(name() + "." + cstr + "_mshr_uncacheable_latency") 2204 .desc("number of " + cstr + " MSHR uncacheable cycles") 2205 .flags(total | nozero | nonan) 2206 ; 2207 for (int i = 0; i < system->maxMasters(); i++) { 2208 mshr_uncacheable_lat[access_idx].subname( 2209 i, system->getMasterName(i)); 2210 } 2211 } 2212 2213 overallMshrUncacheableLatency 2214 .name(name() + ".overall_mshr_uncacheable_latency") 2215 .desc("number of overall MSHR uncacheable cycles") 2216 .flags(total | nozero | nonan) 2217 ; 2218 overallMshrUncacheableLatency = 2219 SUM_DEMAND(mshr_uncacheable_lat) + 2220 SUM_NON_DEMAND(mshr_uncacheable_lat); 2221 for (int i = 0; i < system->maxMasters(); i++) { 2222 overallMshrUncacheableLatency.subname(i, system->getMasterName(i)); 2223 } 2224 2225#if 0 2226 // MSHR access formulas 2227 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2228 MemCmd cmd(access_idx); 2229 const string &cstr = cmd.toString(); 2230 2231 mshrAccesses[access_idx] 2232 .name(name() + "." 
+ cstr + "_mshr_accesses") 2233 .desc("number of " + cstr + " mshr accesses(hits+misses)") 2234 .flags(total | nozero | nonan) 2235 ; 2236 mshrAccesses[access_idx] = 2237 mshr_hits[access_idx] + mshr_misses[access_idx] 2238 + mshr_uncacheable[access_idx]; 2239 } 2240 2241 demandMshrAccesses 2242 .name(name() + ".demand_mshr_accesses") 2243 .desc("number of demand (read+write) mshr accesses") 2244 .flags(total | nozero | nonan) 2245 ; 2246 demandMshrAccesses = demandMshrHits + demandMshrMisses; 2247 2248 overallMshrAccesses 2249 .name(name() + ".overall_mshr_accesses") 2250 .desc("number of overall (read+write) mshr accesses") 2251 .flags(total | nozero | nonan) 2252 ; 2253 overallMshrAccesses = overallMshrHits + overallMshrMisses 2254 + overallMshrUncacheable; 2255#endif 2256 2257 // MSHR miss rate formulas 2258 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2259 MemCmd cmd(access_idx); 2260 const string &cstr = cmd.toString(); 2261 2262 mshrMissRate[access_idx] 2263 .name(name() + "." + cstr + "_mshr_miss_rate") 2264 .desc("mshr miss rate for " + cstr + " accesses") 2265 .flags(total | nozero | nonan) 2266 ; 2267 mshrMissRate[access_idx] = 2268 mshr_misses[access_idx] / accesses[access_idx]; 2269 2270 for (int i = 0; i < system->maxMasters(); i++) { 2271 mshrMissRate[access_idx].subname(i, system->getMasterName(i)); 2272 } 2273 } 2274 2275 demandMshrMissRate 2276 .name(name() + ".demand_mshr_miss_rate") 2277 .desc("mshr miss rate for demand accesses") 2278 .flags(total | nozero | nonan) 2279 ; 2280 demandMshrMissRate = demandMshrMisses / demandAccesses; 2281 for (int i = 0; i < system->maxMasters(); i++) { 2282 demandMshrMissRate.subname(i, system->getMasterName(i)); 2283 } 2284 2285 overallMshrMissRate 2286 .name(name() + ".overall_mshr_miss_rate") 2287 .desc("mshr miss rate for overall accesses") 2288 .flags(total | nozero | nonan) 2289 ; 2290 overallMshrMissRate = overallMshrMisses / overallAccesses; 2291 for (int i = 0; i < system->maxMasters(); i++) { 2292 overallMshrMissRate.subname(i, system->getMasterName(i)); 2293 } 2294 2295 // mshrMiss latency formulas 2296 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2297 MemCmd cmd(access_idx); 2298 const string &cstr = cmd.toString(); 2299 2300 avgMshrMissLatency[access_idx] 2301 .name(name() + "." 
+ cstr + "_avg_mshr_miss_latency") 2302 .desc("average " + cstr + " mshr miss latency") 2303 .flags(total | nozero | nonan) 2304 ; 2305 avgMshrMissLatency[access_idx] = 2306 mshr_miss_latency[access_idx] / mshr_misses[access_idx]; 2307 2308 for (int i = 0; i < system->maxMasters(); i++) { 2309 avgMshrMissLatency[access_idx].subname( 2310 i, system->getMasterName(i)); 2311 } 2312 } 2313 2314 demandAvgMshrMissLatency 2315 .name(name() + ".demand_avg_mshr_miss_latency") 2316 .desc("average overall mshr miss latency") 2317 .flags(total | nozero | nonan) 2318 ; 2319 demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses; 2320 for (int i = 0; i < system->maxMasters(); i++) { 2321 demandAvgMshrMissLatency.subname(i, system->getMasterName(i)); 2322 } 2323 2324 overallAvgMshrMissLatency 2325 .name(name() + ".overall_avg_mshr_miss_latency") 2326 .desc("average overall mshr miss latency") 2327 .flags(total | nozero | nonan) 2328 ; 2329 overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses; 2330 for (int i = 0; i < system->maxMasters(); i++) { 2331 overallAvgMshrMissLatency.subname(i, system->getMasterName(i)); 2332 } 2333 2334 // mshrUncacheable latency formulas 2335 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2336 MemCmd cmd(access_idx); 2337 const string &cstr = cmd.toString(); 2338 2339 avgMshrUncacheableLatency[access_idx] 2340 .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency") 2341 .desc("average " + cstr + " mshr uncacheable latency") 2342 .flags(total | nozero | nonan) 2343 ; 2344 avgMshrUncacheableLatency[access_idx] = 2345 mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx]; 2346 2347 for (int i = 0; i < system->maxMasters(); i++) { 2348 avgMshrUncacheableLatency[access_idx].subname( 2349 i, system->getMasterName(i)); 2350 } 2351 } 2352 2353 overallAvgMshrUncacheableLatency 2354 .name(name() + ".overall_avg_mshr_uncacheable_latency") 2355 .desc("average overall mshr uncacheable latency") 2356 .flags(total | nozero | nonan) 2357 ; 2358 overallAvgMshrUncacheableLatency = 2359 overallMshrUncacheableLatency / overallMshrUncacheable; 2360 for (int i = 0; i < system->maxMasters(); i++) { 2361 overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i)); 2362 } 2363 2364 replacements 2365 .name(name() + ".replacements") 2366 .desc("number of replacements") 2367 ; 2368} 2369 2370void 2371BaseCache::regProbePoints() 2372{ 2373 ppHit = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Hit"); 2374 ppMiss = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Miss"); 2375 ppFill = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Fill"); 2376} 2377 2378/////////////// 2379// 2380// CpuSidePort 2381// 2382/////////////// 2383bool 2384BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt) 2385{ 2386 // Snoops shouldn't happen when bypassing caches 2387 assert(!cache->system->bypassCaches()); 2388 2389 assert(pkt->isResponse()); 2390 2391 // Express snoop responses from master to slave, e.g., from L1 to L2 2392 cache->recvTimingSnoopResp(pkt); 2393 return true; 2394} 2395 2396 2397bool 2398BaseCache::CpuSidePort::tryTiming(PacketPtr pkt) 2399{ 2400 if (cache->system->bypassCaches() || pkt->isExpressSnoop()) { 2401 // always let express snoop packets through even if blocked 2402 return true; 2403 } else if (blocked || mustSendRetry) { 2404 // either already committed to send a retry, or blocked 2405 mustSendRetry = true; 2406 return false; 2407 } 2408 mustSendRetry = false; 2409 return true; 2410} 
2411 2412bool 2413BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt) 2414{ 2415    assert(pkt->isRequest()); 2416 2417    if (cache->system->bypassCaches()) { 2418        // Just forward the packet if caches are disabled. 2419        // @todo This should really enqueue the packet rather 2420        bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt); 2421        assert(success); 2422        return true; 2423    } else if (tryTiming(pkt)) { 2424        cache->recvTimingReq(pkt); 2425        return true; 2426    } 2427    return false; 2428} 2429 2430Tick 2431BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt) 2432{ 2433    if (cache->system->bypassCaches()) { 2434        // Forward the request if the system is in cache bypass mode. 2435        return cache->memSidePort.sendAtomic(pkt); 2436    } else { 2437        return cache->recvAtomic(pkt); 2438    } 2439} 2440 2441void 2442BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt) 2443{ 2444    if (cache->system->bypassCaches()) { 2445        // The cache should be flushed if we are in cache bypass mode, 2446        // so we don't need to check if we need to update anything. 2447        cache->memSidePort.sendFunctional(pkt); 2448        return; 2449    } 2450 2451    // functional request 2452    cache->functionalAccess(pkt, true); 2453} 2454 2455AddrRangeList 2456BaseCache::CpuSidePort::getAddrRanges() const 2457{ 2458    return cache->getAddrRanges(); 2459} 2460 2461 2462BaseCache:: 2463CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache, 2464                         const std::string &_label) 2465    : CacheSlavePort(_name, _cache, _label), cache(_cache) 2466{ 2467} 2468 2469/////////////// 2470// 2471// MemSidePort 2472// 2473/////////////// 2474bool 2475BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt) 2476{ 2477    cache->recvTimingResp(pkt); 2478    return true; 2479} 2480 2481// Express snooping requests to memside port 2482void 2483BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt) 2484{ 2485    // Snoops shouldn't happen when bypassing caches 2486    assert(!cache->system->bypassCaches()); 2487 2488    // handle snooping requests 2489    cache->recvTimingSnoopReq(pkt); 2490} 2491 2492Tick 2493BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt) 2494{ 2495    // Snoops shouldn't happen when bypassing caches 2496    assert(!cache->system->bypassCaches()); 2497 2498    return cache->recvAtomicSnoop(pkt); 2499} 2500 2501void 2502BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt) 2503{ 2504    // Snoops shouldn't happen when bypassing caches 2505    assert(!cache->system->bypassCaches()); 2506 2507    // functional snoop (note that in contrast to atomic we don't have 2508    // a specific functionalSnoop method, as they have the same 2509    // behaviour regardless) 2510    cache->functionalAccess(pkt, false); 2511} 2512 2513void 2514BaseCache::CacheReqPacketQueue::sendDeferredPacket() 2515{ 2516    // sanity check 2517    assert(!waitingOnRetry); 2518 2519    // there should never be any deferred request packets in the 2520    // queue; instead we rely on the cache to provide the packets 2521    // from the MSHR queue or write queue 2522    assert(deferredPacketReadyTime() == MaxTick); 2523 2524    // check for request packets (requests & writebacks) 2525    QueueEntry* entry = cache.getNextQueueEntry(); 2526 2527    if (!entry) { 2528        // can happen if e.g. we attempt a writeback and fail, but 2529        // before the retry, the writeback is eliminated because 2530        // we snoop another cache's ReadEx.
2531 } else { 2532 // let our snoop responses go first if there are responses to 2533 // the same addresses 2534 if (checkConflictingSnoop(entry->getTarget()->pkt)) { 2535 return; 2536 } 2537 waitingOnRetry = entry->sendPacket(cache); 2538 } 2539 2540 // if we succeeded and are not waiting for a retry, schedule the 2541 // next send considering when the next queue is ready, note that 2542 // snoop responses have their own packet queue and thus schedule 2543 // their own events 2544 if (!waitingOnRetry) { 2545 schedSendEvent(cache.nextQueueReadyTime()); 2546 } 2547} 2548 2549BaseCache::MemSidePort::MemSidePort(const std::string &_name, 2550 BaseCache *_cache, 2551 const std::string &_label) 2552 : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue), 2553 _reqQueue(*_cache, *this, _snoopRespQueue, _label), 2554 _snoopRespQueue(*_cache, *this, true, _label), cache(_cache) 2555{ 2556} 2557 2558void 2559WriteAllocator::updateMode(Addr write_addr, unsigned write_size, 2560 Addr blk_addr) 2561{ 2562 // check if we are continuing where the last write ended 2563 if (nextAddr == write_addr) { 2564 delayCtr[blk_addr] = delayThreshold; 2565 // stop if we have already saturated 2566 if (mode != WriteMode::NO_ALLOCATE) { 2567 byteCount += write_size; 2568 // switch to streaming mode if we have passed the lower 2569 // threshold 2570 if (mode == WriteMode::ALLOCATE && 2571 byteCount > coalesceLimit) { 2572 mode = WriteMode::COALESCE; 2573 DPRINTF(Cache, "Switched to write coalescing\n"); 2574 } else if (mode == WriteMode::COALESCE && 2575 byteCount > noAllocateLimit) { 2576 // and continue and switch to non-allocating mode if we 2577 // pass the upper threshold 2578 mode = WriteMode::NO_ALLOCATE; 2579 DPRINTF(Cache, "Switched to write-no-allocate\n"); 2580 } 2581 } 2582 } else { 2583 // we did not see a write matching the previous one, start 2584 // over again 2585 byteCount = write_size; 2586 mode = WriteMode::ALLOCATE; 2587 resetDelay(blk_addr); 2588 } 2589 nextAddr = write_addr + write_size; 2590} 2591 2592WriteAllocator* 2593WriteAllocatorParams::create() 2594{ 2595 return new WriteAllocator(this); 2596} 2597
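// A worked illustration of the mode progression in
// WriteAllocator::updateMode() above. It assumes a 64-byte block size, a
// hypothetical coalesceLimit of 128 bytes and noAllocateLimit of 512 bytes
// (the actual limits come from the WriteAllocator parameters), that the
// first write below does not match the current nextAddr, and blk_addr
// stands for the block-aligned address of each write:
//
//     updateMode(0x1000, 64, blk_addr); // no match: byteCount = 64,  ALLOCATE
//     updateMode(0x1040, 64, blk_addr); // match:    byteCount = 128, ALLOCATE
//     updateMode(0x1080, 64, blk_addr); // match:    byteCount = 192 > 128,
//                                       //           switch to COALESCE
//     ...                               // further contiguous writes accumulate
//     updateMode(0x1200, 64, blk_addr); // byteCount = 576 > 512,
//                                       //           switch to NO_ALLOCATE
//     updateMode(0x8000, 64, blk_addr); // non-contiguous write: byteCount
//                                       //           and mode reset to ALLOCATE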