// base.cc, revision 12729
/*
 * Copyright (c) 2012-2013, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Definition of BaseCache functions.
 */
#include "mem/cache/base.hh"

#include "base/compiler.hh"
#include "base/logging.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheVerbose.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "mem/cache/queue_entry.hh"
#include "params/BaseCache.hh"
#include "sim/core.hh"

class BaseMasterPort;
class BaseSlavePort;

using namespace std;

BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
                                          BaseCache *_cache,
                                          const std::string &_label)
    : QueuedSlavePort(_name, _cache, queue), queue(*_cache, *this, _label),
      blocked(false), mustSendRetry(false),
      sendRetryEvent([this]{ processSendRetry(); }, _name)
{
}

BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
    : MemObject(p),
      cpuSidePort(p->name + ".cpu_side", this, "CpuSidePort"),
      memSidePort(p->name + ".mem_side", this, "MemSidePort"),
      mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
      writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
      tags(p->tags),
      prefetcher(p->prefetcher),
      prefetchOnAccess(p->prefetch_on_access),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
                                    name(), false,
                                    EventBase::Delayed_Writeback_Pri),
      blkSize(blk_size),
      lookupLatency(p->tag_latency),
      dataLatency(p->data_latency),
      forwardLatency(p->tag_latency),
      fillLatency(p->data_latency),
      responseLatency(p->response_latency),
      numTarget(p->tgts_per_mshr),
      forwardSnoops(true),
      clusivity(p->clusivity),
      isReadOnly(p->is_read_only),
      blocked(0),
      order(0),
      noTargetMSHR(nullptr),
      missCount(p->max_miss_count),
      addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()),
      system(p->system)
{
    // the MSHR queue has no reserve entries as we check the MSHR
    // queue on every single allocation, whereas the write queue has
    // as many reserve entries as we have MSHRs, since every MSHR may
    // eventually require a writeback, and we do not check the write
    // buffer before committing to an MSHR

    // forward snoops is overridden in init() once we can query
    // whether the connected master is actually snooping or not

    tempBlock = new CacheBlk();
    tempBlock->data = new uint8_t[blkSize];

    tags->setCache(this);
    if (prefetcher)
        prefetcher->setCache(this);
}
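// Illustrative sizing example (added, not from the original source):
// with p->mshrs = 4 and p->write_buffers = 8, the MSHR queue gets no
// reserve entries, while the write buffer is built with 4 reserve
// entries, one per MSHR, so that a writeback eventually generated by
// any in-flight miss can be queued even when the regular write-buffer
// entries are exhausted.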
BaseCache::~BaseCache()
{
    delete [] tempBlock->data;
    delete tempBlock;
}

void
BaseCache::CacheSlavePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(CachePort, "Port is blocking new requests\n");
    blocked = true;
    // if we already scheduled a retry in this cycle, but it has not yet
    // happened, cancel it
    if (sendRetryEvent.scheduled()) {
        owner.deschedule(sendRetryEvent);
        DPRINTF(CachePort, "Port descheduled retry\n");
        mustSendRetry = true;
    }
}

void
BaseCache::CacheSlavePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(CachePort, "Port is accepting new requests\n");
    blocked = false;
    if (mustSendRetry) {
        // @TODO: need to find a better time (next cycle?)
        owner.schedule(sendRetryEvent, curTick() + 1);
    }
}

void
BaseCache::CacheSlavePort::processSendRetry()
{
    DPRINTF(CachePort, "Port is sending retry\n");

    // reset the flag and call retry
    mustSendRetry = false;
    sendRetryReq();
}

void
BaseCache::init()
{
    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
        fatal("Cache ports on %s are not connected\n", name());
    cpuSidePort.sendRangeChange();
    forwardSnoops = cpuSidePort.isSnooping();
}

BaseMasterPort &
BaseCache::getMasterPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_side") {
        return memSidePort;
    } else {
        return MemObject::getMasterPort(if_name, idx);
    }
}

BaseSlavePort &
BaseCache::getSlavePort(const std::string &if_name, PortID idx)
{
    if (if_name == "cpu_side") {
        return cpuSidePort;
    } else {
        return MemObject::getSlavePort(if_name, idx);
    }
}

bool
BaseCache::inRange(Addr addr) const
{
    for (const auto& r : addrRanges) {
        if (r.contains(addr)) {
            return true;
        }
    }
    return false;
}

void
BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    if (pkt->needsResponse()) {
        pkt->makeTimingResponse();
        // @todo: Make someone pay for this
        pkt->headerDelay = pkt->payloadDelay = 0;

        // In this case we are considering request_time that takes
        // into account the delay of the xbar, if any, and just
        // lat, neglecting responseLatency, modelling hit latency
        // just as lookupLatency or the value of lat overridden
        // by access(), which calls the accessBlock() function.
        cpuSidePort.schedTimingResp(pkt, request_time, true);
    } else {
        DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
                pkt->print());

        // queue the packet for deletion, as the sending cache is
        // still relying on it; if the block is found in access(),
        // CleanEvict and Writeback messages will be deleted
        // here as well
        pendingDelete.reset(pkt);
    }
}
It 268 // specifies the latency to allocate an internal 269 // buffer and to schedule an event to the queued 270 // port and also takes into account the additional 271 // delay of the xbar. 272 mshr->allocateTarget(pkt, forward_time, order++, 273 allocOnFill(pkt->cmd)); 274 if (mshr->getNumTargets() == numTarget) { 275 noTargetMSHR = mshr; 276 setBlocked(Blocked_NoTargets); 277 // need to be careful with this... if this mshr isn't 278 // ready yet (i.e. time > curTick()), we don't want to 279 // move it ahead of mshrs that are ready 280 // mshrQueue.moveToFront(mshr); 281 } 282 } 283 } 284 } else { 285 // no MSHR 286 assert(pkt->req->masterId() < system->maxMasters()); 287 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++; 288 289 if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) { 290 // We use forward_time here because there is an 291 // writeback or writeclean, forwarded to WriteBuffer. 292 allocateWriteBuffer(pkt, forward_time); 293 } else { 294 if (blk && blk->isValid()) { 295 // If we have a write miss to a valid block, we 296 // need to mark the block non-readable. Otherwise 297 // if we allow reads while there's an outstanding 298 // write miss, the read could return stale data 299 // out of the cache block... a more aggressive 300 // system could detect the overlap (if any) and 301 // forward data out of the MSHRs, but we don't do 302 // that yet. Note that we do need to leave the 303 // block valid so that it stays in the cache, in 304 // case we get an upgrade response (and hence no 305 // new data) when the write miss completes. 306 // As long as CPUs do proper store/load forwarding 307 // internally, and have a sufficiently weak memory 308 // model, this is probably unnecessary, but at some 309 // point it must have seemed like we needed it... 310 assert((pkt->needsWritable() && !blk->isWritable()) || 311 pkt->req->isCacheMaintenance()); 312 blk->status &= ~BlkReadable; 313 } 314 // Here we are using forward_time, modelling the latency of 315 // a miss (outbound) just as forwardLatency, neglecting the 316 // lookupLatency component. 317 allocateMissBuffer(pkt, forward_time); 318 } 319 } 320} 321 322void 323BaseCache::recvTimingReq(PacketPtr pkt) 324{ 325 // anything that is merely forwarded pays for the forward latency and 326 // the delay provided by the crossbar 327 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 328 329 // We use lookupLatency here because it is used to specify the latency 330 // to access. 331 Cycles lat = lookupLatency; 332 CacheBlk *blk = nullptr; 333 bool satisfied = false; 334 { 335 PacketList writebacks; 336 // Note that lat is passed by reference here. The function 337 // access() calls accessBlock() which can modify lat value. 338 satisfied = access(pkt, blk, lat, writebacks); 339 340 // copy writebacks to write buffer here to ensure they logically 341 // proceed anything happening below 342 doWritebacks(writebacks, forward_time); 343 } 344 345 // Here we charge the headerDelay that takes into account the latencies 346 // of the bus, if the packet comes from it. 347 // The latency charged it is just lat that is the value of lookupLatency 348 // modified by access() function, or if not just lookupLatency. 349 // In case of a hit we are neglecting response latency. 350 // In case of a miss we are neglecting forward latency. 351 Tick request_time = clockEdge(lat) + pkt->headerDelay; 352 // Here we reset the timing of the packet. 
    // track time of availability of next prefetch, if any
    Tick next_pf_time = MaxTick;

    if (satisfied) {
        // if we need to notify the prefetcher we have to do it before
        // anything else, as later handleTimingReqHit might turn the
        // packet into a response
        if (prefetcher &&
            (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
            if (blk)
                blk->status &= ~BlkHWPrefetched;

            // Don't notify on SWPrefetch
            if (!pkt->cmd.isSWPrefetch()) {
                assert(!pkt->req->isCacheMaintenance());
                next_pf_time = prefetcher->notify(pkt);
            }
        }

        handleTimingReqHit(pkt, blk, request_time);
    } else {
        handleTimingReqMiss(pkt, blk, forward_time, request_time);

        // We should call the prefetcher regardless of whether the
        // request is satisfied or not, and regardless of whether the
        // request is in the MSHR or not. The request could be a ReadReq
        // hit, but still not satisfied (potentially because of a prior
        // write to the same cache line). So, even when not satisfied and
        // an MSHR is already allocated for this request, we need to let
        // the prefetcher know about it.

        // Don't notify prefetcher on SWPrefetch or cache maintenance
        // operations
        if (prefetcher && pkt &&
            !pkt->cmd.isSWPrefetch() &&
            !pkt->req->isCacheMaintenance()) {
            next_pf_time = prefetcher->notify(pkt);
        }
    }

    if (next_pf_time != MaxTick) {
        schedMemSideSendEvent(next_pf_time);
    }
}

void
BaseCache::handleUncacheableWriteResp(PacketPtr pkt)
{
    Tick completion_time = clockEdge(responseLatency) +
        pkt->headerDelay + pkt->payloadDelay;

    // Reset the bus additional time as it is now accounted for
    pkt->headerDelay = pkt->payloadDelay = 0;

    cpuSidePort.schedTimingResp(pkt, completion_time, true);
}
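// Overview (added summary, reflecting the logic below): recvTimingResp
// (1) diverts uncacheable write responses, (2) recovers the MSHR from
// the packet's sender state, (3) fills the block for read/upgrade
// responses, (4) services the MSHR targets, (5) either re-queues the
// MSHR for deferred targets or deallocates it, and (6) issues any
// resulting writebacks.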
void
BaseCache::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    // all header delay should be paid for by the crossbar, unless
    // this is a prefetch response from above
    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
             "%s saw a non-zero packet delay\n", name());

    const bool is_error = pkt->isError();

    if (is_error) {
        DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
                pkt->print());
    }

    DPRINTF(Cache, "%s: Handling response %s\n", __func__,
            pkt->print());

    // if this is a write, we should be looking at an uncacheable
    // write
    if (pkt->isWrite()) {
        assert(pkt->req->isUncacheable());
        handleUncacheableWriteResp(pkt);
        return;
    }

    // we have dealt with any (uncacheable) writes above, from here on
    // we know we are dealing with an MSHR due to a miss or a prefetch
    MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
    assert(mshr);

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = nullptr;
    }

    // Initial target is used just for stats
    MSHR::Target *initial_tgt = mshr->getTarget();
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick() - initial_tgt->recvTime;

    if (pkt->req->isUncacheable()) {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    } else {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    }

    PacketList writebacks;

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                pkt->getAddr());

        blk = handleFill(pkt, blk, writebacks, mshr->allocOnFill());
        assert(blk != nullptr);
    }

    if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
        // The block was marked not readable while there was a pending
        // cache maintenance operation, restore its flag.
        blk->status |= BlkReadable;
    }

    if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) {
        // If at this point the referenced block is writable and the
        // response is not a cache invalidate, we promote targets that
        // were deferred as we couldn't guarantee a writable copy
        mshr->promoteWritable();
    }

    serviceMSHRTargets(mshr, pkt, blk, writebacks);

    if (mshr->promoteDeferredTargets()) {
        // avoid later read getting stale data while write miss is
        // outstanding.. see comment in timingAccess()
        if (blk) {
            blk->status &= ~BlkReadable;
        }
        mshrQueue.markPending(mshr);
        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
    } else {
        // while we deallocate an mshr from the queue we still have to
        // check the isFull condition before and after as we might
        // have been using the reserved entries already
        const bool was_full = mshrQueue.isFull();
        mshrQueue.deallocate(mshr);
        if (was_full && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }

        // Request the bus for a prefetch if this deallocation freed enough
        // MSHRs for a prefetch to take place
        if (prefetcher && mshrQueue.canPrefetch()) {
            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
                                         clockEdge());
            if (next_pf_time != MaxTick)
                schedMemSideSendEvent(next_pf_time);
        }
    }

    // if we used temp block, check to see if its valid and then clear it out
    if (blk == tempBlock && tempBlock->isValid()) {
        evictBlock(blk, writebacks);
    }

    const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // copy writebacks to write buffer
    doWritebacks(writebacks, forward_time);

    DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
    delete pkt;
}
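// Note (added for clarity): unlike the timing path above, the atomic
// path below returns its latency directly to the caller as ticks,
// computed as lat * clockPeriod(), rather than scheduling response
// events on the ports.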
Tick
BaseCache::recvAtomic(PacketPtr pkt)
{
    // We are in atomic mode so we pay just for lookupLatency here.
    Cycles lat = lookupLatency;

    // follow the same flow as in recvTimingReq, and check if a cache
    // above us is responding
    if (pkt->cacheResponding() && !pkt->isClean()) {
        assert(!pkt->req->isCacheInvalidate());
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if a cache is responding, and it had the line in Owned
        // rather than Modified state, we need to invalidate any
        // copies that are not on the same path to memory
        assert(pkt->needsWritable() && !pkt->responderHadWritable());
        lat += ticksToCycles(memSidePort.sendAtomic(pkt));

        return lat * clockPeriod();
    }

    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    CacheBlk *blk = nullptr;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty
        // block. If a dirty block is encountered a WriteClean
        // will update any copies to the path to the memory
        // until the point of reference.
        DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                __func__, pkt->print(), blk->print());
        PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
        writebacks.push_back(wb_pkt);
        pkt->setSatisfied();
    }

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);
    assert(writebacks.empty());

    if (!satisfied) {
        lat += handleAtomicReqMiss(pkt, blk, writebacks);
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention. If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used temp block, check to see if it's valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter), first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = evictBlock(blk);
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat * clockPeriod();
}
void
BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
{
    Addr blk_addr = pkt->getBlockAddr(blkSize);
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found. As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.

    // see if we have data at all (owned or otherwise)
    bool have_data = blk && blk->isValid()
        && pkt->checkFunctional(&cbpw, blk_addr, is_secure, blkSize,
                                blk->data);

    // data we have is dirty if marked as such or if we have an
    // in-service MSHR that is pending a modified line
    bool have_dirty =
        have_data && (blk->isDirty() ||
                      (mshr && mshr->inService && mshr->isPendingModified()));

    bool done = have_dirty ||
        cpuSidePort.checkFunctional(pkt) ||
        mshrQueue.checkFunctional(pkt, blk_addr) ||
        writeBuffer.checkFunctional(pkt, blk_addr) ||
        memSidePort.checkFunctional(pkt);

    DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
            (blk && blk->isValid()) ? "valid " : "",
            have_data ? "data " : "", done ? "done " : "");

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (done) {
        pkt->makeResponse();
    } else {
        // if it came as a request from the CPU side then make sure it
        // continues towards the memory side
        if (from_cpu_side) {
            memSidePort.sendFunctional(pkt);
        } else if (cpuSidePort.isSnooping()) {
            // if it came from the memory side, it must be a snoop request
            // and we should only forward it if we are forwarding snoops
            cpuSidePort.sendFunctionalSnoop(pkt);
        }
    }
}


void
BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = pkt->getOffset(blkSize);
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}
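// Worked example (illustrative, not from the original source): for a
// 64-bit conditional swap where the block currently holds 0x1 at the
// target offset, a request with condition value 0x1 and write value
// 0x2 compares equal, so 0x2 is written and the block is marked dirty,
// while the packet carries back the old value 0x1. Had the condition
// been 0x3, the comparison would fail and memory would be left
// untouched, but the old value would still be returned in the packet.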
QueueEntry*
BaseCache::getNextQueueEntry()
{
    // Check both MSHR queue and write buffer for potential requests,
    // note that null does not mean there is no request, it could
    // simply be that it is not ready
    MSHR *miss_mshr = mshrQueue.getNext();
    WriteQueueEntry *wq_entry = writeBuffer.getNext();

    // If we got a write buffer request ready, first priority is a
    // full write buffer, otherwise we favour the miss requests
    if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
        // need to search MSHR queue for conflicting earlier miss.
        MSHR *conflict_mshr =
            mshrQueue.findPending(wq_entry->blkAddr,
                                  wq_entry->isSecure);

        if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
            // Service misses in order until conflict is cleared.
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue write
        return wq_entry;
    } else if (miss_mshr) {
        // need to check for conflicting earlier writeback
        WriteQueueEntry *conflict_mshr =
            writeBuffer.findPending(miss_mshr->blkAddr,
                                    miss_mshr->isSecure);
        if (conflict_mshr) {
            // not sure why we don't check order here... it was in the
            // original code but commented out.

            // The only way this happens is if we are
            // doing a write and we didn't have permissions
            // then subsequently saw a writeback (owned got evicted)
            // We need to make sure to perform the writeback first
            // To preserve the dirty data, then we can issue the write

            // should we return wq_entry here instead?  I.e. do we
            // have to flush writes in order?  I don't think so... not
            // for Alpha anyway.  Maybe for x86?
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue read
        return miss_mshr;
    }

    // fall through... no pending requests.  Try a prefetch.
    assert(!miss_mshr && !wq_entry);
    if (prefetcher && mshrQueue.canPrefetch()) {
        // If we have a miss queue slot, we can try a prefetch
        PacketPtr pkt = prefetcher->getPacket();
        if (pkt) {
            Addr pf_addr = pkt->getBlockAddr(blkSize);
            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
                // Update statistic on number of prefetches issued
                // (hwpf_mshr_misses)
                assert(pkt->req->masterId() < system->maxMasters());
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // allocate an MSHR and return it, note
                // that we send the packet straight away, so do not
                // schedule the send
                return allocateMissBuffer(pkt, curTick(), false);
            } else {
                // free the request and packet
                delete pkt->req;
                delete pkt;
            }
        }
    }

    return nullptr;
}
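// Arbitration summary (added for clarity): the order implemented above
// is (1) a ready write when the write buffer is full or no miss is
// ready, unless an older conflicting miss to the same block exists;
// (2) otherwise a ready miss, unless a conflicting writeback to the
// same block is pending, in which case the writeback is drained first;
// and (3) a prefetch only when both queues are empty, an MSHR slot is
// free, and the prefetch address is absent from the tags, the MSHR
// queue, and the write buffer.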
void
BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsWritable() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        cmpAndSwap(blk, pkt);
    } else if (pkt->isWrite()) {
        // we have the block in a writable state and can go ahead,
        // note that the line may also be considered writable in
        // downstream caches along the path to memory, but always
        // Exclusive, and never Modified
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in writable state
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty (and thus transition to the
        // Modified state) even if we are a failed StoreCond so we
        // supply data to any snoops that have appended themselves to
        // this cache before knowing the store will fail.
        blk->status |= BlkDirty;
        DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }

        // all read responses have a data payload
        assert(pkt->hasRespData());
        pkt->setDataFromBlock(blk->data, blkSize);
    } else if (pkt->isUpgrade()) {
        // sanity check
        assert(!pkt->hasSharers());

        if (blk->isDirty()) {
            // we were in the Owned state, and a cache above us that
            // has the line in Shared state needs to be made aware
            // that the data it already has is in fact dirty
            pkt->setCacheResponding();
            blk->status &= ~BlkDirty;
        }
    } else {
        assert(pkt->isInvalidate());
        invalidateBlock(blk);
        DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
                pkt->print());
    }
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////
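// Overview (added summary, reflecting the logic below): access()
// returns true when the request is fully handled at this level (a hit,
// an absorbed writeback or CleanEvict, or a failed store-conditional)
// and false when it must be forwarded below as a miss, a write-through
// WriteClean, or a cache maintenance operation; lat is updated by
// reference with the tag-lookup cost.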
bool
BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
                  PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());

    // Here lat is the value passed as parameter to accessBlock() function
    // that can modify its value.
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat);

    DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(),
            blk ? "hit " + blk->print() : "miss");

    if (pkt->req->isCacheMaintenance()) {
        // A cache maintenance operation is always forwarded to the
        // memory below even if the block is found in dirty state.

        // We defer any changes to the state of the block until we
        // create and mark as in service the mshr for the downstream
        // packet.
        return false;
    }

    if (pkt->isEviction()) {
        // We check for presence of block in above caches before issuing
        // Writeback or CleanEvict to write buffer. Therefore the only
        // possible case is a CleanEvict packet coming from above
        // encountering a Writeback generated in this cache or a peer
        // cache and waiting in the write buffer. Cases of upper level
        // peer caches generating CleanEvict and Writeback or simply
        // CleanEvict and CleanEvict almost simultaneously will be caught
        // by snoops sent out by the crossbar.
        WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
                                                          pkt->isSecure());
        if (wb_entry) {
            assert(wb_entry->getNumTargets() == 1);
            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
            assert(wbPkt->isWriteback());

            if (pkt->isCleanEviction()) {
                // The CleanEvict and WritebackClean snoops into other
                // peer caches of the same level while traversing the
                // crossbar. If a copy of the block is found, the
                // packet is deleted in the crossbar. Hence, none of
                // the other upper level caches connected to this
                // cache have the block, so we can clear the
                // BLOCK_CACHED flag in the Writeback if set and
                // discard the CleanEvict by returning true.
                wbPkt->clearBlockCached();
                return true;
            } else {
                assert(pkt->cmd == MemCmd::WritebackDirty);
                // Dirty writeback from above trumps our clean
                // writeback... discard here
                // Note: markInService will remove entry from writeback buffer.
                markInService(wb_entry);
                delete wbPkt;
            }
        }
    }

    // Writeback handling is a special case. We can write the block into
    // the cache without having a writable copy (or any copy at all).
    if (pkt->isWriteback()) {
        assert(blkSize == pkt->getSize());

        // we could get a clean writeback while we are having
        // outstanding accesses to a block, do the simple thing for
        // now and drop the clean writeback so that we do not upset
        // any ordering/decisions about ownership already taken
        if (pkt->cmd == MemCmd::WritebackClean &&
            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
                    "dropping\n", pkt->getAddr());
            return true;
        }

        if (!blk) {
            // need to do a replacement
            blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks);
            if (!blk) {
                // no replaceable block available: give up, fwd to next level.
                incMissCount(pkt);
                return false;
            }
            tags->insertBlock(pkt, blk);

            blk->status |= (BlkValid | BlkReadable);
        }
        // only mark the block dirty if we got a writeback command,
        // and leave it as is for a clean writeback
        if (pkt->cmd == MemCmd::WritebackDirty) {
            // TODO: the coherent cache can assert(!blk->isDirty());
            blk->status |= BlkDirty;
        }
        // if the packet does not have sharers, it is passing
        // writable, and we got the writeback in Modified or Exclusive
        // state, if not we are in the Owned or Shared state
        if (!pkt->hasSharers()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
        incHitCount(pkt);
        // populate the time when the block will be ready to access.
        blk->whenReady = clockEdge(fillLatency) + pkt->headerDelay +
            pkt->payloadDelay;
        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        if (blk) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and delete
            // it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the CleanEvict
        // like a Writeback which could not find a replaceable block so has to
        // go to next level.
        return false;
    } else if (pkt->cmd == MemCmd::WriteClean) {
        // WriteClean handling is a special case. We can allocate a
        // block directly if it doesn't exist and we can update the
        // block immediately. The WriteClean transfers the ownership
        // of the block as well.
        assert(blkSize == pkt->getSize());

        if (!blk) {
            if (pkt->writeThrough()) {
                // if this is a write through packet, we don't try to
                // allocate if the block is not present
                return false;
            } else {
                // a writeback that misses needs to allocate a new block
                blk = allocateBlock(pkt->getAddr(), pkt->isSecure(),
                                    writebacks);
                if (!blk) {
                    // no replaceable block available: give up, fwd to
                    // next level.
                    incMissCount(pkt);
                    return false;
                }
                tags->insertBlock(pkt, blk);

                blk->status |= (BlkValid | BlkReadable);
            }
        }

        // at this point either this is a writeback or a write-through
        // write clean operation and the block is already in this
        // cache, we need to update the data and the block flags
        assert(blk);
        // TODO: the coherent cache can assert(!blk->isDirty());
        if (!pkt->writeThrough()) {
            blk->status |= BlkDirty;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());

        incHitCount(pkt);
        // populate the time when the block will be ready to access.
        blk->whenReady = clockEdge(fillLatency) + pkt->headerDelay +
            pkt->payloadDelay;
        // if this is a write-through packet it will be sent to cache
        // below
        return !pkt->writeThrough();
    } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
                       blk->isReadable())) {
        // OK to satisfy access
        incHitCount(pkt);
        satisfyRequest(pkt, blk);
        maintainClusivity(pkt->fromCache(), blk);

        return true;
    }

    // Can't satisfy access normally... either no block (blk == nullptr)
    // or have block but need writable

    incMissCount(pkt);

    if (!blk && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}

void
BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
{
    if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
        clusivity == Enums::mostly_excl) {
        // if we have responded to a cache, and our block is still
        // valid, but not dirty, and this cache is mostly exclusive
        // with respect to the cache above, drop the block
        invalidateBlock(blk);
    }
}
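// Example (added for clarity): with clusivity == mostly_excl, an L2
// configured this way invalidates its clean copy once a cache above
// has fetched the line, approximating an exclusive hierarchy; with
// mostly_incl the copy is kept, approximating an inclusive one. Dirty
// blocks are retained either way, since dropping them would lose data.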
CacheBlk*
BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
                      bool allocate)
{
    assert(pkt->isResponse() || pkt->cmd == MemCmd::WriteLineReq);
    Addr addr = pkt->getAddr();
    bool is_secure = pkt->isSecure();
#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    // When handling a fill, we should have no writes to this line.
    assert(addr == pkt->getBlockAddr(blkSize));
    assert(!writeBuffer.findMatch(addr, is_secure));

    if (!blk) {
        // better have read new data...
        assert(pkt->hasData());

        // only read responses and write-line requests have data;
        // note that we don't write the data here for write-line - that
        // happens in the subsequent call to satisfyRequest
        assert(pkt->isRead() || pkt->cmd == MemCmd::WriteLineReq);

        // need to do a replacement if allocating, otherwise we stick
        // with the temporary storage
        blk = allocate ? allocateBlock(addr, is_secure, writebacks) : nullptr;

        if (!blk) {
            // No replaceable block or a mostly exclusive
            // cache... just use temporary storage to complete the
            // current request and then get rid of it
            assert(!tempBlock->isValid());
            blk = tempBlock;
            tempBlock->set = tags->extractSet(addr);
            tempBlock->tag = tags->extractTag(addr);
            DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
                    is_secure ? "s" : "ns");
        } else {
            tags->insertBlock(pkt, blk);
        }

        // we should never be overwriting a valid block
        assert(!blk->isValid());
    } else {
        // existing block... probably an upgrade
        assert(blk->tag == tags->extractTag(addr));
        assert(blk->isSecure() == is_secure);
        // either we're getting new data or the block should already be valid
        assert(pkt->hasData() || blk->isValid());
        // don't clear block status... if block is already dirty we
        // don't want to lose that
    }

    blk->status |= BlkValid | BlkReadable;

    // sanity check for whole-line writes, which should always be
    // marked as writable as part of the fill, and then later marked
    // dirty as part of satisfyRequest
    if (pkt->cmd == MemCmd::WriteLineReq) {
        assert(!pkt->hasSharers());
    }

    // here we deal with setting the appropriate state of the line,
    // and we start by looking at the hasSharers flag, and ignore the
    // cacheResponding flag (normally signalling dirty data) if the
    // packet has sharers, thus the line is never allocated as Owned
    // (dirty but not writable), and always ends up being either
    // Shared, Exclusive or Modified, see Packet::setCacheResponding
    // for more details
    if (!pkt->hasSharers()) {
        // we could get a writable line from memory (rather than a
        // cache) even in a read-only cache, note that we set this bit
        // even for a read-only cache, possibly revisit this decision
        blk->status |= BlkWritable;

        // check if we got this via cache-to-cache transfer (i.e., from a
        // cache that had the block in Modified or Owned state)
        if (pkt->cacheResponding()) {
            // we got the block in Modified state, and invalidated the
            // owner's copy
            blk->status |= BlkDirty;

            chatty_assert(!isReadOnly, "Should never see dirty snoop response "
                          "in read-only cache %s\n", name());
        }
    }
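    // State summary (added for clarity), restating the rules above:
    //
    //   hasSharers | cacheResponding | resulting block state
    //   -----------+-----------------+------------------------------
    //   set        | (ignored)       | Shared    (readable only)
    //   clear      | clear           | Exclusive (writable, clean)
    //   clear      | set             | Modified  (writable, dirty)
    //
    // Owned (dirty but not writable) is never allocated on a fill.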
    DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
            addr, is_secure ? "s" : "ns", old_state, blk->print());

    // if we got new data, copy it in (checking for a read response
    // and a response that has data is the same in the end)
    if (pkt->isRead()) {
        // sanity checks
        assert(pkt->hasData());
        assert(pkt->getSize() == blkSize);

        pkt->writeDataToBlock(blk->data, blkSize);
    }
    // We pay for fillLatency here.
    blk->whenReady = clockEdge() + fillLatency * clockPeriod() +
        pkt->payloadDelay;

    return blk;
}

CacheBlk*
BaseCache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks)
{
    // Find replacement victim
    CacheBlk *blk = tags->findVictim(addr);

    // It is valid to return nullptr if there is no victim
    if (!blk)
        return nullptr;

    if (blk->isValid()) {
        Addr repl_addr = tags->regenerateBlkAddr(blk);
        MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
        if (repl_mshr) {
            // must be an outstanding upgrade or clean request
            // on a block we're about to replace...
            assert((!blk->isWritable() && repl_mshr->needsWritable()) ||
                   repl_mshr->isCleaning());
            // too hard to replace block with transient state
            // allocation failed, block not inserted
            return nullptr;
        } else {
            DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx "
                    "(%s): %s\n", repl_addr, blk->isSecure() ? "s" : "ns",
                    addr, is_secure ? "s" : "ns",
                    blk->isDirty() ? "writeback" : "clean");

            if (blk->wasPrefetched()) {
                unusedPrefetches++;
            }
            evictBlock(blk, writebacks);
            replacements++;
        }
    }

    return blk;
}

void
BaseCache::invalidateBlock(CacheBlk *blk)
{
    if (blk != tempBlock)
        tags->invalidate(blk);
    blk->invalidate();
}

PacketPtr
BaseCache::writebackBlk(CacheBlk *blk)
{
    chatty_assert(!isReadOnly || writebackClean,
                  "Writeback from read-only cache");
    assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));

    writebacks[Request::wbMasterId]++;

    Request *req = new Request(tags->regenerateBlkAddr(blk), blkSize, 0,
                               Request::wbMasterId);
    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);

    PacketPtr pkt =
        new Packet(req, blk->isDirty() ?
                   MemCmd::WritebackDirty : MemCmd::WritebackClean);

    DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n",
            pkt->print(), blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting shared means we pass the block in modified
        // state, mark our own block non-writeable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    pkt->setDataFromBlock(blk->data, blkSize);

    return pkt;
}
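// Note (added for clarity): writebackBlk above and writecleanBlk below
// encode the coherence handoff the same way: a writable source block
// is passed downstream as Modified (no sharers flag set, local
// writable bit cleared), whereas a non-writable dirty block is in
// Owned state and signals this with setHasSharers(); in either case
// the local dirty bit is cleared because responsibility for the data
// moves with the packet.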
PacketPtr
BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
{
    Request *req = new Request(tags->regenerateBlkAddr(blk), blkSize, 0,
                               Request::wbMasterId);
    if (blk->isSecure()) {
        req->setFlags(Request::SECURE);
    }
    req->taskId(blk->task_id);

    PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id);

    if (dest) {
        req->setFlags(dest);
        pkt->setWriteThrough();
    }

    DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(),
            blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting shared means we pass the block in modified
        // state, mark our own block non-writeable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    pkt->setDataFromBlock(blk->data, blkSize);

    return pkt;
}


void
BaseCache::memWriteback()
{
    tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
}

void
BaseCache::memInvalidate()
{
    tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
}

bool
BaseCache::isDirty() const
{
    return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); });
}

void
BaseCache::writebackVisitor(CacheBlk &blk)
{
    if (blk.isDirty()) {
        assert(blk.isValid());

        Request request(tags->regenerateBlkAddr(&blk),
                        blkSize, 0, Request::funcMasterId);
        request.taskId(blk.task_id);
        if (blk.isSecure()) {
            request.setFlags(Request::SECURE);
        }

        Packet packet(&request, MemCmd::WriteReq);
        packet.dataStatic(blk.data);

        memSidePort.sendFunctional(&packet);

        blk.status &= ~BlkDirty;
    }
}
" \ 1370 "Expect things to break.\n"); 1371 1372 if (blk.isValid()) { 1373 assert(!blk.isDirty()); 1374 invalidateBlock(&blk); 1375 } 1376} 1377 1378Tick 1379BaseCache::nextQueueReadyTime() const 1380{ 1381 Tick nextReady = std::min(mshrQueue.nextReadyTime(), 1382 writeBuffer.nextReadyTime()); 1383 1384 // Don't signal prefetch ready time if no MSHRs available 1385 // Will signal once enoguh MSHRs are deallocated 1386 if (prefetcher && mshrQueue.canPrefetch()) { 1387 nextReady = std::min(nextReady, 1388 prefetcher->nextPrefetchReadyTime()); 1389 } 1390 1391 return nextReady; 1392} 1393 1394 1395bool 1396BaseCache::sendMSHRQueuePacket(MSHR* mshr) 1397{ 1398 assert(mshr); 1399 1400 // use request from 1st target 1401 PacketPtr tgt_pkt = mshr->getTarget()->pkt; 1402 1403 DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print()); 1404 1405 CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure); 1406 1407 // either a prefetch that is not present upstream, or a normal 1408 // MSHR request, proceed to get the packet to send downstream 1409 PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable()); 1410 1411 mshr->isForward = (pkt == nullptr); 1412 1413 if (mshr->isForward) { 1414 // not a cache block request, but a response is expected 1415 // make copy of current packet to forward, keep current 1416 // copy for response handling 1417 pkt = new Packet(tgt_pkt, false, true); 1418 assert(!pkt->isWrite()); 1419 } 1420 1421 // play it safe and append (rather than set) the sender state, 1422 // as forwarded packets may already have existing state 1423 pkt->pushSenderState(mshr); 1424 1425 if (pkt->isClean() && blk && blk->isDirty()) { 1426 // A cache clean opearation is looking for a dirty block. Mark 1427 // the packet so that the destination xbar can determine that 1428 // there will be a follow-up write packet as well. 1429 pkt->setSatisfied(); 1430 } 1431 1432 if (!memSidePort.sendTimingReq(pkt)) { 1433 // we are awaiting a retry, but we 1434 // delete the packet and will be creating a new packet 1435 // when we get the opportunity 1436 delete pkt; 1437 1438 // note that we have now masked any requestBus and 1439 // schedSendEvent (we will wait for a retry before 1440 // doing anything), and this is so even if we do not 1441 // care about this packet and might override it before 1442 // it gets retried 1443 return true; 1444 } else { 1445 // As part of the call to sendTimingReq the packet is 1446 // forwarded to all neighbouring caches (and any caches 1447 // above them) as a snoop. Thus at this point we know if 1448 // any of the neighbouring caches are responding, and if 1449 // so, we know it is dirty, and we can determine if it is 1450 // being passed as Modified, making our MSHR the ordering 1451 // point 1452 bool pending_modified_resp = !pkt->hasSharers() && 1453 pkt->cacheResponding(); 1454 markInService(mshr, pending_modified_resp); 1455 1456 if (pkt->isClean() && blk && blk->isDirty()) { 1457 // A cache clean opearation is looking for a dirty 1458 // block. If a dirty block is encountered a WriteClean 1459 // will update any copies to the path to the memory 1460 // until the point of reference. 
bool
BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
{
    assert(wq_entry);

    // always a single target for write queue entries
    PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;

    DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());

    // forward as is, both for evictions and uncacheable writes
    if (!memSidePort.sendTimingReq(tgt_pkt)) {
        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        markInService(wq_entry);
        return false;
    }
}

void
BaseCache::serialize(CheckpointOut &cp) const
{
    bool dirty(isDirty());

    if (dirty) {
        warn("*** The cache still contains dirty data. ***\n");
        warn("    Make sure to drain the system using the correct flags.\n");
        warn("    This checkpoint will not restore correctly " \
             "and dirty data in the cache will be lost!\n");
    }

    // Since we don't checkpoint the data in the cache, any dirty data
    // will be lost when restoring from a checkpoint of a system that
    // wasn't drained properly. Flag the checkpoint as invalid if the
    // cache contains dirty data.
    bool bad_checkpoint(dirty);
    SERIALIZE_SCALAR(bad_checkpoint);
}

void
BaseCache::unserialize(CheckpointIn &cp)
{
    bool bad_checkpoint;
    UNSERIALIZE_SCALAR(bad_checkpoint);
    if (bad_checkpoint) {
        fatal("Restoring from checkpoints with dirty caches is not "
              "supported in the classic memory system. Please remove any "
              "caches or drain them properly before taking checkpoints.\n");
    }
}

void
BaseCache::regStats()
{
    MemObject::regStats();

    using namespace Stats;

    // Hit statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        hits[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_hits")
            .desc("number of " + cstr + " hits")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            hits[access_idx].subname(i, system->getMasterName(i));
        }
    }

// These macros make it easier to sum the right subset of commands and
// to change the subset of commands that are considered "demand" vs
// "non-demand"
#define SUM_DEMAND(s) \
    (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \
     s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq])

// should writebacks be included here?  prior code was inconsistent...
#define SUM_NON_DEMAND(s) \
    (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq])
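// Expansion example (added for clarity): demandHits = SUM_DEMAND(hits)
// below expands to
//   hits[MemCmd::ReadReq] + hits[MemCmd::WriteReq] +
//   hits[MemCmd::WriteLineReq] + hits[MemCmd::ReadExReq] +
//   hits[MemCmd::ReadCleanReq] + hits[MemCmd::ReadSharedReq]
// i.e. a per-master vector formula summing the demand command types.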
    demandHits
        .name(name() + ".demand_hits")
        .desc("number of demand (read+write) hits")
        .flags(total | nozero | nonan)
        ;
    demandHits = SUM_DEMAND(hits);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandHits.subname(i, system->getMasterName(i));
    }

    overallHits
        .name(name() + ".overall_hits")
        .desc("number of overall hits")
        .flags(total | nozero | nonan)
        ;
    overallHits = demandHits + SUM_NON_DEMAND(hits);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallHits.subname(i, system->getMasterName(i));
    }

    // Miss statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        misses[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_misses")
            .desc("number of " + cstr + " misses")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            misses[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMisses
        .name(name() + ".demand_misses")
        .desc("number of demand (read+write) misses")
        .flags(total | nozero | nonan)
        ;
    demandMisses = SUM_DEMAND(misses);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMisses.subname(i, system->getMasterName(i));
    }

    overallMisses
        .name(name() + ".overall_misses")
        .desc("number of overall misses")
        .flags(total | nozero | nonan)
        ;
    overallMisses = demandMisses + SUM_NON_DEMAND(misses);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMisses.subname(i, system->getMasterName(i));
    }

    // Miss latency statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        missLatency[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_miss_latency")
            .desc("number of " + cstr + " miss cycles")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            missLatency[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMissLatency
        .name(name() + ".demand_miss_latency")
        .desc("number of demand (read+write) miss cycles")
        .flags(total | nozero | nonan)
        ;
    demandMissLatency = SUM_DEMAND(missLatency);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMissLatency.subname(i, system->getMasterName(i));
    }

    overallMissLatency
        .name(name() + ".overall_miss_latency")
        .desc("number of overall miss cycles")
        .flags(total | nozero | nonan)
        ;
    overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMissLatency.subname(i, system->getMasterName(i));
    }
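    // Note (added for clarity): the assignments above and below define
    // Stats::Formula objects, so expressions such as
    // accesses = hits + misses or missRate = misses / accesses are not
    // evaluated here at registration time, but lazily whenever the
    // statistics are dumped.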
+ cstr + "_accesses") 1663 .desc("number of " + cstr + " accesses(hits+misses)") 1664 .flags(total | nozero | nonan) 1665 ; 1666 accesses[access_idx] = hits[access_idx] + misses[access_idx]; 1667 1668 for (int i = 0; i < system->maxMasters(); i++) { 1669 accesses[access_idx].subname(i, system->getMasterName(i)); 1670 } 1671 } 1672 1673 demandAccesses 1674 .name(name() + ".demand_accesses") 1675 .desc("number of demand (read+write) accesses") 1676 .flags(total | nozero | nonan) 1677 ; 1678 demandAccesses = demandHits + demandMisses; 1679 for (int i = 0; i < system->maxMasters(); i++) { 1680 demandAccesses.subname(i, system->getMasterName(i)); 1681 } 1682 1683 overallAccesses 1684 .name(name() + ".overall_accesses") 1685 .desc("number of overall (read+write) accesses") 1686 .flags(total | nozero | nonan) 1687 ; 1688 overallAccesses = overallHits + overallMisses; 1689 for (int i = 0; i < system->maxMasters(); i++) { 1690 overallAccesses.subname(i, system->getMasterName(i)); 1691 } 1692 1693 // miss rate formulas 1694 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1695 MemCmd cmd(access_idx); 1696 const string &cstr = cmd.toString(); 1697 1698 missRate[access_idx] 1699 .name(name() + "." + cstr + "_miss_rate") 1700 .desc("miss rate for " + cstr + " accesses") 1701 .flags(total | nozero | nonan) 1702 ; 1703 missRate[access_idx] = misses[access_idx] / accesses[access_idx]; 1704 1705 for (int i = 0; i < system->maxMasters(); i++) { 1706 missRate[access_idx].subname(i, system->getMasterName(i)); 1707 } 1708 } 1709 1710 demandMissRate 1711 .name(name() + ".demand_miss_rate") 1712 .desc("miss rate for demand accesses") 1713 .flags(total | nozero | nonan) 1714 ; 1715 demandMissRate = demandMisses / demandAccesses; 1716 for (int i = 0; i < system->maxMasters(); i++) { 1717 demandMissRate.subname(i, system->getMasterName(i)); 1718 } 1719 1720 overallMissRate 1721 .name(name() + ".overall_miss_rate") 1722 .desc("miss rate for overall accesses") 1723 .flags(total | nozero | nonan) 1724 ; 1725 overallMissRate = overallMisses / overallAccesses; 1726 for (int i = 0; i < system->maxMasters(); i++) { 1727 overallMissRate.subname(i, system->getMasterName(i)); 1728 } 1729 1730 // miss latency formulas 1731 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1732 MemCmd cmd(access_idx); 1733 const string &cstr = cmd.toString(); 1734 1735 avgMissLatency[access_idx] 1736 .name(name() + "." 
+ cstr + "_avg_miss_latency") 1737 .desc("average " + cstr + " miss latency") 1738 .flags(total | nozero | nonan) 1739 ; 1740 avgMissLatency[access_idx] = 1741 missLatency[access_idx] / misses[access_idx]; 1742 1743 for (int i = 0; i < system->maxMasters(); i++) { 1744 avgMissLatency[access_idx].subname(i, system->getMasterName(i)); 1745 } 1746 } 1747 1748 demandAvgMissLatency 1749 .name(name() + ".demand_avg_miss_latency") 1750 .desc("average overall miss latency") 1751 .flags(total | nozero | nonan) 1752 ; 1753 demandAvgMissLatency = demandMissLatency / demandMisses; 1754 for (int i = 0; i < system->maxMasters(); i++) { 1755 demandAvgMissLatency.subname(i, system->getMasterName(i)); 1756 } 1757 1758 overallAvgMissLatency 1759 .name(name() + ".overall_avg_miss_latency") 1760 .desc("average overall miss latency") 1761 .flags(total | nozero | nonan) 1762 ; 1763 overallAvgMissLatency = overallMissLatency / overallMisses; 1764 for (int i = 0; i < system->maxMasters(); i++) { 1765 overallAvgMissLatency.subname(i, system->getMasterName(i)); 1766 } 1767 1768 blocked_cycles.init(NUM_BLOCKED_CAUSES); 1769 blocked_cycles 1770 .name(name() + ".blocked_cycles") 1771 .desc("number of cycles access was blocked") 1772 .subname(Blocked_NoMSHRs, "no_mshrs") 1773 .subname(Blocked_NoTargets, "no_targets") 1774 ; 1775 1776 1777 blocked_causes.init(NUM_BLOCKED_CAUSES); 1778 blocked_causes 1779 .name(name() + ".blocked") 1780 .desc("number of cycles access was blocked") 1781 .subname(Blocked_NoMSHRs, "no_mshrs") 1782 .subname(Blocked_NoTargets, "no_targets") 1783 ; 1784 1785 avg_blocked 1786 .name(name() + ".avg_blocked_cycles") 1787 .desc("average number of cycles each access was blocked") 1788 .subname(Blocked_NoMSHRs, "no_mshrs") 1789 .subname(Blocked_NoTargets, "no_targets") 1790 ; 1791 1792 avg_blocked = blocked_cycles / blocked_causes; 1793 1794 unusedPrefetches 1795 .name(name() + ".unused_prefetches") 1796 .desc("number of HardPF blocks evicted w/o reference") 1797 .flags(nozero) 1798 ; 1799 1800 writebacks 1801 .init(system->maxMasters()) 1802 .name(name() + ".writebacks") 1803 .desc("number of writebacks") 1804 .flags(total | nozero | nonan) 1805 ; 1806 for (int i = 0; i < system->maxMasters(); i++) { 1807 writebacks.subname(i, system->getMasterName(i)); 1808 } 1809 1810 // MSHR statistics 1811 // MSHR hit statistics 1812 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1813 MemCmd cmd(access_idx); 1814 const string &cstr = cmd.toString(); 1815 1816 mshr_hits[access_idx] 1817 .init(system->maxMasters()) 1818 .name(name() + "." 
+ cstr + "_mshr_hits") 1819 .desc("number of " + cstr + " MSHR hits") 1820 .flags(total | nozero | nonan) 1821 ; 1822 for (int i = 0; i < system->maxMasters(); i++) { 1823 mshr_hits[access_idx].subname(i, system->getMasterName(i)); 1824 } 1825 } 1826 1827 demandMshrHits 1828 .name(name() + ".demand_mshr_hits") 1829 .desc("number of demand (read+write) MSHR hits") 1830 .flags(total | nozero | nonan) 1831 ; 1832 demandMshrHits = SUM_DEMAND(mshr_hits); 1833 for (int i = 0; i < system->maxMasters(); i++) { 1834 demandMshrHits.subname(i, system->getMasterName(i)); 1835 } 1836 1837 overallMshrHits 1838 .name(name() + ".overall_mshr_hits") 1839 .desc("number of overall MSHR hits") 1840 .flags(total | nozero | nonan) 1841 ; 1842 overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits); 1843 for (int i = 0; i < system->maxMasters(); i++) { 1844 overallMshrHits.subname(i, system->getMasterName(i)); 1845 } 1846 1847 // MSHR miss statistics 1848 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1849 MemCmd cmd(access_idx); 1850 const string &cstr = cmd.toString(); 1851 1852 mshr_misses[access_idx] 1853 .init(system->maxMasters()) 1854 .name(name() + "." + cstr + "_mshr_misses") 1855 .desc("number of " + cstr + " MSHR misses") 1856 .flags(total | nozero | nonan) 1857 ; 1858 for (int i = 0; i < system->maxMasters(); i++) { 1859 mshr_misses[access_idx].subname(i, system->getMasterName(i)); 1860 } 1861 } 1862 1863 demandMshrMisses 1864 .name(name() + ".demand_mshr_misses") 1865 .desc("number of demand (read+write) MSHR misses") 1866 .flags(total | nozero | nonan) 1867 ; 1868 demandMshrMisses = SUM_DEMAND(mshr_misses); 1869 for (int i = 0; i < system->maxMasters(); i++) { 1870 demandMshrMisses.subname(i, system->getMasterName(i)); 1871 } 1872 1873 overallMshrMisses 1874 .name(name() + ".overall_mshr_misses") 1875 .desc("number of overall MSHR misses") 1876 .flags(total | nozero | nonan) 1877 ; 1878 overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses); 1879 for (int i = 0; i < system->maxMasters(); i++) { 1880 overallMshrMisses.subname(i, system->getMasterName(i)); 1881 } 1882 1883 // MSHR miss latency statistics 1884 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1885 MemCmd cmd(access_idx); 1886 const string &cstr = cmd.toString(); 1887 1888 mshr_miss_latency[access_idx] 1889 .init(system->maxMasters()) 1890 .name(name() + "." 
+ cstr + "_mshr_miss_latency") 1891 .desc("number of " + cstr + " MSHR miss cycles") 1892 .flags(total | nozero | nonan) 1893 ; 1894 for (int i = 0; i < system->maxMasters(); i++) { 1895 mshr_miss_latency[access_idx].subname(i, system->getMasterName(i)); 1896 } 1897 } 1898 1899 demandMshrMissLatency 1900 .name(name() + ".demand_mshr_miss_latency") 1901 .desc("number of demand (read+write) MSHR miss cycles") 1902 .flags(total | nozero | nonan) 1903 ; 1904 demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency); 1905 for (int i = 0; i < system->maxMasters(); i++) { 1906 demandMshrMissLatency.subname(i, system->getMasterName(i)); 1907 } 1908 1909 overallMshrMissLatency 1910 .name(name() + ".overall_mshr_miss_latency") 1911 .desc("number of overall MSHR miss cycles") 1912 .flags(total | nozero | nonan) 1913 ; 1914 overallMshrMissLatency = 1915 demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency); 1916 for (int i = 0; i < system->maxMasters(); i++) { 1917 overallMshrMissLatency.subname(i, system->getMasterName(i)); 1918 } 1919 1920 // MSHR uncacheable statistics 1921 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1922 MemCmd cmd(access_idx); 1923 const string &cstr = cmd.toString(); 1924 1925 mshr_uncacheable[access_idx] 1926 .init(system->maxMasters()) 1927 .name(name() + "." + cstr + "_mshr_uncacheable") 1928 .desc("number of " + cstr + " MSHR uncacheable") 1929 .flags(total | nozero | nonan) 1930 ; 1931 for (int i = 0; i < system->maxMasters(); i++) { 1932 mshr_uncacheable[access_idx].subname(i, system->getMasterName(i)); 1933 } 1934 } 1935 1936 overallMshrUncacheable 1937 .name(name() + ".overall_mshr_uncacheable_misses") 1938 .desc("number of overall MSHR uncacheable misses") 1939 .flags(total | nozero | nonan) 1940 ; 1941 overallMshrUncacheable = 1942 SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable); 1943 for (int i = 0; i < system->maxMasters(); i++) { 1944 overallMshrUncacheable.subname(i, system->getMasterName(i)); 1945 } 1946 1947 // MSHR miss latency statistics 1948 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1949 MemCmd cmd(access_idx); 1950 const string &cstr = cmd.toString(); 1951 1952 mshr_uncacheable_lat[access_idx] 1953 .init(system->maxMasters()) 1954 .name(name() + "." + cstr + "_mshr_uncacheable_latency") 1955 .desc("number of " + cstr + " MSHR uncacheable cycles") 1956 .flags(total | nozero | nonan) 1957 ; 1958 for (int i = 0; i < system->maxMasters(); i++) { 1959 mshr_uncacheable_lat[access_idx].subname( 1960 i, system->getMasterName(i)); 1961 } 1962 } 1963 1964 overallMshrUncacheableLatency 1965 .name(name() + ".overall_mshr_uncacheable_latency") 1966 .desc("number of overall MSHR uncacheable cycles") 1967 .flags(total | nozero | nonan) 1968 ; 1969 overallMshrUncacheableLatency = 1970 SUM_DEMAND(mshr_uncacheable_lat) + 1971 SUM_NON_DEMAND(mshr_uncacheable_lat); 1972 for (int i = 0; i < system->maxMasters(); i++) { 1973 overallMshrUncacheableLatency.subname(i, system->getMasterName(i)); 1974 } 1975 1976#if 0 1977 // MSHR access formulas 1978 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1979 MemCmd cmd(access_idx); 1980 const string &cstr = cmd.toString(); 1981 1982 mshrAccesses[access_idx] 1983 .name(name() + "." 
+ cstr + "_mshr_accesses") 1984 .desc("number of " + cstr + " mshr accesses(hits+misses)") 1985 .flags(total | nozero | nonan) 1986 ; 1987 mshrAccesses[access_idx] = 1988 mshr_hits[access_idx] + mshr_misses[access_idx] 1989 + mshr_uncacheable[access_idx]; 1990 } 1991 1992 demandMshrAccesses 1993 .name(name() + ".demand_mshr_accesses") 1994 .desc("number of demand (read+write) mshr accesses") 1995 .flags(total | nozero | nonan) 1996 ; 1997 demandMshrAccesses = demandMshrHits + demandMshrMisses; 1998 1999 overallMshrAccesses 2000 .name(name() + ".overall_mshr_accesses") 2001 .desc("number of overall (read+write) mshr accesses") 2002 .flags(total | nozero | nonan) 2003 ; 2004 overallMshrAccesses = overallMshrHits + overallMshrMisses 2005 + overallMshrUncacheable; 2006#endif 2007 2008 // MSHR miss rate formulas 2009 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2010 MemCmd cmd(access_idx); 2011 const string &cstr = cmd.toString(); 2012 2013 mshrMissRate[access_idx] 2014 .name(name() + "." + cstr + "_mshr_miss_rate") 2015 .desc("mshr miss rate for " + cstr + " accesses") 2016 .flags(total | nozero | nonan) 2017 ; 2018 mshrMissRate[access_idx] = 2019 mshr_misses[access_idx] / accesses[access_idx]; 2020 2021 for (int i = 0; i < system->maxMasters(); i++) { 2022 mshrMissRate[access_idx].subname(i, system->getMasterName(i)); 2023 } 2024 } 2025 2026 demandMshrMissRate 2027 .name(name() + ".demand_mshr_miss_rate") 2028 .desc("mshr miss rate for demand accesses") 2029 .flags(total | nozero | nonan) 2030 ; 2031 demandMshrMissRate = demandMshrMisses / demandAccesses; 2032 for (int i = 0; i < system->maxMasters(); i++) { 2033 demandMshrMissRate.subname(i, system->getMasterName(i)); 2034 } 2035 2036 overallMshrMissRate 2037 .name(name() + ".overall_mshr_miss_rate") 2038 .desc("mshr miss rate for overall accesses") 2039 .flags(total | nozero | nonan) 2040 ; 2041 overallMshrMissRate = overallMshrMisses / overallAccesses; 2042 for (int i = 0; i < system->maxMasters(); i++) { 2043 overallMshrMissRate.subname(i, system->getMasterName(i)); 2044 } 2045 2046 // mshrMiss latency formulas 2047 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2048 MemCmd cmd(access_idx); 2049 const string &cstr = cmd.toString(); 2050 2051 avgMshrMissLatency[access_idx] 2052 .name(name() + "." 
+ cstr + "_avg_mshr_miss_latency") 2053 .desc("average " + cstr + " mshr miss latency") 2054 .flags(total | nozero | nonan) 2055 ; 2056 avgMshrMissLatency[access_idx] = 2057 mshr_miss_latency[access_idx] / mshr_misses[access_idx]; 2058 2059 for (int i = 0; i < system->maxMasters(); i++) { 2060 avgMshrMissLatency[access_idx].subname( 2061 i, system->getMasterName(i)); 2062 } 2063 } 2064 2065 demandAvgMshrMissLatency 2066 .name(name() + ".demand_avg_mshr_miss_latency") 2067 .desc("average overall mshr miss latency") 2068 .flags(total | nozero | nonan) 2069 ; 2070 demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses; 2071 for (int i = 0; i < system->maxMasters(); i++) { 2072 demandAvgMshrMissLatency.subname(i, system->getMasterName(i)); 2073 } 2074 2075 overallAvgMshrMissLatency 2076 .name(name() + ".overall_avg_mshr_miss_latency") 2077 .desc("average overall mshr miss latency") 2078 .flags(total | nozero | nonan) 2079 ; 2080 overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses; 2081 for (int i = 0; i < system->maxMasters(); i++) { 2082 overallAvgMshrMissLatency.subname(i, system->getMasterName(i)); 2083 } 2084 2085 // mshrUncacheable latency formulas 2086 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2087 MemCmd cmd(access_idx); 2088 const string &cstr = cmd.toString(); 2089 2090 avgMshrUncacheableLatency[access_idx] 2091 .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency") 2092 .desc("average " + cstr + " mshr uncacheable latency") 2093 .flags(total | nozero | nonan) 2094 ; 2095 avgMshrUncacheableLatency[access_idx] = 2096 mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx]; 2097 2098 for (int i = 0; i < system->maxMasters(); i++) { 2099 avgMshrUncacheableLatency[access_idx].subname( 2100 i, system->getMasterName(i)); 2101 } 2102 } 2103 2104 overallAvgMshrUncacheableLatency 2105 .name(name() + ".overall_avg_mshr_uncacheable_latency") 2106 .desc("average overall mshr uncacheable latency") 2107 .flags(total | nozero | nonan) 2108 ; 2109 overallAvgMshrUncacheableLatency = 2110 overallMshrUncacheableLatency / overallMshrUncacheable; 2111 for (int i = 0; i < system->maxMasters(); i++) { 2112 overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i)); 2113 } 2114 2115 replacements 2116 .name(name() + ".replacements") 2117 .desc("number of replacements") 2118 ; 2119} 2120 2121/////////////// 2122// 2123// CpuSidePort 2124// 2125/////////////// 2126bool 2127BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt) 2128{ 2129 // Snoops shouldn't happen when bypassing caches 2130 assert(!cache->system->bypassCaches()); 2131 2132 assert(pkt->isResponse()); 2133 2134 // Express snoop responses from master to slave, e.g., from L1 to L2 2135 cache->recvTimingSnoopResp(pkt); 2136 return true; 2137} 2138 2139 2140bool 2141BaseCache::CpuSidePort::tryTiming(PacketPtr pkt) 2142{ 2143 if (cache->system->bypassCaches() || pkt->isExpressSnoop()) { 2144 // always let express snoop packets through even if blocked 2145 return true; 2146 } else if (blocked || mustSendRetry) { 2147 // either already committed to send a retry, or blocked 2148 mustSendRetry = true; 2149 return false; 2150 } 2151 mustSendRetry = false; 2152 return true; 2153} 2154 2155bool 2156BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt) 2157{ 2158 assert(pkt->isRequest()); 2159 2160 if (cache->system->bypassCaches()) { 2161 // Just forward the packet if caches are disabled. 

bool
BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt)
{
    assert(pkt->isRequest());

    if (cache->system->bypassCaches()) {
        // Just forward the packet if caches are disabled.
        // @todo This should really enqueue the packet rather than
        // forwarding it immediately
        bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt);
        assert(success);
        return true;
    } else if (tryTiming(pkt)) {
        cache->recvTimingReq(pkt);
        return true;
    }
    return false;
}

Tick
BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt)
{
    if (cache->system->bypassCaches()) {
        // Forward the request if the system is in cache bypass mode.
        return cache->memSidePort.sendAtomic(pkt);
    } else {
        return cache->recvAtomic(pkt);
    }
}

void
BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt)
{
    if (cache->system->bypassCaches()) {
        // The cache should be flushed if we are in cache bypass mode,
        // so we don't need to check if we need to update anything.
        cache->memSidePort.sendFunctional(pkt);
        return;
    }

    // functional request
    cache->functionalAccess(pkt, true);
}

AddrRangeList
BaseCache::CpuSidePort::getAddrRanges() const
{
    return cache->getAddrRanges();
}


BaseCache::
CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache,
                         const std::string &_label)
    : CacheSlavePort(_name, _cache, _label), cache(_cache)
{
}

///////////////
//
// MemSidePort
//
///////////////
bool
BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt)
{
    cache->recvTimingResp(pkt);
    return true;
}

// Express snooping requests to memside port
void
BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    // handle snooping requests
    cache->recvTimingSnoopReq(pkt);
}

Tick
BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    return cache->recvAtomicSnoop(pkt);
}

void
BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    // functional snoop (note that in contrast to atomic we don't have
    // a specific functionalSnoop method, as they have the same
    // behaviour regardless)
    cache->functionalAccess(pkt, false);
}
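
// As a usage sketch (hypothetical requester; req and buf are
// assumptions): functional accesses complete immediately and are
// routed into functionalAccess() above, e.g. a debugger-style read
// issued from a master port attached somewhere above this cache:
//
//   uint8_t buf[8];
//   Packet pkt(req, MemCmd::ReadReq);  // req covers addr/size
//   pkt.dataStatic(buf);
//   masterPort.sendFunctional(&pkt);   // no timing, data valid on return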
void
BaseCache::CacheReqPacketQueue::sendDeferredPacket()
{
    // sanity check
    assert(!waitingOnRetry);

    // there should never be any deferred request packets in the
    // queue, instead we rely on the cache to provide the packets
    // from the MSHR queue or write queue
    assert(deferredPacketReadyTime() == MaxTick);

    // check for request packets (requests & writebacks)
    QueueEntry* entry = cache.getNextQueueEntry();

    if (!entry) {
        // can happen if e.g. we attempt a writeback and fail, but
        // before the retry, the writeback is eliminated because
        // we snoop another cache's ReadEx.
    } else {
        // let our snoop responses go first if there are responses to
        // the same addresses
        if (checkConflictingSnoop(entry->blkAddr)) {
            return;
        }
        waitingOnRetry = entry->sendPacket(cache);
    }

    // if we succeeded and are not waiting for a retry, schedule the
    // next send considering when the next queue is ready, note that
    // snoop responses have their own packet queue and thus schedule
    // their own events
    if (!waitingOnRetry) {
        schedSendEvent(cache.nextQueueReadyTime());
    }
}

BaseCache::MemSidePort::MemSidePort(const std::string &_name,
                                    BaseCache *_cache,
                                    const std::string &_label)
    : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
      _reqQueue(*_cache, *this, _snoopRespQueue, _label),
      _snoopRespQueue(*_cache, *this, _label), cache(_cache)
{
}
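
// To illustrate the null-entry case in
// CacheReqPacketQueue::sendDeferredPacket() above (a hypothetical
// trace): (1) a writeback send fails and we wait for a retry; (2) a
// snoop of another cache's ReadEx invalidates the block, eliminating
// the pending writeback; (3) the retry arrives, sendDeferredPacket()
// runs, getNextQueueEntry() returns nullptr, and we simply fall
// through and reschedule based on nextQueueReadyTime().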