// base.cc, revision 13215
/*
 * Copyright (c) 2012-2013, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Definition of BaseCache functions.
 */
#include "mem/cache/base.hh"

#include "base/compiler.hh"
#include "base/logging.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheVerbose.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "mem/cache/queue_entry.hh"
#include "params/BaseCache.hh"
#include "sim/core.hh"

class BaseMasterPort;
class BaseSlavePort;

using namespace std;

BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
                                          BaseCache *_cache,
                                          const std::string &_label)
    : QueuedSlavePort(_name, _cache, queue), queue(*_cache, *this, _label),
      blocked(false), mustSendRetry(false),
      sendRetryEvent([this]{ processSendRetry(); }, _name)
{
}

BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
    : MemObject(p),
      cpuSidePort(p->name + ".cpu_side", this, "CpuSidePort"),
      memSidePort(p->name + ".mem_side", this, "MemSidePort"),
      mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
      writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
      tags(p->tags),
      prefetcher(p->prefetcher),
      prefetchOnAccess(p->prefetch_on_access),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
                                    name(), false,
                                    EventBase::Delayed_Writeback_Pri),
      blkSize(blk_size),
      lookupLatency(p->tag_latency),
      dataLatency(p->data_latency),
      forwardLatency(p->tag_latency),
      fillLatency(p->data_latency),
      responseLatency(p->response_latency),
      numTarget(p->tgts_per_mshr),
      forwardSnoops(true),
      clusivity(p->clusivity),
      isReadOnly(p->is_read_only),
      blocked(0),
      order(0),
      noTargetMSHR(nullptr),
      missCount(p->max_miss_count),
      addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()),
      system(p->system)
{
    // the MSHR queue has no reserve entries as we check the MSHR
    // queue on every single allocation, whereas the write queue has
    // as many reserve entries as we have MSHRs, since every MSHR may
    // eventually require a writeback, and we do not check the write
    // buffer before committing to an MSHR

    // forward snoops is overridden in init() once we can query
    // whether the connected master is actually snooping or not

    tempBlock = new TempCacheBlk(blkSize);

    tags->setCache(this);
    if (prefetcher)
        prefetcher->setCache(this);
}
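// A sizing sketch for the two queues above (hypothetical parameter
// values, not defaults taken from this file): with mshrs = 4,
// demand_mshr_reserve = 1 and write_buffers = 8, the MSHR queue gets 4
// entries with no reserve (it is checked on every allocation, and one
// entry is held back for demand misses rather than prefetches), while
// the write buffer gets 8 entries plus 4 reserve entries, one per
// MSHR, since every outstanding miss may eventually produce a
// writeback and the write buffer is not checked before an MSHR is
// committed to.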
BaseCache::~BaseCache()
{
    delete tempBlock;
}

void
BaseCache::CacheSlavePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(CachePort, "Port is blocking new requests\n");
    blocked = true;
    // if we already scheduled a retry in this cycle, but it has not yet
    // happened, cancel it
    if (sendRetryEvent.scheduled()) {
        owner.deschedule(sendRetryEvent);
        DPRINTF(CachePort, "Port descheduled retry\n");
        mustSendRetry = true;
    }
}

void
BaseCache::CacheSlavePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(CachePort, "Port is accepting new requests\n");
    blocked = false;
    if (mustSendRetry) {
        // @TODO: need to find a better time (next cycle?)
        owner.schedule(sendRetryEvent, curTick() + 1);
    }
}

void
BaseCache::CacheSlavePort::processSendRetry()
{
    DPRINTF(CachePort, "Port is sending retry\n");

    // reset the flag and call retry
    mustSendRetry = false;
    sendRetryReq();
}

Addr
BaseCache::regenerateBlkAddr(CacheBlk* blk)
{
    if (blk != tempBlock) {
        return tags->regenerateBlkAddr(blk);
    } else {
        return tempBlock->getAddr();
    }
}

void
BaseCache::init()
{
    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
        fatal("Cache ports on %s are not connected\n", name());
    cpuSidePort.sendRangeChange();
    forwardSnoops = cpuSidePort.isSnooping();
}

BaseMasterPort &
BaseCache::getMasterPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_side") {
        return memSidePort;
    } else {
        return MemObject::getMasterPort(if_name, idx);
    }
}

BaseSlavePort &
BaseCache::getSlavePort(const std::string &if_name, PortID idx)
{
    if (if_name == "cpu_side") {
        return cpuSidePort;
    } else {
        return MemObject::getSlavePort(if_name, idx);
    }
}

bool
BaseCache::inRange(Addr addr) const
{
    for (const auto& r : addrRanges) {
        if (r.contains(addr)) {
            return true;
        }
    }
    return false;
}

void
BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    if (pkt->needsResponse()) {
        pkt->makeTimingResponse();
        // @todo: Make someone pay for this
        pkt->headerDelay = pkt->payloadDelay = 0;

        // In this case we are considering request_time that takes
        // into account the delay of the xbar, if any, and just
        // lat, neglecting responseLatency, modelling hit latency
        // just as lookupLatency or the value of lat overridden
        // by access(), which calls the accessBlock() function.
        cpuSidePort.schedTimingResp(pkt, request_time, true);
    } else {
        DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
                pkt->print());

        // queue the packet for deletion, as the sending cache is
        // still relying on it; if the block is found in access(),
        // CleanEvict and Writeback messages will be deleted
        // here as well
        pendingDelete.reset(pkt);
    }
}
void
BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                               Tick forward_time, Tick request_time)
{
    if (mshr) {
        /// MSHR hit
        /// @note writebacks will be checked in getNextMSHR()
        /// for any conflicting requests to the same block

        //@todo remove hw_pf here

        // Coalesce unless it was a software prefetch (see above).
        if (pkt) {
            assert(!pkt->isWriteback());
            // CleanEvicts corresponding to blocks which have
            // outstanding requests in MSHRs are simply sunk here
            if (pkt->cmd == MemCmd::CleanEvict) {
                pendingDelete.reset(pkt);
            } else if (pkt->cmd == MemCmd::WriteClean) {
                // A WriteClean should never coalesce with any
                // outstanding cache maintenance requests.

                // We use forward_time here because there is an
                // uncached memory write, forwarded to WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
                        pkt->print());

                assert(pkt->req->masterId() < system->maxMasters());
                mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // We use forward_time here because it is the same
                // considering new targets. We have multiple
                // requests for the same address here. It
                // specifies the latency to allocate an internal
                // buffer and to schedule an event to the queued
                // port and also takes into account the additional
                // delay of the xbar.
                mshr->allocateTarget(pkt, forward_time, order++,
                                     allocOnFill(pkt->cmd));
                if (mshr->getNumTargets() == numTarget) {
                    noTargetMSHR = mshr;
                    setBlocked(Blocked_NoTargets);
                    // need to be careful with this... if this mshr isn't
                    // ready yet (i.e. time > curTick()), we don't want to
                    // move it ahead of mshrs that are ready
                    // mshrQueue.moveToFront(mshr);
                }
            }
        }
    } else {
        // no MSHR
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

        if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
            // We use forward_time here because there is a
            // writeback or writeclean, forwarded to WriteBuffer.
            allocateWriteBuffer(pkt, forward_time);
        } else {
            if (blk && blk->isValid()) {
                // If we have a write miss to a valid block, we
                // need to mark the block non-readable. Otherwise
                // if we allow reads while there's an outstanding
                // write miss, the read could return stale data
                // out of the cache block... a more aggressive
                // system could detect the overlap (if any) and
                // forward data out of the MSHRs, but we don't do
                // that yet. Note that we do need to leave the
                // block valid so that it stays in the cache, in
                // case we get an upgrade response (and hence no
                // new data) when the write miss completes.
                // As long as CPUs do proper store/load forwarding
                // internally, and have a sufficiently weak memory
                // model, this is probably unnecessary, but at some
                // point it must have seemed like we needed it...
                assert((pkt->needsWritable() && !blk->isWritable()) ||
                       pkt->req->isCacheMaintenance());
                blk->status &= ~BlkReadable;
            }
            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }
    }
}
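// A sketch of the coalescing limit above, with an assumed (hypothetical)
// configuration of tgts_per_mshr = 4: the first miss to a block
// allocates an MSHR, and subsequent requests to the same block become
// additional targets of that MSHR. Once the target count reaches
// numTarget (here, 4), the whole cache blocks with Blocked_NoTargets
// until the response for that MSHR arrives and recvTimingResp() clears
// the condition.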
void
BaseCache::recvTimingReq(PacketPtr pkt)
{
    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    // We use lookupLatency here because it is used to specify the latency
    // to access.
    Cycles lat = lookupLatency;
    CacheBlk *blk = nullptr;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() calls accessBlock() which can modify its value.
        satisfied = access(pkt, blk, lat, writebacks);

        // copy writebacks to write buffer here to ensure they logically
        // precede anything happening below
        doWritebacks(writebacks, forward_time);
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is simply lat, that is the value of lookupLatency
    // modified by the access() function, or if not, just lookupLatency.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    // track time of availability of next prefetch, if any
    Tick next_pf_time = MaxTick;

    if (satisfied) {
        // if we need to notify the prefetcher we have to do it before
        // anything else, as handleTimingReqHit might later turn the
        // packet into a response
        if (prefetcher &&
            (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
            if (blk)
                blk->status &= ~BlkHWPrefetched;

            // Don't notify on SWPrefetch
            if (!pkt->cmd.isSWPrefetch()) {
                assert(!pkt->req->isCacheMaintenance());
                next_pf_time = prefetcher->notify(pkt);
            }
        }

        handleTimingReqHit(pkt, blk, request_time);
    } else {
        handleTimingReqMiss(pkt, blk, forward_time, request_time);

        // We should call the prefetcher regardless of whether the
        // request is satisfied, and regardless of whether it is in the
        // MSHR or not. The request could be a ReadReq hit, but still not
        // satisfied (potentially because of a prior write to the same
        // cache line). So, even when the request is not satisfied and
        // there is an MSHR already allocated for it, we need to let the
        // prefetcher know about the request.

        // Don't notify prefetcher on SWPrefetch or cache maintenance
        // operations
        if (prefetcher && pkt &&
            !pkt->cmd.isSWPrefetch() &&
            !pkt->req->isCacheMaintenance()) {
            next_pf_time = prefetcher->notify(pkt);
        }
    }

    if (next_pf_time != MaxTick) {
        schedMemSideSendEvent(next_pf_time);
    }
}
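// Worked timing example for the two points charged above (all numbers
// hypothetical): assume a clock period of 1000 ticks, tag_latency = 2
// cycles, and a packet arriving with headerDelay = 500 ticks. Then
//
//     forward_time = clockEdge(forwardLatency) + headerDelay
//                  = (next edge + 2 cycles) + 500 ticks
//
// is when a miss or writeback is handed to the MSHR/write buffer,
// while request_time uses lat as possibly updated by access() and is
// when a hit response is scheduled back to the CPU side. headerDelay
// and payloadDelay are then zeroed, since the crossbar delay has been
// accounted for.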
void
BaseCache::handleUncacheableWriteResp(PacketPtr pkt)
{
    Tick completion_time = clockEdge(responseLatency) +
        pkt->headerDelay + pkt->payloadDelay;

    // Reset the bus additional time as it is now accounted for
    pkt->headerDelay = pkt->payloadDelay = 0;

    cpuSidePort.schedTimingResp(pkt, completion_time, true);
}

void
BaseCache::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    // all header delay should be paid for by the crossbar, unless
    // this is a prefetch response from above
    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
             "%s saw a non-zero packet delay\n", name());

    const bool is_error = pkt->isError();

    if (is_error) {
        DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
                pkt->print());
    }

    DPRINTF(Cache, "%s: Handling response %s\n", __func__,
            pkt->print());

    // if this is a write, we should be looking at an uncacheable
    // write
    if (pkt->isWrite()) {
        assert(pkt->req->isUncacheable());
        handleUncacheableWriteResp(pkt);
        return;
    }

    // we have dealt with any (uncacheable) writes above, from here on
    // we know we are dealing with an MSHR due to a miss or a prefetch
    MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
    assert(mshr);

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = nullptr;
    }

    // Initial target is used just for stats
    MSHR::Target *initial_tgt = mshr->getTarget();
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick() - initial_tgt->recvTime;

    if (pkt->req->isUncacheable()) {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    } else {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    }

    PacketList writebacks;

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                pkt->getAddr());

        blk = handleFill(pkt, blk, writebacks, mshr->allocOnFill());
        assert(blk != nullptr);
    }

    if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
        // The block was marked not readable while there was a pending
        // cache maintenance operation, restore its flag.
        blk->status |= BlkReadable;

        // This was a cache clean operation (without invalidate)
        // and we have a copy of the block already. Since there
        // is no invalidation, we can promote targets that don't
        // require a writable copy
        mshr->promoteReadable();
    }

    if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) {
        // If at this point the referenced block is writable and the
        // response is not a cache invalidate, we promote targets that
        // were deferred as we couldn't guarantee a writable copy
        mshr->promoteWritable();
    }

    serviceMSHRTargets(mshr, pkt, blk, writebacks);

    if (mshr->promoteDeferredTargets()) {
        // avoid later read getting stale data while write miss is
        // outstanding.. see comment in timingAccess()
        if (blk) {
            blk->status &= ~BlkReadable;
        }
        mshrQueue.markPending(mshr);
        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
    } else {
        // while we deallocate an mshr from the queue we still have to
        // check the isFull condition before and after as we might
        // have been using the reserved entries already
        const bool was_full = mshrQueue.isFull();
        mshrQueue.deallocate(mshr);
        if (was_full && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }

        // Request the bus for a prefetch if this deallocation freed enough
        // MSHRs for a prefetch to take place
        if (prefetcher && mshrQueue.canPrefetch()) {
            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
                                         clockEdge());
            if (next_pf_time != MaxTick)
                schedMemSideSendEvent(next_pf_time);
        }
    }

    // if we used temp block, check to see if it's valid and then clear it out
    if (blk == tempBlock && tempBlock->isValid()) {
        evictBlock(blk, writebacks);
    }

    const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // copy writebacks to write buffer
    doWritebacks(writebacks, forward_time);

    DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
    delete pkt;
}

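// The miss-latency bookkeeping in recvTimingResp() is plain tick
// arithmetic. As an illustration with assumed numbers: if the first
// target of the MSHR was received at tick 10000 (initial_tgt->recvTime)
// and the response arrives at curTick() == 64000, then miss_latency =
// 54000 ticks is added to mshr_miss_latency (or mshr_uncacheable_lat)
// for the command/master pair of that first target.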
Tick
BaseCache::recvAtomic(PacketPtr pkt)
{
    // We are in atomic mode so we pay just for lookupLatency here.
    Cycles lat = lookupLatency;

    // follow the same flow as in recvTimingReq, and check if a cache
    // above us is responding
    if (pkt->cacheResponding() && !pkt->isClean()) {
        assert(!pkt->req->isCacheInvalidate());
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if a cache is responding, and it had the line in Owned
        // rather than Modified state, we need to invalidate any
        // copies that are not on the same path to memory
        assert(pkt->needsWritable() && !pkt->responderHadWritable());
        lat += ticksToCycles(memSidePort.sendAtomic(pkt));

        return lat * clockPeriod();
    }

    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    CacheBlk *blk = nullptr;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty
        // block. If a dirty block is encountered a WriteClean
        // will update any copies on the path to memory
        // until the point of reference.
        DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                __func__, pkt->print(), blk->print());
        PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
        writebacks.push_back(wb_pkt);
        pkt->setSatisfied();
    }

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);
    assert(writebacks.empty());

    if (!satisfied) {
        lat += handleAtomicReqMiss(pkt, blk, writebacks);
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention. If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used temp block, check to see if it's valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter), first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = evictBlock(blk);
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat * clockPeriod();
}
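// Atomic-mode latency is returned as a single tick count rather than
// being scheduled as events. A worked example with assumed numbers:
// with lookupLatency = 2 cycles and a clock period of 500 ticks, a hit
// returns 2 * 500 = 1000 ticks; on a miss, handleAtomicReqMiss() adds
// the cycle equivalent of the downstream sendAtomic() latency to lat
// before the same lat * clockPeriod() conversion.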
void
BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
{
    Addr blk_addr = pkt->getBlockAddr(blkSize);
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found. As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.

    // see if we have data at all (owned or otherwise)
    bool have_data = blk && blk->isValid()
        && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize,
                                     blk->data);

    // data we have is dirty if marked as such or if we have an
    // in-service MSHR that is pending a modified line
    bool have_dirty =
        have_data && (blk->isDirty() ||
                      (mshr && mshr->inService && mshr->isPendingModified()));

    bool done = have_dirty ||
        cpuSidePort.trySatisfyFunctional(pkt) ||
        mshrQueue.trySatisfyFunctional(pkt, blk_addr) ||
        writeBuffer.trySatisfyFunctional(pkt, blk_addr) ||
        memSidePort.trySatisfyFunctional(pkt);

    DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
            (blk && blk->isValid()) ? "valid " : "",
            have_data ? "data " : "", done ? "done " : "");

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (done) {
        pkt->makeResponse();
    } else {
        // if it came as a request from the CPU side then make sure it
        // continues towards the memory side
        if (from_cpu_side) {
            memSidePort.sendFunctional(pkt);
        } else if (cpuSidePort.isSnooping()) {
            // if it came from the memory side, it must be a snoop request
            // and we should only forward it if we are forwarding snoops
            cpuSidePort.sendFunctionalSnoop(pkt);
        }
    }
}


void
BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = pkt->getOffset(blkSize);
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}
QueueEntry*
BaseCache::getNextQueueEntry()
{
    // Check both MSHR queue and write buffer for potential requests,
    // note that null does not mean there is no request, it could
    // simply be that it is not ready
    MSHR *miss_mshr = mshrQueue.getNext();
    WriteQueueEntry *wq_entry = writeBuffer.getNext();

    // If we got a write buffer request ready, first priority is a
    // full write buffer, otherwise we favour the miss requests
    if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
        // need to search MSHR queue for conflicting earlier miss.
        MSHR *conflict_mshr =
            mshrQueue.findPending(wq_entry->blkAddr,
                                  wq_entry->isSecure);

        if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
            // Service misses in order until conflict is cleared.
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue write
        return wq_entry;
    } else if (miss_mshr) {
        // need to check for conflicting earlier writeback
        WriteQueueEntry *conflict_mshr =
            writeBuffer.findPending(miss_mshr->blkAddr,
                                    miss_mshr->isSecure);
        if (conflict_mshr) {
            // not sure why we don't check order here... it was in the
            // original code but commented out.

            // The only way this happens is if we are
            // doing a write and we didn't have permissions
            // then subsequently saw a writeback (owned got evicted).
            // We need to make sure to perform the writeback first
            // to preserve the dirty data, then we can issue the write.

            // should we return wq_entry here instead? I.e. do we
            // have to flush writes in order? I don't think so... not
            // for Alpha anyway. Maybe for x86?
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue read
        return miss_mshr;
    }

    // fall through... no pending requests. Try a prefetch.
    assert(!miss_mshr && !wq_entry);
    if (prefetcher && mshrQueue.canPrefetch()) {
        // If we have a miss queue slot, we can try a prefetch
        PacketPtr pkt = prefetcher->getPacket();
        if (pkt) {
            Addr pf_addr = pkt->getBlockAddr(blkSize);
            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
                // Update statistic on number of prefetches issued
                // (hwpf_mshr_misses)
                assert(pkt->req->masterId() < system->maxMasters());
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // allocate an MSHR and return it, note
                // that we send the packet straight away, so do not
                // schedule the send
                return allocateMissBuffer(pkt, curTick(), false);
            } else {
                // free the request and packet
                delete pkt;
            }
        }
    }

    return nullptr;
}
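// The arbitration above has three priority levels. Illustrative
// scenarios (hypothetical state): (1) the write buffer is full and
// holds an entry for block A with order 9, while an MSHR miss to A
// with order 5 is pending; the older conflicting miss (order 5) is
// serviced first. (2) A ready miss to block B finds a pending
// writeback of B in the write buffer; the writeback entry is returned
// first so the dirty data is preserved before the miss is issued.
// (3) Both queues are empty; only then may the prefetcher allocate an
// MSHR, and only if the block is not already present or pending.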
void
BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsWritable() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        if (pkt->isAtomicOp()) {
            // extract data from cache and save it into the data field in
            // the packet as a return value from this atomic op

            int offset = tags->extractBlkOffset(pkt->getAddr());
            uint8_t *blk_data = blk->data + offset;
            std::memcpy(pkt->getPtr<uint8_t>(), blk_data, pkt->getSize());

            // execute AMO operation
            (*(pkt->getAtomicOp()))(blk_data);

            // set block status to dirty
            blk->status |= BlkDirty;
        } else {
            cmpAndSwap(blk, pkt);
        }
    } else if (pkt->isWrite()) {
        // we have the block in a writable state and can go ahead,
        // note that the line may also be considered writable in
        // downstream caches along the path to memory, but always
        // Exclusive, and never Modified
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in writable state
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty (and thus transition to the
        // Modified state) even if we are a failed StoreCond so we
        // supply data to any snoops that have appended themselves to
        // this cache before knowing the store will fail.
        blk->status |= BlkDirty;
        DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }

        // all read responses have a data payload
        assert(pkt->hasRespData());
        pkt->setDataFromBlock(blk->data, blkSize);
    } else if (pkt->isUpgrade()) {
        // sanity check
        assert(!pkt->hasSharers());

        if (blk->isDirty()) {
            // we were in the Owned state, and a cache above us that
            // has the line in Shared state needs to be made aware
            // that the data it already has is in fact dirty
            pkt->setCacheResponding();
            blk->status &= ~BlkDirty;
        }
    } else if (pkt->isClean()) {
        blk->status &= ~BlkDirty;
    } else {
        assert(pkt->isInvalidate());
        invalidateBlock(blk);
        DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
                pkt->print());
    }
}
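// The two RMW flavours above differ in where the "compare" lives. A
// sketch of the semantics with assumed values: for an AMO SwapReq
// carrying an add functor, a block word holding 10 is first copied
// into the packet (the returned old value), and the functor then
// updates the block in place (e.g. to 13 for an add of 3). For a
// CondSwap handled by cmpAndSwap(), the requested write value is saved
// aside, the old memory value is always copied into the packet, and
// the block is overwritten only if the block bytes compare equal to
// the condition value in req->getExtraData().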
/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

bool
BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
                  PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());

    // Here lat is the value passed as parameter to accessBlock() function
    // that can modify its value.
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat);

    DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(),
            blk ? "hit " + blk->print() : "miss");

    if (pkt->req->isCacheMaintenance()) {
        // A cache maintenance operation is always forwarded to the
        // memory below even if the block is found in dirty state.

        // We defer any changes to the state of the block until we
        // create and mark as in service the mshr for the downstream
        // packet.
        return false;
    }

    if (pkt->isEviction()) {
        // We check for the presence of the block in caches above
        // before issuing a Writeback or CleanEvict to the write
        // buffer. Therefore the only possible case is an eviction
        // packet coming from above and encountering a Writeback
        // generated in this cache and waiting in the write buffer.
        // Cases of upper-level peer caches generating CleanEvict and
        // Writeback, or simply CleanEvict and CleanEvict, almost
        // simultaneously will be caught by snoops sent out by the
        // crossbar.
        WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
                                                          pkt->isSecure());
        if (wb_entry) {
            assert(wb_entry->getNumTargets() == 1);
            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
            assert(wbPkt->isWriteback());

            if (pkt->isCleanEviction()) {
                // The CleanEvict and WritebackClean snoops into other
                // peer caches of the same level while traversing the
                // crossbar. If a copy of the block is found, the
                // packet is deleted in the crossbar. Hence, none of
                // the other upper level caches connected to this
                // cache have the block, so we can clear the
                // BLOCK_CACHED flag in the Writeback if set and
                // discard the CleanEvict by returning true.
                wbPkt->clearBlockCached();
                return true;
            } else {
                assert(pkt->cmd == MemCmd::WritebackDirty);
                // Dirty writeback from above trumps our clean
                // writeback... discard here
                // Note: markInService will remove entry from writeback buffer.
                markInService(wb_entry);
                delete wbPkt;
            }
        }
    }

    // Writeback handling is a special case. We can write the block into
    // the cache without having a writable copy (or any copy at all).
    if (pkt->isWriteback()) {
        assert(blkSize == pkt->getSize());

        // we could get a clean writeback while we are having
        // outstanding accesses to a block, do the simple thing for
        // now and drop the clean writeback so that we do not upset
        // any ordering/decisions about ownership already taken
        if (pkt->cmd == MemCmd::WritebackClean &&
            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
                    "dropping\n", pkt->getAddr());
            return true;
        }

        if (!blk) {
            // need to do a replacement
            blk = allocateBlock(pkt, writebacks);
            if (!blk) {
                // no replaceable block available: give up, fwd to next level.
                incMissCount(pkt);
                return false;
            }

            blk->status |= (BlkValid | BlkReadable);
        }
        // only mark the block dirty if we got a writeback command,
        // and leave it as is for a clean writeback
        if (pkt->cmd == MemCmd::WritebackDirty) {
            // TODO: the coherent cache can assert(!blk->isDirty());
            blk->status |= BlkDirty;
        }
        // if the packet does not have sharers, it is passing
        // writable, and we got the writeback in Modified or Exclusive
        // state, if not we are in the Owned or Shared state
        if (!pkt->hasSharers()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
        incHitCount(pkt);
        // populate the time when the block will be ready to access.
        blk->whenReady = clockEdge(fillLatency) + pkt->headerDelay +
            pkt->payloadDelay;
        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        if (blk) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and delete
            // it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the CleanEvict
        // like a Writeback which could not find a replaceable block so has to
        // go to next level.
        return false;
    } else if (pkt->cmd == MemCmd::WriteClean) {
        // WriteClean handling is a special case. We can allocate a
        // block directly if it doesn't exist and we can update the
        // block immediately. The WriteClean transfers the ownership
        // of the block as well.
        assert(blkSize == pkt->getSize());

        if (!blk) {
            if (pkt->writeThrough()) {
                // if this is a write through packet, we don't try to
                // allocate if the block is not present
                return false;
            } else {
                // a writeback that misses needs to allocate a new block
                blk = allocateBlock(pkt, writebacks);
                if (!blk) {
                    // no replaceable block available: give up, fwd to
                    // next level.
                    incMissCount(pkt);
                    return false;
                }

                blk->status |= (BlkValid | BlkReadable);
            }
        }

        // at this point either this is a writeback or a write-through
        // write clean operation and the block is already in this
        // cache, we need to update the data and the block flags
        assert(blk);
        // TODO: the coherent cache can assert(!blk->isDirty());
        if (!pkt->writeThrough()) {
            blk->status |= BlkDirty;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());

        incHitCount(pkt);
        // populate the time when the block will be ready to access.
        blk->whenReady = clockEdge(fillLatency) + pkt->headerDelay +
            pkt->payloadDelay;
        // if this is a write-through packet it will be sent to the
        // cache below
        return !pkt->writeThrough();
    } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
                       blk->isReadable())) {
        // OK to satisfy access
        incHitCount(pkt);
        satisfyRequest(pkt, blk);
        maintainClusivity(pkt->fromCache(), blk);

        return true;
    }

    // Can't satisfy access normally... either no block (blk == nullptr)
    // or have block but need writable

    incMissCount(pkt);

    if (!blk && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}

void
BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
{
    if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
        clusivity == Enums::mostly_excl) {
        // if we have responded to a cache, and our block is still
        // valid, but not dirty, and this cache is mostly exclusive
        // with respect to the cache above, drop the block
        invalidateBlock(blk);
    }
}
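// Clusivity is a configuration knob rather than hard-coded behaviour.
// A hedged illustration (hypothetical config, typical of an
// exclusive-ish L2): with clusivity == Enums::mostly_excl, a clean
// block that has just served a fill to an upstream cache is dropped
// here, so the two levels hold mostly disjoint data; with mostly_incl
// the block is kept and both levels may hold a copy.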
CacheBlk*
BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
                      bool allocate)
{
    assert(pkt->isResponse() || pkt->cmd == MemCmd::WriteLineReq);
    Addr addr = pkt->getAddr();
    bool is_secure = pkt->isSecure();
#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    // When handling a fill, we should have no writes to this line.
    assert(addr == pkt->getBlockAddr(blkSize));
    assert(!writeBuffer.findMatch(addr, is_secure));

    if (!blk) {
        // better have read new data...
        assert(pkt->hasData());

        // only read responses and write-line requests have data;
        // note that we don't write the data here for write-line - that
        // happens in the subsequent call to satisfyRequest
        assert(pkt->isRead() || pkt->cmd == MemCmd::WriteLineReq);

        // need to do a replacement if allocating, otherwise we stick
        // with the temporary storage
        blk = allocate ? allocateBlock(pkt, writebacks) : nullptr;

        if (!blk) {
            // No replaceable block or a mostly exclusive
            // cache... just use temporary storage to complete the
            // current request and then get rid of it
            assert(!tempBlock->isValid());
            blk = tempBlock;
            tempBlock->insert(addr, is_secure);
            DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
                    is_secure ? "s" : "ns");
        }

        // we should never be overwriting a valid block
        assert(!blk->isValid());
    } else {
        // existing block... probably an upgrade
        assert(regenerateBlkAddr(blk) == addr);
        assert(blk->isSecure() == is_secure);
        // either we're getting new data or the block should already be valid
        assert(pkt->hasData() || blk->isValid());
        // don't clear block status... if block is already dirty we
        // don't want to lose that
    }

    blk->status |= BlkValid | BlkReadable;

    // sanity check for whole-line writes, which should always be
    // marked as writable as part of the fill, and then later marked
    // dirty as part of satisfyRequest
    if (pkt->cmd == MemCmd::WriteLineReq) {
        assert(!pkt->hasSharers());
    }

    // here we deal with setting the appropriate state of the line,
    // and we start by looking at the hasSharers flag, and ignore the
    // cacheResponding flag (normally signalling dirty data) if the
    // packet has sharers, thus the line is never allocated as Owned
    // (dirty but not writable), and always ends up being either
    // Shared, Exclusive or Modified, see Packet::setCacheResponding
    // for more details
    if (!pkt->hasSharers()) {
        // we could get a writable line from memory (rather than a
        // cache) even in a read-only cache, note that we set this bit
        // even for a read-only cache, possibly revisit this decision
        blk->status |= BlkWritable;

        // check if we got this via cache-to-cache transfer (i.e., from a
        // cache that had the block in Modified or Owned state)
        if (pkt->cacheResponding()) {
            // we got the block in Modified state, and invalidated the
            // owner's copy
            blk->status |= BlkDirty;

            chatty_assert(!isReadOnly, "Should never see dirty snoop response "
                          "in read-only cache %s\n", name());
        }
    }

    DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
            addr, is_secure ? "s" : "ns", old_state, blk->print());

    // if we got new data, copy it in (checking for a read response
    // and a response that has data is the same in the end)
    if (pkt->isRead()) {
        // sanity checks
        assert(pkt->hasData());
        assert(pkt->getSize() == blkSize);

        pkt->writeDataToBlock(blk->data, blkSize);
    }
    // We pay for fillLatency here.
    blk->whenReady = clockEdge() + fillLatency * clockPeriod() +
        pkt->payloadDelay;

    return blk;
}
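// The whenReady computation above is plain tick arithmetic. A worked
// example with assumed numbers: with a clock period of 1000 ticks,
// fillLatency (data_latency) = 2 cycles, and a response carrying
// payloadDelay = 3000 ticks, the block becomes accessible at
// clockEdge() + 2 * 1000 + 3000 ticks, i.e. 5000 ticks after the next
// clock edge; the tags may factor whenReady into the access latency
// they report for subsequent lookups of this block.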
CacheBlk*
BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks)
{
    // Get address
    const Addr addr = pkt->getAddr();

    // Get secure bit
    const bool is_secure = pkt->isSecure();

    // Find replacement victim
    std::vector<CacheBlk*> evict_blks;
    CacheBlk *victim = tags->findVictim(addr, is_secure, evict_blks);

    // It is valid to return nullptr if there is no victim
    if (!victim)
        return nullptr;

    // Check for transient state allocations. If any of the entries listed
    // for eviction has a transient state, the allocation fails
    for (const auto& blk : evict_blks) {
        if (blk->isValid()) {
            Addr repl_addr = regenerateBlkAddr(blk);
            MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
            if (repl_mshr) {
                // must be an outstanding upgrade or clean request
                // on a block we're about to replace...
                assert((!blk->isWritable() && repl_mshr->needsWritable()) ||
                       repl_mshr->isCleaning());

                // too hard to replace block with transient state
                // allocation failed, block not inserted
                return nullptr;
            }
        }
    }

    // The victim will be replaced by a new entry, so increase the replacement
    // counter if a valid block is being replaced
    if (victim->isValid()) {
        DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx "
                "(%s): %s\n", regenerateBlkAddr(victim),
                victim->isSecure() ? "s" : "ns",
                addr, is_secure ? "s" : "ns",
                victim->isDirty() ? "writeback" : "clean");

        replacements++;
    }

    // Evict valid blocks associated with this victim block
    for (const auto& blk : evict_blks) {
        if (blk->isValid()) {
            if (blk->wasPrefetched()) {
                unusedPrefetches++;
            }

            evictBlock(blk, writebacks);
        }
    }

    // Insert new block at victimized entry
    tags->insertBlock(addr, is_secure, pkt->req->masterId(),
                      pkt->req->taskId(), victim);

    return victim;
}

void
BaseCache::invalidateBlock(CacheBlk *blk)
{
    if (blk != tempBlock)
        tags->invalidate(blk);
    blk->invalidate();
}
PacketPtr
BaseCache::writebackBlk(CacheBlk *blk)
{
    chatty_assert(!isReadOnly || writebackClean,
                  "Writeback from read-only cache");
    assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));

    writebacks[Request::wbMasterId]++;

    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);

    PacketPtr pkt =
        new Packet(req, blk->isDirty() ?
                   MemCmd::WritebackDirty : MemCmd::WritebackClean);

    DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n",
            pkt->print(), blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting shared means we pass the block in modified
        // state, mark our own block non-writable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    pkt->setDataFromBlock(blk->data, blkSize);

    return pkt;
}
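// The state transitions performed by writebackBlk(), summarised with
// the usual MOESI names (an informal reading of the code above, not a
// table from the original source; the two clean rows only arise when
// writebackClean is set):
//
//   Modified  (dirty, writable)   -> WritebackDirty, writable passed on
//   Owned     (dirty, !writable)  -> WritebackDirty with hasSharers set
//   Exclusive (clean, writable)   -> WritebackClean, writable passed on
//   Shared    (clean, !writable)  -> WritebackClean with hasSharers set
//
// In all cases the local copy ends up clean and non-writable;
// writecleanBlk() below applies the same writable/hasSharers logic.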
PacketPtr
BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
{
    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure()) {
        req->setFlags(Request::SECURE);
    }
    req->taskId(blk->task_id);

    PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id);

    if (dest) {
        req->setFlags(dest);
        pkt->setWriteThrough();
    }

    DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(),
            blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting shared means we pass the block in modified
        // state, mark our own block non-writable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    pkt->setDataFromBlock(blk->data, blkSize);

    return pkt;
}


void
BaseCache::memWriteback()
{
    tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
}

void
BaseCache::memInvalidate()
{
    tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
}

bool
BaseCache::isDirty() const
{
    return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); });
}

void
BaseCache::writebackVisitor(CacheBlk &blk)
{
    if (blk.isDirty()) {
        assert(blk.isValid());

        RequestPtr request = std::make_shared<Request>(
            regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId);

        request->taskId(blk.task_id);
        if (blk.isSecure()) {
            request->setFlags(Request::SECURE);
        }

        Packet packet(request, MemCmd::WriteReq);
        packet.dataStatic(blk.data);

        memSidePort.sendFunctional(&packet);

        blk.status &= ~BlkDirty;
    }
}

void
BaseCache::invalidateVisitor(CacheBlk &blk)
{
    if (blk.isDirty())
        warn_once("Invalidating dirty cache lines. " \
                  "Expect things to break.\n");

    if (blk.isValid()) {
        assert(!blk.isDirty());
        invalidateBlock(&blk);
    }
}

Tick
BaseCache::nextQueueReadyTime() const
{
    Tick nextReady = std::min(mshrQueue.nextReadyTime(),
                              writeBuffer.nextReadyTime());

    // Don't signal prefetch ready time if no MSHRs available
    // Will signal once enough MSHRs are deallocated
    if (prefetcher && mshrQueue.canPrefetch()) {
        nextReady = std::min(nextReady,
                             prefetcher->nextPrefetchReadyTime());
    }

    return nextReady;
}

bool
BaseCache::sendMSHRQueuePacket(MSHR* mshr)
{
    assert(mshr);

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;

    DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());

    CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);

    // either a prefetch that is not present upstream, or a normal
    // MSHR request, proceed to get the packet to send downstream
    PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable());

    mshr->isForward = (pkt == nullptr);

    if (mshr->isForward) {
        // not a cache block request, but a response is expected
        // make copy of current packet to forward, keep current
        // copy for response handling
        pkt = new Packet(tgt_pkt, false, true);
        assert(!pkt->isWrite());
    }

    // play it safe and append (rather than set) the sender state,
    // as forwarded packets may already have existing state
    pkt->pushSenderState(mshr);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty block. Mark
        // the packet so that the destination xbar can determine that
        // there will be a follow-up write packet as well.
        pkt->setSatisfied();
    }

    if (!memSidePort.sendTimingReq(pkt)) {
        // we are awaiting a retry, but we
        // delete the packet and will be creating a new packet
        // when we get the opportunity
        delete pkt;

        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        // As part of the call to sendTimingReq the packet is
        // forwarded to all neighbouring caches (and any caches
        // above them) as a snoop. Thus at this point we know if
        // any of the neighbouring caches are responding, and if
        // so, we know it is dirty, and we can determine if it is
        // being passed as Modified, making our MSHR the ordering
        // point
        bool pending_modified_resp = !pkt->hasSharers() &&
            pkt->cacheResponding();
        markInService(mshr, pending_modified_resp);

        if (pkt->isClean() && blk && blk->isDirty()) {
            // A cache clean operation is looking for a dirty
            // block. If a dirty block is encountered a WriteClean
            // will update any copies on the path to memory
            // until the point of reference.
            DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
                                             pkt->id);
            PacketList writebacks;
            writebacks.push_back(wb_pkt);
            doWritebacks(writebacks, 0);
        }

        return false;
    }
}

bool
BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
{
    assert(wq_entry);

    // always a single target for write queue entries
    PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;

    DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());

    // forward as is, both for evictions and uncacheable writes
    if (!memSidePort.sendTimingReq(tgt_pkt)) {
        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        markInService(wq_entry);
        return false;
    }
}

void
BaseCache::serialize(CheckpointOut &cp) const
{
    bool dirty(isDirty());

    if (dirty) {
        warn("*** The cache still contains dirty data. ***\n");
        warn("    Make sure to drain the system using the correct flags.\n");
        warn("    This checkpoint will not restore correctly " \
             "and dirty data in the cache will be lost!\n");
    }

    // Since we don't checkpoint the data in the cache, any dirty data
    // will be lost when restoring from a checkpoint of a system that
    // wasn't drained properly. Flag the checkpoint as invalid if the
    // cache contains dirty data.
    bool bad_checkpoint(dirty);
    SERIALIZE_SCALAR(bad_checkpoint);
}

void
BaseCache::unserialize(CheckpointIn &cp)
{
    bool bad_checkpoint;
    UNSERIALIZE_SCALAR(bad_checkpoint);
    if (bad_checkpoint) {
        fatal("Restoring from checkpoints with dirty caches is not "
              "supported in the classic memory system. Please remove any "
              "caches or drain them properly before taking checkpoints.\n");
    }
}

void
BaseCache::regStats()
{
    MemObject::regStats();

    using namespace Stats;

    // Hit statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        hits[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_hits")
            .desc("number of " + cstr + " hits")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            hits[access_idx].subname(i, system->getMasterName(i));
        }
    }

// These macros make it easier to sum the right subset of commands and
// to change the subset of commands that are considered "demand" vs
// "non-demand"
#define SUM_DEMAND(s) \
    (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \
     s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq])

// should writebacks be included here?  prior code was inconsistent...
#define SUM_NON_DEMAND(s) \
    (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq])
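// For clarity, the macros above expand to sums over per-command stat
// vectors. For example, SUM_DEMAND(hits) expands to
//
//     (hits[MemCmd::ReadReq] + hits[MemCmd::WriteReq] +
//      hits[MemCmd::WriteLineReq] + hits[MemCmd::ReadExReq] +
//      hits[MemCmd::ReadCleanReq] + hits[MemCmd::ReadSharedReq])
//
// so demandHits below is the per-master sum of the hit counts of the
// six demand commands, while prefetch requests are accumulated
// separately via SUM_NON_DEMAND.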
    demandHits
        .name(name() + ".demand_hits")
        .desc("number of demand (read+write) hits")
        .flags(total | nozero | nonan)
        ;
    demandHits = SUM_DEMAND(hits);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandHits.subname(i, system->getMasterName(i));
    }

    overallHits
        .name(name() + ".overall_hits")
        .desc("number of overall hits")
        .flags(total | nozero | nonan)
        ;
    overallHits = demandHits + SUM_NON_DEMAND(hits);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallHits.subname(i, system->getMasterName(i));
    }

    // Miss statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        misses[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_misses")
            .desc("number of " + cstr + " misses")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            misses[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMisses
        .name(name() + ".demand_misses")
        .desc("number of demand (read+write) misses")
        .flags(total | nozero | nonan)
        ;
    demandMisses = SUM_DEMAND(misses);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMisses.subname(i, system->getMasterName(i));
    }

    overallMisses
        .name(name() + ".overall_misses")
        .desc("number of overall misses")
        .flags(total | nozero | nonan)
        ;
    overallMisses = demandMisses + SUM_NON_DEMAND(misses);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMisses.subname(i, system->getMasterName(i));
    }

    // Miss latency statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        missLatency[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_miss_latency")
            .desc("number of " + cstr + " miss cycles")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            missLatency[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMissLatency
        .name(name() + ".demand_miss_latency")
        .desc("number of demand (read+write) miss cycles")
        .flags(total | nozero | nonan)
        ;
    demandMissLatency = SUM_DEMAND(missLatency);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMissLatency.subname(i, system->getMasterName(i));
    }

    overallMissLatency
        .name(name() + ".overall_miss_latency")
        .desc("number of overall miss cycles")
        .flags(total | nozero | nonan)
        ;
    overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMissLatency.subname(i, system->getMasterName(i));
    }
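    // A numeric sanity check for the formula stats that follow
    // (made-up numbers): if a master records demandHits = 800 and
    // demandMisses = 200, then demandAccesses = 1000 and
    // demandMissRate = 200 / 1000 = 0.2; with demandMissLatency =
    // 10,000,000 ticks, demandAvgMissLatency = 10,000,000 / 200 =
    // 50,000 ticks per miss. These are formula stats, so they are
    // evaluated from the underlying counters at dump time.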
+ cstr + "_accesses") 1718 .desc("number of " + cstr + " accesses(hits+misses)") 1719 .flags(total | nozero | nonan) 1720 ; 1721 accesses[access_idx] = hits[access_idx] + misses[access_idx]; 1722 1723 for (int i = 0; i < system->maxMasters(); i++) { 1724 accesses[access_idx].subname(i, system->getMasterName(i)); 1725 } 1726 } 1727 1728 demandAccesses 1729 .name(name() + ".demand_accesses") 1730 .desc("number of demand (read+write) accesses") 1731 .flags(total | nozero | nonan) 1732 ; 1733 demandAccesses = demandHits + demandMisses; 1734 for (int i = 0; i < system->maxMasters(); i++) { 1735 demandAccesses.subname(i, system->getMasterName(i)); 1736 } 1737 1738 overallAccesses 1739 .name(name() + ".overall_accesses") 1740 .desc("number of overall (read+write) accesses") 1741 .flags(total | nozero | nonan) 1742 ; 1743 overallAccesses = overallHits + overallMisses; 1744 for (int i = 0; i < system->maxMasters(); i++) { 1745 overallAccesses.subname(i, system->getMasterName(i)); 1746 } 1747 1748 // miss rate formulas 1749 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1750 MemCmd cmd(access_idx); 1751 const string &cstr = cmd.toString(); 1752 1753 missRate[access_idx] 1754 .name(name() + "." + cstr + "_miss_rate") 1755 .desc("miss rate for " + cstr + " accesses") 1756 .flags(total | nozero | nonan) 1757 ; 1758 missRate[access_idx] = misses[access_idx] / accesses[access_idx]; 1759 1760 for (int i = 0; i < system->maxMasters(); i++) { 1761 missRate[access_idx].subname(i, system->getMasterName(i)); 1762 } 1763 } 1764 1765 demandMissRate 1766 .name(name() + ".demand_miss_rate") 1767 .desc("miss rate for demand accesses") 1768 .flags(total | nozero | nonan) 1769 ; 1770 demandMissRate = demandMisses / demandAccesses; 1771 for (int i = 0; i < system->maxMasters(); i++) { 1772 demandMissRate.subname(i, system->getMasterName(i)); 1773 } 1774 1775 overallMissRate 1776 .name(name() + ".overall_miss_rate") 1777 .desc("miss rate for overall accesses") 1778 .flags(total | nozero | nonan) 1779 ; 1780 overallMissRate = overallMisses / overallAccesses; 1781 for (int i = 0; i < system->maxMasters(); i++) { 1782 overallMissRate.subname(i, system->getMasterName(i)); 1783 } 1784 1785 // miss latency formulas 1786 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1787 MemCmd cmd(access_idx); 1788 const string &cstr = cmd.toString(); 1789 1790 avgMissLatency[access_idx] 1791 .name(name() + "." 
+ cstr + "_avg_miss_latency") 1792 .desc("average " + cstr + " miss latency") 1793 .flags(total | nozero | nonan) 1794 ; 1795 avgMissLatency[access_idx] = 1796 missLatency[access_idx] / misses[access_idx]; 1797 1798 for (int i = 0; i < system->maxMasters(); i++) { 1799 avgMissLatency[access_idx].subname(i, system->getMasterName(i)); 1800 } 1801 } 1802 1803 demandAvgMissLatency 1804 .name(name() + ".demand_avg_miss_latency") 1805 .desc("average overall miss latency") 1806 .flags(total | nozero | nonan) 1807 ; 1808 demandAvgMissLatency = demandMissLatency / demandMisses; 1809 for (int i = 0; i < system->maxMasters(); i++) { 1810 demandAvgMissLatency.subname(i, system->getMasterName(i)); 1811 } 1812 1813 overallAvgMissLatency 1814 .name(name() + ".overall_avg_miss_latency") 1815 .desc("average overall miss latency") 1816 .flags(total | nozero | nonan) 1817 ; 1818 overallAvgMissLatency = overallMissLatency / overallMisses; 1819 for (int i = 0; i < system->maxMasters(); i++) { 1820 overallAvgMissLatency.subname(i, system->getMasterName(i)); 1821 } 1822 1823 blocked_cycles.init(NUM_BLOCKED_CAUSES); 1824 blocked_cycles 1825 .name(name() + ".blocked_cycles") 1826 .desc("number of cycles access was blocked") 1827 .subname(Blocked_NoMSHRs, "no_mshrs") 1828 .subname(Blocked_NoTargets, "no_targets") 1829 ; 1830 1831 1832 blocked_causes.init(NUM_BLOCKED_CAUSES); 1833 blocked_causes 1834 .name(name() + ".blocked") 1835 .desc("number of cycles access was blocked") 1836 .subname(Blocked_NoMSHRs, "no_mshrs") 1837 .subname(Blocked_NoTargets, "no_targets") 1838 ; 1839 1840 avg_blocked 1841 .name(name() + ".avg_blocked_cycles") 1842 .desc("average number of cycles each access was blocked") 1843 .subname(Blocked_NoMSHRs, "no_mshrs") 1844 .subname(Blocked_NoTargets, "no_targets") 1845 ; 1846 1847 avg_blocked = blocked_cycles / blocked_causes; 1848 1849 unusedPrefetches 1850 .name(name() + ".unused_prefetches") 1851 .desc("number of HardPF blocks evicted w/o reference") 1852 .flags(nozero) 1853 ; 1854 1855 writebacks 1856 .init(system->maxMasters()) 1857 .name(name() + ".writebacks") 1858 .desc("number of writebacks") 1859 .flags(total | nozero | nonan) 1860 ; 1861 for (int i = 0; i < system->maxMasters(); i++) { 1862 writebacks.subname(i, system->getMasterName(i)); 1863 } 1864 1865 // MSHR statistics 1866 // MSHR hit statistics 1867 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1868 MemCmd cmd(access_idx); 1869 const string &cstr = cmd.toString(); 1870 1871 mshr_hits[access_idx] 1872 .init(system->maxMasters()) 1873 .name(name() + "." 
+ cstr + "_mshr_hits") 1874 .desc("number of " + cstr + " MSHR hits") 1875 .flags(total | nozero | nonan) 1876 ; 1877 for (int i = 0; i < system->maxMasters(); i++) { 1878 mshr_hits[access_idx].subname(i, system->getMasterName(i)); 1879 } 1880 } 1881 1882 demandMshrHits 1883 .name(name() + ".demand_mshr_hits") 1884 .desc("number of demand (read+write) MSHR hits") 1885 .flags(total | nozero | nonan) 1886 ; 1887 demandMshrHits = SUM_DEMAND(mshr_hits); 1888 for (int i = 0; i < system->maxMasters(); i++) { 1889 demandMshrHits.subname(i, system->getMasterName(i)); 1890 } 1891 1892 overallMshrHits 1893 .name(name() + ".overall_mshr_hits") 1894 .desc("number of overall MSHR hits") 1895 .flags(total | nozero | nonan) 1896 ; 1897 overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits); 1898 for (int i = 0; i < system->maxMasters(); i++) { 1899 overallMshrHits.subname(i, system->getMasterName(i)); 1900 } 1901 1902 // MSHR miss statistics 1903 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1904 MemCmd cmd(access_idx); 1905 const string &cstr = cmd.toString(); 1906 1907 mshr_misses[access_idx] 1908 .init(system->maxMasters()) 1909 .name(name() + "." + cstr + "_mshr_misses") 1910 .desc("number of " + cstr + " MSHR misses") 1911 .flags(total | nozero | nonan) 1912 ; 1913 for (int i = 0; i < system->maxMasters(); i++) { 1914 mshr_misses[access_idx].subname(i, system->getMasterName(i)); 1915 } 1916 } 1917 1918 demandMshrMisses 1919 .name(name() + ".demand_mshr_misses") 1920 .desc("number of demand (read+write) MSHR misses") 1921 .flags(total | nozero | nonan) 1922 ; 1923 demandMshrMisses = SUM_DEMAND(mshr_misses); 1924 for (int i = 0; i < system->maxMasters(); i++) { 1925 demandMshrMisses.subname(i, system->getMasterName(i)); 1926 } 1927 1928 overallMshrMisses 1929 .name(name() + ".overall_mshr_misses") 1930 .desc("number of overall MSHR misses") 1931 .flags(total | nozero | nonan) 1932 ; 1933 overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses); 1934 for (int i = 0; i < system->maxMasters(); i++) { 1935 overallMshrMisses.subname(i, system->getMasterName(i)); 1936 } 1937 1938 // MSHR miss latency statistics 1939 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1940 MemCmd cmd(access_idx); 1941 const string &cstr = cmd.toString(); 1942 1943 mshr_miss_latency[access_idx] 1944 .init(system->maxMasters()) 1945 .name(name() + "." 
+ cstr + "_mshr_miss_latency") 1946 .desc("number of " + cstr + " MSHR miss cycles") 1947 .flags(total | nozero | nonan) 1948 ; 1949 for (int i = 0; i < system->maxMasters(); i++) { 1950 mshr_miss_latency[access_idx].subname(i, system->getMasterName(i)); 1951 } 1952 } 1953 1954 demandMshrMissLatency 1955 .name(name() + ".demand_mshr_miss_latency") 1956 .desc("number of demand (read+write) MSHR miss cycles") 1957 .flags(total | nozero | nonan) 1958 ; 1959 demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency); 1960 for (int i = 0; i < system->maxMasters(); i++) { 1961 demandMshrMissLatency.subname(i, system->getMasterName(i)); 1962 } 1963 1964 overallMshrMissLatency 1965 .name(name() + ".overall_mshr_miss_latency") 1966 .desc("number of overall MSHR miss cycles") 1967 .flags(total | nozero | nonan) 1968 ; 1969 overallMshrMissLatency = 1970 demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency); 1971 for (int i = 0; i < system->maxMasters(); i++) { 1972 overallMshrMissLatency.subname(i, system->getMasterName(i)); 1973 } 1974 1975 // MSHR uncacheable statistics 1976 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1977 MemCmd cmd(access_idx); 1978 const string &cstr = cmd.toString(); 1979 1980 mshr_uncacheable[access_idx] 1981 .init(system->maxMasters()) 1982 .name(name() + "." + cstr + "_mshr_uncacheable") 1983 .desc("number of " + cstr + " MSHR uncacheable") 1984 .flags(total | nozero | nonan) 1985 ; 1986 for (int i = 0; i < system->maxMasters(); i++) { 1987 mshr_uncacheable[access_idx].subname(i, system->getMasterName(i)); 1988 } 1989 } 1990 1991 overallMshrUncacheable 1992 .name(name() + ".overall_mshr_uncacheable_misses") 1993 .desc("number of overall MSHR uncacheable misses") 1994 .flags(total | nozero | nonan) 1995 ; 1996 overallMshrUncacheable = 1997 SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable); 1998 for (int i = 0; i < system->maxMasters(); i++) { 1999 overallMshrUncacheable.subname(i, system->getMasterName(i)); 2000 } 2001 2002 // MSHR miss latency statistics 2003 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2004 MemCmd cmd(access_idx); 2005 const string &cstr = cmd.toString(); 2006 2007 mshr_uncacheable_lat[access_idx] 2008 .init(system->maxMasters()) 2009 .name(name() + "." + cstr + "_mshr_uncacheable_latency") 2010 .desc("number of " + cstr + " MSHR uncacheable cycles") 2011 .flags(total | nozero | nonan) 2012 ; 2013 for (int i = 0; i < system->maxMasters(); i++) { 2014 mshr_uncacheable_lat[access_idx].subname( 2015 i, system->getMasterName(i)); 2016 } 2017 } 2018 2019 overallMshrUncacheableLatency 2020 .name(name() + ".overall_mshr_uncacheable_latency") 2021 .desc("number of overall MSHR uncacheable cycles") 2022 .flags(total | nozero | nonan) 2023 ; 2024 overallMshrUncacheableLatency = 2025 SUM_DEMAND(mshr_uncacheable_lat) + 2026 SUM_NON_DEMAND(mshr_uncacheable_lat); 2027 for (int i = 0; i < system->maxMasters(); i++) { 2028 overallMshrUncacheableLatency.subname(i, system->getMasterName(i)); 2029 } 2030 2031#if 0 2032 // MSHR access formulas 2033 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2034 MemCmd cmd(access_idx); 2035 const string &cstr = cmd.toString(); 2036 2037 mshrAccesses[access_idx] 2038 .name(name() + "." 
+ cstr + "_mshr_accesses") 2039 .desc("number of " + cstr + " mshr accesses(hits+misses)") 2040 .flags(total | nozero | nonan) 2041 ; 2042 mshrAccesses[access_idx] = 2043 mshr_hits[access_idx] + mshr_misses[access_idx] 2044 + mshr_uncacheable[access_idx]; 2045 } 2046 2047 demandMshrAccesses 2048 .name(name() + ".demand_mshr_accesses") 2049 .desc("number of demand (read+write) mshr accesses") 2050 .flags(total | nozero | nonan) 2051 ; 2052 demandMshrAccesses = demandMshrHits + demandMshrMisses; 2053 2054 overallMshrAccesses 2055 .name(name() + ".overall_mshr_accesses") 2056 .desc("number of overall (read+write) mshr accesses") 2057 .flags(total | nozero | nonan) 2058 ; 2059 overallMshrAccesses = overallMshrHits + overallMshrMisses 2060 + overallMshrUncacheable; 2061#endif 2062 2063 // MSHR miss rate formulas 2064 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2065 MemCmd cmd(access_idx); 2066 const string &cstr = cmd.toString(); 2067 2068 mshrMissRate[access_idx] 2069 .name(name() + "." + cstr + "_mshr_miss_rate") 2070 .desc("mshr miss rate for " + cstr + " accesses") 2071 .flags(total | nozero | nonan) 2072 ; 2073 mshrMissRate[access_idx] = 2074 mshr_misses[access_idx] / accesses[access_idx]; 2075 2076 for (int i = 0; i < system->maxMasters(); i++) { 2077 mshrMissRate[access_idx].subname(i, system->getMasterName(i)); 2078 } 2079 } 2080 2081 demandMshrMissRate 2082 .name(name() + ".demand_mshr_miss_rate") 2083 .desc("mshr miss rate for demand accesses") 2084 .flags(total | nozero | nonan) 2085 ; 2086 demandMshrMissRate = demandMshrMisses / demandAccesses; 2087 for (int i = 0; i < system->maxMasters(); i++) { 2088 demandMshrMissRate.subname(i, system->getMasterName(i)); 2089 } 2090 2091 overallMshrMissRate 2092 .name(name() + ".overall_mshr_miss_rate") 2093 .desc("mshr miss rate for overall accesses") 2094 .flags(total | nozero | nonan) 2095 ; 2096 overallMshrMissRate = overallMshrMisses / overallAccesses; 2097 for (int i = 0; i < system->maxMasters(); i++) { 2098 overallMshrMissRate.subname(i, system->getMasterName(i)); 2099 } 2100 2101 // mshrMiss latency formulas 2102 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2103 MemCmd cmd(access_idx); 2104 const string &cstr = cmd.toString(); 2105 2106 avgMshrMissLatency[access_idx] 2107 .name(name() + "." 
+ cstr + "_avg_mshr_miss_latency") 2108 .desc("average " + cstr + " mshr miss latency") 2109 .flags(total | nozero | nonan) 2110 ; 2111 avgMshrMissLatency[access_idx] = 2112 mshr_miss_latency[access_idx] / mshr_misses[access_idx]; 2113 2114 for (int i = 0; i < system->maxMasters(); i++) { 2115 avgMshrMissLatency[access_idx].subname( 2116 i, system->getMasterName(i)); 2117 } 2118 } 2119 2120 demandAvgMshrMissLatency 2121 .name(name() + ".demand_avg_mshr_miss_latency") 2122 .desc("average overall mshr miss latency") 2123 .flags(total | nozero | nonan) 2124 ; 2125 demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses; 2126 for (int i = 0; i < system->maxMasters(); i++) { 2127 demandAvgMshrMissLatency.subname(i, system->getMasterName(i)); 2128 } 2129 2130 overallAvgMshrMissLatency 2131 .name(name() + ".overall_avg_mshr_miss_latency") 2132 .desc("average overall mshr miss latency") 2133 .flags(total | nozero | nonan) 2134 ; 2135 overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses; 2136 for (int i = 0; i < system->maxMasters(); i++) { 2137 overallAvgMshrMissLatency.subname(i, system->getMasterName(i)); 2138 } 2139 2140 // mshrUncacheable latency formulas 2141 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2142 MemCmd cmd(access_idx); 2143 const string &cstr = cmd.toString(); 2144 2145 avgMshrUncacheableLatency[access_idx] 2146 .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency") 2147 .desc("average " + cstr + " mshr uncacheable latency") 2148 .flags(total | nozero | nonan) 2149 ; 2150 avgMshrUncacheableLatency[access_idx] = 2151 mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx]; 2152 2153 for (int i = 0; i < system->maxMasters(); i++) { 2154 avgMshrUncacheableLatency[access_idx].subname( 2155 i, system->getMasterName(i)); 2156 } 2157 } 2158 2159 overallAvgMshrUncacheableLatency 2160 .name(name() + ".overall_avg_mshr_uncacheable_latency") 2161 .desc("average overall mshr uncacheable latency") 2162 .flags(total | nozero | nonan) 2163 ; 2164 overallAvgMshrUncacheableLatency = 2165 overallMshrUncacheableLatency / overallMshrUncacheable; 2166 for (int i = 0; i < system->maxMasters(); i++) { 2167 overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i)); 2168 } 2169 2170 replacements 2171 .name(name() + ".replacements") 2172 .desc("number of replacements") 2173 ; 2174} 2175 2176/////////////// 2177// 2178// CpuSidePort 2179// 2180/////////////// 2181bool 2182BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt) 2183{ 2184 // Snoops shouldn't happen when bypassing caches 2185 assert(!cache->system->bypassCaches()); 2186 2187 assert(pkt->isResponse()); 2188 2189 // Express snoop responses from master to slave, e.g., from L1 to L2 2190 cache->recvTimingSnoopResp(pkt); 2191 return true; 2192} 2193 2194 2195bool 2196BaseCache::CpuSidePort::tryTiming(PacketPtr pkt) 2197{ 2198 if (cache->system->bypassCaches() || pkt->isExpressSnoop()) { 2199 // always let express snoop packets through even if blocked 2200 return true; 2201 } else if (blocked || mustSendRetry) { 2202 // either already committed to send a retry, or blocked 2203 mustSendRetry = true; 2204 return false; 2205 } 2206 mustSendRetry = false; 2207 return true; 2208} 2209 2210bool 2211BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt) 2212{ 2213 assert(pkt->isRequest()); 2214 2215 if (cache->system->bypassCaches()) { 2216 // Just forward the packet if caches are disabled. 
bool
BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt)
{
    assert(pkt->isRequest());

    if (cache->system->bypassCaches()) {
        // Just forward the packet if caches are disabled.
        // @todo This should really enqueue the packet rather than
        // forward it immediately.
        bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt);
        assert(success);
        return true;
    } else if (tryTiming(pkt)) {
        cache->recvTimingReq(pkt);
        return true;
    }
    return false;
}

Tick
BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt)
{
    if (cache->system->bypassCaches()) {
        // Forward the request if the system is in cache bypass mode.
        return cache->memSidePort.sendAtomic(pkt);
    } else {
        return cache->recvAtomic(pkt);
    }
}

void
BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt)
{
    if (cache->system->bypassCaches()) {
        // The cache should be flushed if we are in cache bypass mode,
        // so we don't need to check if we need to update anything.
        cache->memSidePort.sendFunctional(pkt);
        return;
    }

    // functional request
    cache->functionalAccess(pkt, true);
}

AddrRangeList
BaseCache::CpuSidePort::getAddrRanges() const
{
    return cache->getAddrRanges();
}

BaseCache::
CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache,
                         const std::string &_label)
    : CacheSlavePort(_name, _cache, _label), cache(_cache)
{
}

///////////////
//
// MemSidePort
//
///////////////
bool
BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt)
{
    cache->recvTimingResp(pkt);
    return true;
}

// Express snooping requests to memside port
void
BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    // handle snooping requests
    cache->recvTimingSnoopReq(pkt);
}

Tick
BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    return cache->recvAtomicSnoop(pkt);
}

void
BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    // functional snoop (note that in contrast to atomic we don't have
    // a specific functionalSnoop method, as they have the same
    // behaviour regardless)
    cache->functionalAccess(pkt, false);
}

void
BaseCache::CacheReqPacketQueue::sendDeferredPacket()
{
    // sanity check
    assert(!waitingOnRetry);

    // there should never be any deferred request packets in the
    // queue, instead we rely on the cache to provide the packets
    // from the MSHR queue or write queue
    assert(deferredPacketReadyTime() == MaxTick);

    // check for request packets (requests & writebacks)
    QueueEntry* entry = cache.getNextQueueEntry();

    if (!entry) {
        // can happen if e.g. we attempt a writeback and fail, but
        // before the retry, the writeback is eliminated because
        // we snoop another cache's ReadEx.
    } else {
        // let our snoop responses go first if there are responses to
        // the same addresses
        if (checkConflictingSnoop(entry->blkAddr)) {
            return;
        }
        waitingOnRetry = entry->sendPacket(cache);
    }

    // if we succeeded and are not waiting for a retry, schedule the
    // next send considering when the next queue is ready; note that
    // snoop responses have their own packet queue and thus schedule
    // their own events
    if (!waitingOnRetry) {
        schedSendEvent(cache.nextQueueReadyTime());
    }
}

BaseCache::MemSidePort::MemSidePort(const std::string &_name,
                                    BaseCache *_cache,
                                    const std::string &_label)
    : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
      _reqQueue(*_cache, *this, _snoopRespQueue, _label),
      _snoopRespQueue(*_cache, *this, _label), cache(_cache)
{
}
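// For reference, the bad_checkpoint flag serialized earlier ends up as
// a single name=value line in this cache's section of the checkpoint
// file, roughly (section name illustrative):
//
//   [system.l2cache]
//   bad_checkpoint=true
//
// which lets unserialize() detect a checkpoint taken with dirty cache
// data and abort the restore with fatal().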