base.cc revision 12724:4f6fac3191d2
1/* 2 * Copyright (c) 2012-2013, 2018 ARM Limited 3 * All rights reserved. 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2003-2005 The Regents of The University of Michigan 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Erik Hallnor 41 * Nikos Nikoleris 42 */ 43 44/** 45 * @file 46 * Definition of BaseCache functions. 47 */ 48 49#include "mem/cache/base.hh" 50 51#include "base/compiler.hh" 52#include "base/logging.hh" 53#include "debug/Cache.hh" 54#include "debug/CachePort.hh" 55#include "debug/CacheVerbose.hh" 56#include "mem/cache/mshr.hh" 57#include "mem/cache/prefetch/base.hh" 58#include "mem/cache/queue_entry.hh" 59#include "params/BaseCache.hh" 60#include "sim/core.hh" 61 62class BaseMasterPort; 63class BaseSlavePort; 64 65using namespace std; 66 67BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name, 68 BaseCache *_cache, 69 const std::string &_label) 70 : QueuedSlavePort(_name, _cache, queue), queue(*_cache, *this, _label), 71 blocked(false), mustSendRetry(false), 72 sendRetryEvent([this]{ processSendRetry(); }, _name) 73{ 74} 75 76BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size) 77 : MemObject(p), 78 cpuSidePort (p->name + ".cpu_side", this, "CpuSidePort"), 79 memSidePort(p->name + ".mem_side", this, "MemSidePort"), 80 mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below 81 writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below 82 tags(p->tags), 83 prefetcher(p->prefetcher), 84 prefetchOnAccess(p->prefetch_on_access), 85 writebackClean(p->writeback_clean), 86 tempBlockWriteback(nullptr), 87 writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); }, 88 name(), false, 89 EventBase::Delayed_Writeback_Pri), 90 blkSize(blk_size), 91 lookupLatency(p->tag_latency), 92 dataLatency(p->data_latency), 93 forwardLatency(p->tag_latency), 94 fillLatency(p->data_latency), 95 responseLatency(p->response_latency), 96 numTarget(p->tgts_per_mshr), 97 forwardSnoops(true), 98 clusivity(p->clusivity), 99 isReadOnly(p->is_read_only), 100 blocked(0), 101 order(0), 102 noTargetMSHR(nullptr), 103 missCount(p->max_miss_count), 104 addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()), 105 system(p->system) 106{ 107 // the MSHR queue has no reserve entries as we check the MSHR 108 // queue on every single allocation, whereas the write queue has 109 // as many reserve entries as we have MSHRs, since every MSHR may 110 // eventually require a writeback, and we do not check the write 111 // buffer before committing to an MSHR 112 113 // forward snoops is overridden in init() once we can query 114 // whether the connected master is actually snooping or not 115 116 tempBlock = new CacheBlk(); 117 tempBlock->data = new uint8_t[blkSize]; 118 119 tags->setCache(this); 120 if (prefetcher) 121 prefetcher->setCache(this); 122} 123 124BaseCache::~BaseCache() 125{ 126 delete [] tempBlock->data; 127 delete tempBlock; 128} 129 130void 131BaseCache::CacheSlavePort::setBlocked() 132{ 133 assert(!blocked); 134 DPRINTF(CachePort, "Port is blocking new requests\n"); 135 blocked = true; 136 // if we already scheduled a retry in this cycle, but it has not yet 137 // happened, cancel it 138 if (sendRetryEvent.scheduled()) { 139 owner.deschedule(sendRetryEvent); 140 DPRINTF(CachePort, "Port descheduled retry\n"); 141 mustSendRetry = true; 142 } 143} 144 145void 146BaseCache::CacheSlavePort::clearBlocked() 147{ 148 assert(blocked); 149 DPRINTF(CachePort, "Port is accepting new requests\n"); 150 blocked = false; 151 if (mustSendRetry) { 152 // @TODO: need to find a better time (next cycle?) 153 owner.schedule(sendRetryEvent, curTick() + 1); 154 } 155} 156 157void 158BaseCache::CacheSlavePort::processSendRetry() 159{ 160 DPRINTF(CachePort, "Port is sending retry\n"); 161 162 // reset the flag and call retry 163 mustSendRetry = false; 164 sendRetryReq(); 165} 166 167void 168BaseCache::init() 169{ 170 if (!cpuSidePort.isConnected() || !memSidePort.isConnected()) 171 fatal("Cache ports on %s are not connected\n", name()); 172 cpuSidePort.sendRangeChange(); 173 forwardSnoops = cpuSidePort.isSnooping(); 174} 175 176BaseMasterPort & 177BaseCache::getMasterPort(const std::string &if_name, PortID idx) 178{ 179 if (if_name == "mem_side") { 180 return memSidePort; 181 } else { 182 return MemObject::getMasterPort(if_name, idx); 183 } 184} 185 186BaseSlavePort & 187BaseCache::getSlavePort(const std::string &if_name, PortID idx) 188{ 189 if (if_name == "cpu_side") { 190 return cpuSidePort; 191 } else { 192 return MemObject::getSlavePort(if_name, idx); 193 } 194} 195 196bool 197BaseCache::inRange(Addr addr) const 198{ 199 for (const auto& r : addrRanges) { 200 if (r.contains(addr)) { 201 return true; 202 } 203 } 204 return false; 205} 206 207void 208BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time) 209{ 210 if (pkt->needsResponse()) { 211 pkt->makeTimingResponse(); 212 // @todo: Make someone pay for this 213 pkt->headerDelay = pkt->payloadDelay = 0; 214 215 // In this case we are considering request_time that takes 216 // into account the delay of the xbar, if any, and just 217 // lat, neglecting responseLatency, modelling hit latency 218 // just as lookupLatency or or the value of lat overriden 219 // by access(), that calls accessBlock() function. 220 cpuSidePort.schedTimingResp(pkt, request_time, true); 221 } else { 222 DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__, 223 pkt->print()); 224 225 // queue the packet for deletion, as the sending cache is 226 // still relying on it; if the block is found in access(), 227 // CleanEvict and Writeback messages will be deleted 228 // here as well 229 pendingDelete.reset(pkt); 230 } 231} 232 233void 234BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk, 235 Tick forward_time, Tick request_time) 236{ 237 if (mshr) { 238 /// MSHR hit 239 /// @note writebacks will be checked in getNextMSHR() 240 /// for any conflicting requests to the same block 241 242 //@todo remove hw_pf here 243 244 // Coalesce unless it was a software prefetch (see above). 245 if (pkt) { 246 assert(!pkt->isWriteback()); 247 // CleanEvicts corresponding to blocks which have 248 // outstanding requests in MSHRs are simply sunk here 249 if (pkt->cmd == MemCmd::CleanEvict) { 250 pendingDelete.reset(pkt); 251 } else if (pkt->cmd == MemCmd::WriteClean) { 252 // A WriteClean should never coalesce with any 253 // outstanding cache maintenance requests. 254 255 // We use forward_time here because there is an 256 // uncached memory write, forwarded to WriteBuffer. 257 allocateWriteBuffer(pkt, forward_time); 258 } else { 259 DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__, 260 pkt->print()); 261 262 assert(pkt->req->masterId() < system->maxMasters()); 263 mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++; 264 265 // We use forward_time here because it is the same 266 // considering new targets. We have multiple 267 // requests for the same address here. It 268 // specifies the latency to allocate an internal 269 // buffer and to schedule an event to the queued 270 // port and also takes into account the additional 271 // delay of the xbar. 272 mshr->allocateTarget(pkt, forward_time, order++, 273 allocOnFill(pkt->cmd)); 274 if (mshr->getNumTargets() == numTarget) { 275 noTargetMSHR = mshr; 276 setBlocked(Blocked_NoTargets); 277 // need to be careful with this... if this mshr isn't 278 // ready yet (i.e. time > curTick()), we don't want to 279 // move it ahead of mshrs that are ready 280 // mshrQueue.moveToFront(mshr); 281 } 282 } 283 } 284 } else { 285 // no MSHR 286 assert(pkt->req->masterId() < system->maxMasters()); 287 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++; 288 289 if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) { 290 // We use forward_time here because there is an 291 // writeback or writeclean, forwarded to WriteBuffer. 292 allocateWriteBuffer(pkt, forward_time); 293 } else { 294 if (blk && blk->isValid()) { 295 // If we have a write miss to a valid block, we 296 // need to mark the block non-readable. Otherwise 297 // if we allow reads while there's an outstanding 298 // write miss, the read could return stale data 299 // out of the cache block... a more aggressive 300 // system could detect the overlap (if any) and 301 // forward data out of the MSHRs, but we don't do 302 // that yet. Note that we do need to leave the 303 // block valid so that it stays in the cache, in 304 // case we get an upgrade response (and hence no 305 // new data) when the write miss completes. 306 // As long as CPUs do proper store/load forwarding 307 // internally, and have a sufficiently weak memory 308 // model, this is probably unnecessary, but at some 309 // point it must have seemed like we needed it... 310 assert((pkt->needsWritable() && !blk->isWritable()) || 311 pkt->req->isCacheMaintenance()); 312 blk->status &= ~BlkReadable; 313 } 314 // Here we are using forward_time, modelling the latency of 315 // a miss (outbound) just as forwardLatency, neglecting the 316 // lookupLatency component. 317 allocateMissBuffer(pkt, forward_time); 318 } 319 } 320} 321 322void 323BaseCache::recvTimingReq(PacketPtr pkt) 324{ 325 // anything that is merely forwarded pays for the forward latency and 326 // the delay provided by the crossbar 327 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 328 329 // We use lookupLatency here because it is used to specify the latency 330 // to access. 331 Cycles lat = lookupLatency; 332 CacheBlk *blk = nullptr; 333 bool satisfied = false; 334 { 335 PacketList writebacks; 336 // Note that lat is passed by reference here. The function 337 // access() calls accessBlock() which can modify lat value. 338 satisfied = access(pkt, blk, lat, writebacks); 339 340 // copy writebacks to write buffer here to ensure they logically 341 // proceed anything happening below 342 doWritebacks(writebacks, forward_time); 343 } 344 345 // Here we charge the headerDelay that takes into account the latencies 346 // of the bus, if the packet comes from it. 347 // The latency charged it is just lat that is the value of lookupLatency 348 // modified by access() function, or if not just lookupLatency. 349 // In case of a hit we are neglecting response latency. 350 // In case of a miss we are neglecting forward latency. 351 Tick request_time = clockEdge(lat) + pkt->headerDelay; 352 // Here we reset the timing of the packet. 353 pkt->headerDelay = pkt->payloadDelay = 0; 354 // track time of availability of next prefetch, if any 355 Tick next_pf_time = MaxTick; 356 357 if (satisfied) { 358 // if need to notify the prefetcher we have to do it before 359 // anything else as later handleTimingReqHit might turn the 360 // packet in a response 361 if (prefetcher && 362 (prefetchOnAccess || (blk && blk->wasPrefetched()))) { 363 if (blk) 364 blk->status &= ~BlkHWPrefetched; 365 366 // Don't notify on SWPrefetch 367 if (!pkt->cmd.isSWPrefetch()) { 368 assert(!pkt->req->isCacheMaintenance()); 369 next_pf_time = prefetcher->notify(pkt); 370 } 371 } 372 373 handleTimingReqHit(pkt, blk, request_time); 374 } else { 375 handleTimingReqMiss(pkt, blk, forward_time, request_time); 376 377 // We should call the prefetcher reguardless if the request is 378 // satisfied or not, reguardless if the request is in the MSHR 379 // or not. The request could be a ReadReq hit, but still not 380 // satisfied (potentially because of a prior write to the same 381 // cache line. So, even when not satisfied, there is an MSHR 382 // already allocated for this, we need to let the prefetcher 383 // know about the request 384 385 // Don't notify prefetcher on SWPrefetch or cache maintenance 386 // operations 387 if (prefetcher && pkt && 388 !pkt->cmd.isSWPrefetch() && 389 !pkt->req->isCacheMaintenance()) { 390 next_pf_time = prefetcher->notify(pkt); 391 } 392 } 393 394 if (next_pf_time != MaxTick) { 395 schedMemSideSendEvent(next_pf_time); 396 } 397} 398 399void 400BaseCache::handleUncacheableWriteResp(PacketPtr pkt) 401{ 402 Tick completion_time = clockEdge(responseLatency) + 403 pkt->headerDelay + pkt->payloadDelay; 404 405 // Reset the bus additional time as it is now accounted for 406 pkt->headerDelay = pkt->payloadDelay = 0; 407 408 cpuSidePort.schedTimingResp(pkt, completion_time, true); 409} 410 411void 412BaseCache::recvTimingResp(PacketPtr pkt) 413{ 414 assert(pkt->isResponse()); 415 416 // all header delay should be paid for by the crossbar, unless 417 // this is a prefetch response from above 418 panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp, 419 "%s saw a non-zero packet delay\n", name()); 420 421 const bool is_error = pkt->isError(); 422 423 if (is_error) { 424 DPRINTF(Cache, "%s: Cache received %s with error\n", __func__, 425 pkt->print()); 426 } 427 428 DPRINTF(Cache, "%s: Handling response %s\n", __func__, 429 pkt->print()); 430 431 // if this is a write, we should be looking at an uncacheable 432 // write 433 if (pkt->isWrite()) { 434 assert(pkt->req->isUncacheable()); 435 handleUncacheableWriteResp(pkt); 436 return; 437 } 438 439 // we have dealt with any (uncacheable) writes above, from here on 440 // we know we are dealing with an MSHR due to a miss or a prefetch 441 MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState()); 442 assert(mshr); 443 444 if (mshr == noTargetMSHR) { 445 // we always clear at least one target 446 clearBlocked(Blocked_NoTargets); 447 noTargetMSHR = nullptr; 448 } 449 450 // Initial target is used just for stats 451 MSHR::Target *initial_tgt = mshr->getTarget(); 452 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex(); 453 Tick miss_latency = curTick() - initial_tgt->recvTime; 454 455 if (pkt->req->isUncacheable()) { 456 assert(pkt->req->masterId() < system->maxMasters()); 457 mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] += 458 miss_latency; 459 } else { 460 assert(pkt->req->masterId() < system->maxMasters()); 461 mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] += 462 miss_latency; 463 } 464 465 PacketList writebacks; 466 467 bool is_fill = !mshr->isForward && 468 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp); 469 470 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure()); 471 472 if (is_fill && !is_error) { 473 DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n", 474 pkt->getAddr()); 475 476 blk = handleFill(pkt, blk, writebacks, mshr->allocOnFill()); 477 assert(blk != nullptr); 478 } 479 480 if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) { 481 // The block was marked not readable while there was a pending 482 // cache maintenance operation, restore its flag. 483 blk->status |= BlkReadable; 484 } 485 486 if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) { 487 // If at this point the referenced block is writable and the 488 // response is not a cache invalidate, we promote targets that 489 // were deferred as we couldn't guarrantee a writable copy 490 mshr->promoteWritable(); 491 } 492 493 serviceMSHRTargets(mshr, pkt, blk, writebacks); 494 495 if (mshr->promoteDeferredTargets()) { 496 // avoid later read getting stale data while write miss is 497 // outstanding.. see comment in timingAccess() 498 if (blk) { 499 blk->status &= ~BlkReadable; 500 } 501 mshrQueue.markPending(mshr); 502 schedMemSideSendEvent(clockEdge() + pkt->payloadDelay); 503 } else { 504 // while we deallocate an mshr from the queue we still have to 505 // check the isFull condition before and after as we might 506 // have been using the reserved entries already 507 const bool was_full = mshrQueue.isFull(); 508 mshrQueue.deallocate(mshr); 509 if (was_full && !mshrQueue.isFull()) { 510 clearBlocked(Blocked_NoMSHRs); 511 } 512 513 // Request the bus for a prefetch if this deallocation freed enough 514 // MSHRs for a prefetch to take place 515 if (prefetcher && mshrQueue.canPrefetch()) { 516 Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(), 517 clockEdge()); 518 if (next_pf_time != MaxTick) 519 schedMemSideSendEvent(next_pf_time); 520 } 521 } 522 523 // if we used temp block, check to see if its valid and then clear it out 524 if (blk == tempBlock && tempBlock->isValid()) { 525 evictBlock(blk, writebacks); 526 } 527 528 const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 529 // copy writebacks to write buffer 530 doWritebacks(writebacks, forward_time); 531 532 DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print()); 533 delete pkt; 534} 535 536 537Tick 538BaseCache::recvAtomic(PacketPtr pkt) 539{ 540 // We are in atomic mode so we pay just for lookupLatency here. 541 Cycles lat = lookupLatency; 542 543 // follow the same flow as in recvTimingReq, and check if a cache 544 // above us is responding 545 if (pkt->cacheResponding() && !pkt->isClean()) { 546 assert(!pkt->req->isCacheInvalidate()); 547 DPRINTF(Cache, "Cache above responding to %s: not responding\n", 548 pkt->print()); 549 550 // if a cache is responding, and it had the line in Owned 551 // rather than Modified state, we need to invalidate any 552 // copies that are not on the same path to memory 553 assert(pkt->needsWritable() && !pkt->responderHadWritable()); 554 lat += ticksToCycles(memSidePort.sendAtomic(pkt)); 555 556 return lat * clockPeriod(); 557 } 558 559 // should assert here that there are no outstanding MSHRs or 560 // writebacks... that would mean that someone used an atomic 561 // access in timing mode 562 563 CacheBlk *blk = nullptr; 564 PacketList writebacks; 565 bool satisfied = access(pkt, blk, lat, writebacks); 566 567 if (pkt->isClean() && blk && blk->isDirty()) { 568 // A cache clean opearation is looking for a dirty 569 // block. If a dirty block is encountered a WriteClean 570 // will update any copies to the path to the memory 571 // until the point of reference. 572 DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n", 573 __func__, pkt->print(), blk->print()); 574 PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id); 575 writebacks.push_back(wb_pkt); 576 pkt->setSatisfied(); 577 } 578 579 // handle writebacks resulting from the access here to ensure they 580 // logically proceed anything happening below 581 doWritebacksAtomic(writebacks); 582 assert(writebacks.empty()); 583 584 if (!satisfied) { 585 lat += handleAtomicReqMiss(pkt, blk, writebacks); 586 } 587 588 // Note that we don't invoke the prefetcher at all in atomic mode. 589 // It's not clear how to do it properly, particularly for 590 // prefetchers that aggressively generate prefetch candidates and 591 // rely on bandwidth contention to throttle them; these will tend 592 // to pollute the cache in atomic mode since there is no bandwidth 593 // contention. If we ever do want to enable prefetching in atomic 594 // mode, though, this is the place to do it... see timingAccess() 595 // for an example (though we'd want to issue the prefetch(es) 596 // immediately rather than calling requestMemSideBus() as we do 597 // there). 598 599 // do any writebacks resulting from the response handling 600 doWritebacksAtomic(writebacks); 601 602 // if we used temp block, check to see if its valid and if so 603 // clear it out, but only do so after the call to recvAtomic is 604 // finished so that any downstream observers (such as a snoop 605 // filter), first see the fill, and only then see the eviction 606 if (blk == tempBlock && tempBlock->isValid()) { 607 // the atomic CPU calls recvAtomic for fetch and load/store 608 // sequentuially, and we may already have a tempBlock 609 // writeback from the fetch that we have not yet sent 610 if (tempBlockWriteback) { 611 // if that is the case, write the prevoius one back, and 612 // do not schedule any new event 613 writebackTempBlockAtomic(); 614 } else { 615 // the writeback/clean eviction happens after the call to 616 // recvAtomic has finished (but before any successive 617 // calls), so that the response handling from the fill is 618 // allowed to happen first 619 schedule(writebackTempBlockAtomicEvent, curTick()); 620 } 621 622 tempBlockWriteback = evictBlock(blk); 623 } 624 625 if (pkt->needsResponse()) { 626 pkt->makeAtomicResponse(); 627 } 628 629 return lat * clockPeriod(); 630} 631 632void 633BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side) 634{ 635 if (system->bypassCaches()) { 636 // Packets from the memory side are snoop request and 637 // shouldn't happen in bypass mode. 638 assert(from_cpu_side); 639 640 // The cache should be flushed if we are in cache bypass mode, 641 // so we don't need to check if we need to update anything. 642 memSidePort.sendFunctional(pkt); 643 return; 644 } 645 646 Addr blk_addr = pkt->getBlockAddr(blkSize); 647 bool is_secure = pkt->isSecure(); 648 CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure); 649 MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure); 650 651 pkt->pushLabel(name()); 652 653 CacheBlkPrintWrapper cbpw(blk); 654 655 // Note that just because an L2/L3 has valid data doesn't mean an 656 // L1 doesn't have a more up-to-date modified copy that still 657 // needs to be found. As a result we always update the request if 658 // we have it, but only declare it satisfied if we are the owner. 659 660 // see if we have data at all (owned or otherwise) 661 bool have_data = blk && blk->isValid() 662 && pkt->checkFunctional(&cbpw, blk_addr, is_secure, blkSize, 663 blk->data); 664 665 // data we have is dirty if marked as such or if we have an 666 // in-service MSHR that is pending a modified line 667 bool have_dirty = 668 have_data && (blk->isDirty() || 669 (mshr && mshr->inService && mshr->isPendingModified())); 670 671 bool done = have_dirty || 672 cpuSidePort.checkFunctional(pkt) || 673 mshrQueue.checkFunctional(pkt, blk_addr) || 674 writeBuffer.checkFunctional(pkt, blk_addr) || 675 memSidePort.checkFunctional(pkt); 676 677 DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(), 678 (blk && blk->isValid()) ? "valid " : "", 679 have_data ? "data " : "", done ? "done " : ""); 680 681 // We're leaving the cache, so pop cache->name() label 682 pkt->popLabel(); 683 684 if (done) { 685 pkt->makeResponse(); 686 } else { 687 // if it came as a request from the CPU side then make sure it 688 // continues towards the memory side 689 if (from_cpu_side) { 690 memSidePort.sendFunctional(pkt); 691 } else if (cpuSidePort.isSnooping()) { 692 // if it came from the memory side, it must be a snoop request 693 // and we should only forward it if we are forwarding snoops 694 cpuSidePort.sendFunctionalSnoop(pkt); 695 } 696 } 697} 698 699 700void 701BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt) 702{ 703 assert(pkt->isRequest()); 704 705 uint64_t overwrite_val; 706 bool overwrite_mem; 707 uint64_t condition_val64; 708 uint32_t condition_val32; 709 710 int offset = pkt->getOffset(blkSize); 711 uint8_t *blk_data = blk->data + offset; 712 713 assert(sizeof(uint64_t) >= pkt->getSize()); 714 715 overwrite_mem = true; 716 // keep a copy of our possible write value, and copy what is at the 717 // memory address into the packet 718 pkt->writeData((uint8_t *)&overwrite_val); 719 pkt->setData(blk_data); 720 721 if (pkt->req->isCondSwap()) { 722 if (pkt->getSize() == sizeof(uint64_t)) { 723 condition_val64 = pkt->req->getExtraData(); 724 overwrite_mem = !std::memcmp(&condition_val64, blk_data, 725 sizeof(uint64_t)); 726 } else if (pkt->getSize() == sizeof(uint32_t)) { 727 condition_val32 = (uint32_t)pkt->req->getExtraData(); 728 overwrite_mem = !std::memcmp(&condition_val32, blk_data, 729 sizeof(uint32_t)); 730 } else 731 panic("Invalid size for conditional read/write\n"); 732 } 733 734 if (overwrite_mem) { 735 std::memcpy(blk_data, &overwrite_val, pkt->getSize()); 736 blk->status |= BlkDirty; 737 } 738} 739 740QueueEntry* 741BaseCache::getNextQueueEntry() 742{ 743 // Check both MSHR queue and write buffer for potential requests, 744 // note that null does not mean there is no request, it could 745 // simply be that it is not ready 746 MSHR *miss_mshr = mshrQueue.getNext(); 747 WriteQueueEntry *wq_entry = writeBuffer.getNext(); 748 749 // If we got a write buffer request ready, first priority is a 750 // full write buffer, otherwise we favour the miss requests 751 if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) { 752 // need to search MSHR queue for conflicting earlier miss. 753 MSHR *conflict_mshr = 754 mshrQueue.findPending(wq_entry->blkAddr, 755 wq_entry->isSecure); 756 757 if (conflict_mshr && conflict_mshr->order < wq_entry->order) { 758 // Service misses in order until conflict is cleared. 759 return conflict_mshr; 760 761 // @todo Note that we ignore the ready time of the conflict here 762 } 763 764 // No conflicts; issue write 765 return wq_entry; 766 } else if (miss_mshr) { 767 // need to check for conflicting earlier writeback 768 WriteQueueEntry *conflict_mshr = 769 writeBuffer.findPending(miss_mshr->blkAddr, 770 miss_mshr->isSecure); 771 if (conflict_mshr) { 772 // not sure why we don't check order here... it was in the 773 // original code but commented out. 774 775 // The only way this happens is if we are 776 // doing a write and we didn't have permissions 777 // then subsequently saw a writeback (owned got evicted) 778 // We need to make sure to perform the writeback first 779 // To preserve the dirty data, then we can issue the write 780 781 // should we return wq_entry here instead? I.e. do we 782 // have to flush writes in order? I don't think so... not 783 // for Alpha anyway. Maybe for x86? 784 return conflict_mshr; 785 786 // @todo Note that we ignore the ready time of the conflict here 787 } 788 789 // No conflicts; issue read 790 return miss_mshr; 791 } 792 793 // fall through... no pending requests. Try a prefetch. 794 assert(!miss_mshr && !wq_entry); 795 if (prefetcher && mshrQueue.canPrefetch()) { 796 // If we have a miss queue slot, we can try a prefetch 797 PacketPtr pkt = prefetcher->getPacket(); 798 if (pkt) { 799 Addr pf_addr = pkt->getBlockAddr(blkSize); 800 if (!tags->findBlock(pf_addr, pkt->isSecure()) && 801 !mshrQueue.findMatch(pf_addr, pkt->isSecure()) && 802 !writeBuffer.findMatch(pf_addr, pkt->isSecure())) { 803 // Update statistic on number of prefetches issued 804 // (hwpf_mshr_misses) 805 assert(pkt->req->masterId() < system->maxMasters()); 806 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++; 807 808 // allocate an MSHR and return it, note 809 // that we send the packet straight away, so do not 810 // schedule the send 811 return allocateMissBuffer(pkt, curTick(), false); 812 } else { 813 // free the request and packet 814 delete pkt->req; 815 delete pkt; 816 } 817 } 818 } 819 820 return nullptr; 821} 822 823void 824BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool) 825{ 826 assert(pkt->isRequest()); 827 828 assert(blk && blk->isValid()); 829 // Occasionally this is not true... if we are a lower-level cache 830 // satisfying a string of Read and ReadEx requests from 831 // upper-level caches, a Read will mark the block as shared but we 832 // can satisfy a following ReadEx anyway since we can rely on the 833 // Read requester(s) to have buffered the ReadEx snoop and to 834 // invalidate their blocks after receiving them. 835 // assert(!pkt->needsWritable() || blk->isWritable()); 836 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize); 837 838 // Check RMW operations first since both isRead() and 839 // isWrite() will be true for them 840 if (pkt->cmd == MemCmd::SwapReq) { 841 cmpAndSwap(blk, pkt); 842 } else if (pkt->isWrite()) { 843 // we have the block in a writable state and can go ahead, 844 // note that the line may be also be considered writable in 845 // downstream caches along the path to memory, but always 846 // Exclusive, and never Modified 847 assert(blk->isWritable()); 848 // Write or WriteLine at the first cache with block in writable state 849 if (blk->checkWrite(pkt)) { 850 pkt->writeDataToBlock(blk->data, blkSize); 851 } 852 // Always mark the line as dirty (and thus transition to the 853 // Modified state) even if we are a failed StoreCond so we 854 // supply data to any snoops that have appended themselves to 855 // this cache before knowing the store will fail. 856 blk->status |= BlkDirty; 857 DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print()); 858 } else if (pkt->isRead()) { 859 if (pkt->isLLSC()) { 860 blk->trackLoadLocked(pkt); 861 } 862 863 // all read responses have a data payload 864 assert(pkt->hasRespData()); 865 pkt->setDataFromBlock(blk->data, blkSize); 866 } else if (pkt->isUpgrade()) { 867 // sanity check 868 assert(!pkt->hasSharers()); 869 870 if (blk->isDirty()) { 871 // we were in the Owned state, and a cache above us that 872 // has the line in Shared state needs to be made aware 873 // that the data it already has is in fact dirty 874 pkt->setCacheResponding(); 875 blk->status &= ~BlkDirty; 876 } 877 } else { 878 assert(pkt->isInvalidate()); 879 invalidateBlock(blk); 880 DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__, 881 pkt->print()); 882 } 883} 884 885///////////////////////////////////////////////////// 886// 887// Access path: requests coming in from the CPU side 888// 889///////////////////////////////////////////////////// 890 891bool 892BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, 893 PacketList &writebacks) 894{ 895 // sanity check 896 assert(pkt->isRequest()); 897 898 chatty_assert(!(isReadOnly && pkt->isWrite()), 899 "Should never see a write in a read-only cache %s\n", 900 name()); 901 902 // Here lat is the value passed as parameter to accessBlock() function 903 // that can modify its value. 904 blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat); 905 906 DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(), 907 blk ? "hit " + blk->print() : "miss"); 908 909 if (pkt->req->isCacheMaintenance()) { 910 // A cache maintenance operation is always forwarded to the 911 // memory below even if the block is found in dirty state. 912 913 // We defer any changes to the state of the block until we 914 // create and mark as in service the mshr for the downstream 915 // packet. 916 return false; 917 } 918 919 if (pkt->isEviction()) { 920 // We check for presence of block in above caches before issuing 921 // Writeback or CleanEvict to write buffer. Therefore the only 922 // possible cases can be of a CleanEvict packet coming from above 923 // encountering a Writeback generated in this cache peer cache and 924 // waiting in the write buffer. Cases of upper level peer caches 925 // generating CleanEvict and Writeback or simply CleanEvict and 926 // CleanEvict almost simultaneously will be caught by snoops sent out 927 // by crossbar. 928 WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(), 929 pkt->isSecure()); 930 if (wb_entry) { 931 assert(wb_entry->getNumTargets() == 1); 932 PacketPtr wbPkt = wb_entry->getTarget()->pkt; 933 assert(wbPkt->isWriteback()); 934 935 if (pkt->isCleanEviction()) { 936 // The CleanEvict and WritebackClean snoops into other 937 // peer caches of the same level while traversing the 938 // crossbar. If a copy of the block is found, the 939 // packet is deleted in the crossbar. Hence, none of 940 // the other upper level caches connected to this 941 // cache have the block, so we can clear the 942 // BLOCK_CACHED flag in the Writeback if set and 943 // discard the CleanEvict by returning true. 944 wbPkt->clearBlockCached(); 945 return true; 946 } else { 947 assert(pkt->cmd == MemCmd::WritebackDirty); 948 // Dirty writeback from above trumps our clean 949 // writeback... discard here 950 // Note: markInService will remove entry from writeback buffer. 951 markInService(wb_entry); 952 delete wbPkt; 953 } 954 } 955 } 956 957 // Writeback handling is special case. We can write the block into 958 // the cache without having a writeable copy (or any copy at all). 959 if (pkt->isWriteback()) { 960 assert(blkSize == pkt->getSize()); 961 962 // we could get a clean writeback while we are having 963 // outstanding accesses to a block, do the simple thing for 964 // now and drop the clean writeback so that we do not upset 965 // any ordering/decisions about ownership already taken 966 if (pkt->cmd == MemCmd::WritebackClean && 967 mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) { 968 DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, " 969 "dropping\n", pkt->getAddr()); 970 return true; 971 } 972 973 if (!blk) { 974 // need to do a replacement 975 blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks); 976 if (!blk) { 977 // no replaceable block available: give up, fwd to next level. 978 incMissCount(pkt); 979 return false; 980 } 981 tags->insertBlock(pkt, blk); 982 983 blk->status |= (BlkValid | BlkReadable); 984 } 985 // only mark the block dirty if we got a writeback command, 986 // and leave it as is for a clean writeback 987 if (pkt->cmd == MemCmd::WritebackDirty) { 988 // TODO: the coherent cache can assert(!blk->isDirty()); 989 blk->status |= BlkDirty; 990 } 991 // if the packet does not have sharers, it is passing 992 // writable, and we got the writeback in Modified or Exclusive 993 // state, if not we are in the Owned or Shared state 994 if (!pkt->hasSharers()) { 995 blk->status |= BlkWritable; 996 } 997 // nothing else to do; writeback doesn't expect response 998 assert(!pkt->needsResponse()); 999 pkt->writeDataToBlock(blk->data, blkSize); 1000 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print()); 1001 incHitCount(pkt); 1002 // populate the time when the block will be ready to access. 1003 blk->whenReady = clockEdge(fillLatency) + pkt->headerDelay + 1004 pkt->payloadDelay; 1005 return true; 1006 } else if (pkt->cmd == MemCmd::CleanEvict) { 1007 if (blk) { 1008 // Found the block in the tags, need to stop CleanEvict from 1009 // propagating further down the hierarchy. Returning true will 1010 // treat the CleanEvict like a satisfied write request and delete 1011 // it. 1012 return true; 1013 } 1014 // We didn't find the block here, propagate the CleanEvict further 1015 // down the memory hierarchy. Returning false will treat the CleanEvict 1016 // like a Writeback which could not find a replaceable block so has to 1017 // go to next level. 1018 return false; 1019 } else if (pkt->cmd == MemCmd::WriteClean) { 1020 // WriteClean handling is a special case. We can allocate a 1021 // block directly if it doesn't exist and we can update the 1022 // block immediately. The WriteClean transfers the ownership 1023 // of the block as well. 1024 assert(blkSize == pkt->getSize()); 1025 1026 if (!blk) { 1027 if (pkt->writeThrough()) { 1028 // if this is a write through packet, we don't try to 1029 // allocate if the block is not present 1030 return false; 1031 } else { 1032 // a writeback that misses needs to allocate a new block 1033 blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), 1034 writebacks); 1035 if (!blk) { 1036 // no replaceable block available: give up, fwd to 1037 // next level. 1038 incMissCount(pkt); 1039 return false; 1040 } 1041 tags->insertBlock(pkt, blk); 1042 1043 blk->status |= (BlkValid | BlkReadable); 1044 } 1045 } 1046 1047 // at this point either this is a writeback or a write-through 1048 // write clean operation and the block is already in this 1049 // cache, we need to update the data and the block flags 1050 assert(blk); 1051 // TODO: the coherent cache can assert(!blk->isDirty()); 1052 if (!pkt->writeThrough()) { 1053 blk->status |= BlkDirty; 1054 } 1055 // nothing else to do; writeback doesn't expect response 1056 assert(!pkt->needsResponse()); 1057 pkt->writeDataToBlock(blk->data, blkSize); 1058 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print()); 1059 1060 incHitCount(pkt); 1061 // populate the time when the block will be ready to access. 1062 blk->whenReady = clockEdge(fillLatency) + pkt->headerDelay + 1063 pkt->payloadDelay; 1064 // if this a write-through packet it will be sent to cache 1065 // below 1066 return !pkt->writeThrough(); 1067 } else if (blk && (pkt->needsWritable() ? blk->isWritable() : 1068 blk->isReadable())) { 1069 // OK to satisfy access 1070 incHitCount(pkt); 1071 satisfyRequest(pkt, blk); 1072 maintainClusivity(pkt->fromCache(), blk); 1073 1074 return true; 1075 } 1076 1077 // Can't satisfy access normally... either no block (blk == nullptr) 1078 // or have block but need writable 1079 1080 incMissCount(pkt); 1081 1082 if (!blk && pkt->isLLSC() && pkt->isWrite()) { 1083 // complete miss on store conditional... just give up now 1084 pkt->req->setExtraData(0); 1085 return true; 1086 } 1087 1088 return false; 1089} 1090 1091void 1092BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk) 1093{ 1094 if (from_cache && blk && blk->isValid() && !blk->isDirty() && 1095 clusivity == Enums::mostly_excl) { 1096 // if we have responded to a cache, and our block is still 1097 // valid, but not dirty, and this cache is mostly exclusive 1098 // with respect to the cache above, drop the block 1099 invalidateBlock(blk); 1100 } 1101} 1102 1103CacheBlk* 1104BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks, 1105 bool allocate) 1106{ 1107 assert(pkt->isResponse() || pkt->cmd == MemCmd::WriteLineReq); 1108 Addr addr = pkt->getAddr(); 1109 bool is_secure = pkt->isSecure(); 1110#if TRACING_ON 1111 CacheBlk::State old_state = blk ? blk->status : 0; 1112#endif 1113 1114 // When handling a fill, we should have no writes to this line. 1115 assert(addr == pkt->getBlockAddr(blkSize)); 1116 assert(!writeBuffer.findMatch(addr, is_secure)); 1117 1118 if (!blk) { 1119 // better have read new data... 1120 assert(pkt->hasData()); 1121 1122 // only read responses and write-line requests have data; 1123 // note that we don't write the data here for write-line - that 1124 // happens in the subsequent call to satisfyRequest 1125 assert(pkt->isRead() || pkt->cmd == MemCmd::WriteLineReq); 1126 1127 // need to do a replacement if allocating, otherwise we stick 1128 // with the temporary storage 1129 blk = allocate ? allocateBlock(addr, is_secure, writebacks) : nullptr; 1130 1131 if (!blk) { 1132 // No replaceable block or a mostly exclusive 1133 // cache... just use temporary storage to complete the 1134 // current request and then get rid of it 1135 assert(!tempBlock->isValid()); 1136 blk = tempBlock; 1137 tempBlock->set = tags->extractSet(addr); 1138 tempBlock->tag = tags->extractTag(addr); 1139 DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr, 1140 is_secure ? "s" : "ns"); 1141 } else { 1142 tags->insertBlock(pkt, blk); 1143 } 1144 1145 // we should never be overwriting a valid block 1146 assert(!blk->isValid()); 1147 } else { 1148 // existing block... probably an upgrade 1149 assert(blk->tag == tags->extractTag(addr)); 1150 // either we're getting new data or the block should already be valid 1151 assert(pkt->hasData() || blk->isValid()); 1152 // don't clear block status... if block is already dirty we 1153 // don't want to lose that 1154 } 1155 1156 if (is_secure) 1157 blk->status |= BlkSecure; 1158 blk->status |= BlkValid | BlkReadable; 1159 1160 // sanity check for whole-line writes, which should always be 1161 // marked as writable as part of the fill, and then later marked 1162 // dirty as part of satisfyRequest 1163 if (pkt->cmd == MemCmd::WriteLineReq) { 1164 assert(!pkt->hasSharers()); 1165 } 1166 1167 // here we deal with setting the appropriate state of the line, 1168 // and we start by looking at the hasSharers flag, and ignore the 1169 // cacheResponding flag (normally signalling dirty data) if the 1170 // packet has sharers, thus the line is never allocated as Owned 1171 // (dirty but not writable), and always ends up being either 1172 // Shared, Exclusive or Modified, see Packet::setCacheResponding 1173 // for more details 1174 if (!pkt->hasSharers()) { 1175 // we could get a writable line from memory (rather than a 1176 // cache) even in a read-only cache, note that we set this bit 1177 // even for a read-only cache, possibly revisit this decision 1178 blk->status |= BlkWritable; 1179 1180 // check if we got this via cache-to-cache transfer (i.e., from a 1181 // cache that had the block in Modified or Owned state) 1182 if (pkt->cacheResponding()) { 1183 // we got the block in Modified state, and invalidated the 1184 // owners copy 1185 blk->status |= BlkDirty; 1186 1187 chatty_assert(!isReadOnly, "Should never see dirty snoop response " 1188 "in read-only cache %s\n", name()); 1189 } 1190 } 1191 1192 DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n", 1193 addr, is_secure ? "s" : "ns", old_state, blk->print()); 1194 1195 // if we got new data, copy it in (checking for a read response 1196 // and a response that has data is the same in the end) 1197 if (pkt->isRead()) { 1198 // sanity checks 1199 assert(pkt->hasData()); 1200 assert(pkt->getSize() == blkSize); 1201 1202 pkt->writeDataToBlock(blk->data, blkSize); 1203 } 1204 // We pay for fillLatency here. 1205 blk->whenReady = clockEdge() + fillLatency * clockPeriod() + 1206 pkt->payloadDelay; 1207 1208 return blk; 1209} 1210 1211CacheBlk* 1212BaseCache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks) 1213{ 1214 // Find replacement victim 1215 CacheBlk *blk = tags->findVictim(addr); 1216 1217 // It is valid to return nullptr if there is no victim 1218 if (!blk) 1219 return nullptr; 1220 1221 if (blk->isValid()) { 1222 Addr repl_addr = tags->regenerateBlkAddr(blk); 1223 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure()); 1224 if (repl_mshr) { 1225 // must be an outstanding upgrade or clean request 1226 // on a block we're about to replace... 1227 assert((!blk->isWritable() && repl_mshr->needsWritable()) || 1228 repl_mshr->isCleaning()); 1229 // too hard to replace block with transient state 1230 // allocation failed, block not inserted 1231 return nullptr; 1232 } else { 1233 DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx " 1234 "(%s): %s\n", repl_addr, blk->isSecure() ? "s" : "ns", 1235 addr, is_secure ? "s" : "ns", 1236 blk->isDirty() ? "writeback" : "clean"); 1237 1238 if (blk->wasPrefetched()) { 1239 unusedPrefetches++; 1240 } 1241 evictBlock(blk, writebacks); 1242 replacements++; 1243 } 1244 } 1245 1246 return blk; 1247} 1248 1249void 1250BaseCache::invalidateBlock(CacheBlk *blk) 1251{ 1252 if (blk != tempBlock) 1253 tags->invalidate(blk); 1254 blk->invalidate(); 1255} 1256 1257PacketPtr 1258BaseCache::writebackBlk(CacheBlk *blk) 1259{ 1260 chatty_assert(!isReadOnly || writebackClean, 1261 "Writeback from read-only cache"); 1262 assert(blk && blk->isValid() && (blk->isDirty() || writebackClean)); 1263 1264 writebacks[Request::wbMasterId]++; 1265 1266 Request *req = new Request(tags->regenerateBlkAddr(blk), blkSize, 0, 1267 Request::wbMasterId); 1268 if (blk->isSecure()) 1269 req->setFlags(Request::SECURE); 1270 1271 req->taskId(blk->task_id); 1272 1273 PacketPtr pkt = 1274 new Packet(req, blk->isDirty() ? 1275 MemCmd::WritebackDirty : MemCmd::WritebackClean); 1276 1277 DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n", 1278 pkt->print(), blk->isWritable(), blk->isDirty()); 1279 1280 if (blk->isWritable()) { 1281 // not asserting shared means we pass the block in modified 1282 // state, mark our own block non-writeable 1283 blk->status &= ~BlkWritable; 1284 } else { 1285 // we are in the Owned state, tell the receiver 1286 pkt->setHasSharers(); 1287 } 1288 1289 // make sure the block is not marked dirty 1290 blk->status &= ~BlkDirty; 1291 1292 pkt->allocate(); 1293 pkt->setDataFromBlock(blk->data, blkSize); 1294 1295 return pkt; 1296} 1297 1298PacketPtr 1299BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id) 1300{ 1301 Request *req = new Request(tags->regenerateBlkAddr(blk), blkSize, 0, 1302 Request::wbMasterId); 1303 if (blk->isSecure()) { 1304 req->setFlags(Request::SECURE); 1305 } 1306 req->taskId(blk->task_id); 1307 1308 PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id); 1309 1310 if (dest) { 1311 req->setFlags(dest); 1312 pkt->setWriteThrough(); 1313 } 1314 1315 DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(), 1316 blk->isWritable(), blk->isDirty()); 1317 1318 if (blk->isWritable()) { 1319 // not asserting shared means we pass the block in modified 1320 // state, mark our own block non-writeable 1321 blk->status &= ~BlkWritable; 1322 } else { 1323 // we are in the Owned state, tell the receiver 1324 pkt->setHasSharers(); 1325 } 1326 1327 // make sure the block is not marked dirty 1328 blk->status &= ~BlkDirty; 1329 1330 pkt->allocate(); 1331 pkt->setDataFromBlock(blk->data, blkSize); 1332 1333 return pkt; 1334} 1335 1336 1337void 1338BaseCache::memWriteback() 1339{ 1340 CacheBlkVisitorWrapper visitor(*this, &BaseCache::writebackVisitor); 1341 tags->forEachBlk(visitor); 1342} 1343 1344void 1345BaseCache::memInvalidate() 1346{ 1347 CacheBlkVisitorWrapper visitor(*this, &BaseCache::invalidateVisitor); 1348 tags->forEachBlk(visitor); 1349} 1350 1351bool 1352BaseCache::isDirty() const 1353{ 1354 CacheBlkIsDirtyVisitor visitor; 1355 tags->forEachBlk(visitor); 1356 1357 return visitor.isDirty(); 1358} 1359 1360bool 1361BaseCache::writebackVisitor(CacheBlk &blk) 1362{ 1363 if (blk.isDirty()) { 1364 assert(blk.isValid()); 1365 1366 Request request(tags->regenerateBlkAddr(&blk), 1367 blkSize, 0, Request::funcMasterId); 1368 request.taskId(blk.task_id); 1369 if (blk.isSecure()) { 1370 request.setFlags(Request::SECURE); 1371 } 1372 1373 Packet packet(&request, MemCmd::WriteReq); 1374 packet.dataStatic(blk.data); 1375 1376 memSidePort.sendFunctional(&packet); 1377 1378 blk.status &= ~BlkDirty; 1379 } 1380 1381 return true; 1382} 1383 1384bool 1385BaseCache::invalidateVisitor(CacheBlk &blk) 1386{ 1387 if (blk.isDirty()) 1388 warn_once("Invalidating dirty cache lines. " \ 1389 "Expect things to break.\n"); 1390 1391 if (blk.isValid()) { 1392 assert(!blk.isDirty()); 1393 invalidateBlock(&blk); 1394 } 1395 1396 return true; 1397} 1398 1399Tick 1400BaseCache::nextQueueReadyTime() const 1401{ 1402 Tick nextReady = std::min(mshrQueue.nextReadyTime(), 1403 writeBuffer.nextReadyTime()); 1404 1405 // Don't signal prefetch ready time if no MSHRs available 1406 // Will signal once enoguh MSHRs are deallocated 1407 if (prefetcher && mshrQueue.canPrefetch()) { 1408 nextReady = std::min(nextReady, 1409 prefetcher->nextPrefetchReadyTime()); 1410 } 1411 1412 return nextReady; 1413} 1414 1415 1416bool 1417BaseCache::sendMSHRQueuePacket(MSHR* mshr) 1418{ 1419 assert(mshr); 1420 1421 // use request from 1st target 1422 PacketPtr tgt_pkt = mshr->getTarget()->pkt; 1423 1424 DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print()); 1425 1426 CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure); 1427 1428 // either a prefetch that is not present upstream, or a normal 1429 // MSHR request, proceed to get the packet to send downstream 1430 PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable()); 1431 1432 mshr->isForward = (pkt == nullptr); 1433 1434 if (mshr->isForward) { 1435 // not a cache block request, but a response is expected 1436 // make copy of current packet to forward, keep current 1437 // copy for response handling 1438 pkt = new Packet(tgt_pkt, false, true); 1439 assert(!pkt->isWrite()); 1440 } 1441 1442 // play it safe and append (rather than set) the sender state, 1443 // as forwarded packets may already have existing state 1444 pkt->pushSenderState(mshr); 1445 1446 if (pkt->isClean() && blk && blk->isDirty()) { 1447 // A cache clean opearation is looking for a dirty block. Mark 1448 // the packet so that the destination xbar can determine that 1449 // there will be a follow-up write packet as well. 1450 pkt->setSatisfied(); 1451 } 1452 1453 if (!memSidePort.sendTimingReq(pkt)) { 1454 // we are awaiting a retry, but we 1455 // delete the packet and will be creating a new packet 1456 // when we get the opportunity 1457 delete pkt; 1458 1459 // note that we have now masked any requestBus and 1460 // schedSendEvent (we will wait for a retry before 1461 // doing anything), and this is so even if we do not 1462 // care about this packet and might override it before 1463 // it gets retried 1464 return true; 1465 } else { 1466 // As part of the call to sendTimingReq the packet is 1467 // forwarded to all neighbouring caches (and any caches 1468 // above them) as a snoop. Thus at this point we know if 1469 // any of the neighbouring caches are responding, and if 1470 // so, we know it is dirty, and we can determine if it is 1471 // being passed as Modified, making our MSHR the ordering 1472 // point 1473 bool pending_modified_resp = !pkt->hasSharers() && 1474 pkt->cacheResponding(); 1475 markInService(mshr, pending_modified_resp); 1476 1477 if (pkt->isClean() && blk && blk->isDirty()) { 1478 // A cache clean opearation is looking for a dirty 1479 // block. If a dirty block is encountered a WriteClean 1480 // will update any copies to the path to the memory 1481 // until the point of reference. 1482 DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n", 1483 __func__, pkt->print(), blk->print()); 1484 PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), 1485 pkt->id); 1486 PacketList writebacks; 1487 writebacks.push_back(wb_pkt); 1488 doWritebacks(writebacks, 0); 1489 } 1490 1491 return false; 1492 } 1493} 1494 1495bool 1496BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry) 1497{ 1498 assert(wq_entry); 1499 1500 // always a single target for write queue entries 1501 PacketPtr tgt_pkt = wq_entry->getTarget()->pkt; 1502 1503 DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print()); 1504 1505 // forward as is, both for evictions and uncacheable writes 1506 if (!memSidePort.sendTimingReq(tgt_pkt)) { 1507 // note that we have now masked any requestBus and 1508 // schedSendEvent (we will wait for a retry before 1509 // doing anything), and this is so even if we do not 1510 // care about this packet and might override it before 1511 // it gets retried 1512 return true; 1513 } else { 1514 markInService(wq_entry); 1515 return false; 1516 } 1517} 1518 1519void 1520BaseCache::serialize(CheckpointOut &cp) const 1521{ 1522 bool dirty(isDirty()); 1523 1524 if (dirty) { 1525 warn("*** The cache still contains dirty data. ***\n"); 1526 warn(" Make sure to drain the system using the correct flags.\n"); 1527 warn(" This checkpoint will not restore correctly " \ 1528 "and dirty data in the cache will be lost!\n"); 1529 } 1530 1531 // Since we don't checkpoint the data in the cache, any dirty data 1532 // will be lost when restoring from a checkpoint of a system that 1533 // wasn't drained properly. Flag the checkpoint as invalid if the 1534 // cache contains dirty data. 1535 bool bad_checkpoint(dirty); 1536 SERIALIZE_SCALAR(bad_checkpoint); 1537} 1538 1539void 1540BaseCache::unserialize(CheckpointIn &cp) 1541{ 1542 bool bad_checkpoint; 1543 UNSERIALIZE_SCALAR(bad_checkpoint); 1544 if (bad_checkpoint) { 1545 fatal("Restoring from checkpoints with dirty caches is not " 1546 "supported in the classic memory system. Please remove any " 1547 "caches or drain them properly before taking checkpoints.\n"); 1548 } 1549} 1550 1551void 1552BaseCache::regStats() 1553{ 1554 MemObject::regStats(); 1555 1556 using namespace Stats; 1557 1558 // Hit statistics 1559 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1560 MemCmd cmd(access_idx); 1561 const string &cstr = cmd.toString(); 1562 1563 hits[access_idx] 1564 .init(system->maxMasters()) 1565 .name(name() + "." + cstr + "_hits") 1566 .desc("number of " + cstr + " hits") 1567 .flags(total | nozero | nonan) 1568 ; 1569 for (int i = 0; i < system->maxMasters(); i++) { 1570 hits[access_idx].subname(i, system->getMasterName(i)); 1571 } 1572 } 1573 1574// These macros make it easier to sum the right subset of commands and 1575// to change the subset of commands that are considered "demand" vs 1576// "non-demand" 1577#define SUM_DEMAND(s) \ 1578 (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \ 1579 s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq]) 1580 1581// should writebacks be included here? prior code was inconsistent... 1582#define SUM_NON_DEMAND(s) \ 1583 (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq]) 1584 1585 demandHits 1586 .name(name() + ".demand_hits") 1587 .desc("number of demand (read+write) hits") 1588 .flags(total | nozero | nonan) 1589 ; 1590 demandHits = SUM_DEMAND(hits); 1591 for (int i = 0; i < system->maxMasters(); i++) { 1592 demandHits.subname(i, system->getMasterName(i)); 1593 } 1594 1595 overallHits 1596 .name(name() + ".overall_hits") 1597 .desc("number of overall hits") 1598 .flags(total | nozero | nonan) 1599 ; 1600 overallHits = demandHits + SUM_NON_DEMAND(hits); 1601 for (int i = 0; i < system->maxMasters(); i++) { 1602 overallHits.subname(i, system->getMasterName(i)); 1603 } 1604 1605 // Miss statistics 1606 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1607 MemCmd cmd(access_idx); 1608 const string &cstr = cmd.toString(); 1609 1610 misses[access_idx] 1611 .init(system->maxMasters()) 1612 .name(name() + "." + cstr + "_misses") 1613 .desc("number of " + cstr + " misses") 1614 .flags(total | nozero | nonan) 1615 ; 1616 for (int i = 0; i < system->maxMasters(); i++) { 1617 misses[access_idx].subname(i, system->getMasterName(i)); 1618 } 1619 } 1620 1621 demandMisses 1622 .name(name() + ".demand_misses") 1623 .desc("number of demand (read+write) misses") 1624 .flags(total | nozero | nonan) 1625 ; 1626 demandMisses = SUM_DEMAND(misses); 1627 for (int i = 0; i < system->maxMasters(); i++) { 1628 demandMisses.subname(i, system->getMasterName(i)); 1629 } 1630 1631 overallMisses 1632 .name(name() + ".overall_misses") 1633 .desc("number of overall misses") 1634 .flags(total | nozero | nonan) 1635 ; 1636 overallMisses = demandMisses + SUM_NON_DEMAND(misses); 1637 for (int i = 0; i < system->maxMasters(); i++) { 1638 overallMisses.subname(i, system->getMasterName(i)); 1639 } 1640 1641 // Miss latency statistics 1642 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1643 MemCmd cmd(access_idx); 1644 const string &cstr = cmd.toString(); 1645 1646 missLatency[access_idx] 1647 .init(system->maxMasters()) 1648 .name(name() + "." + cstr + "_miss_latency") 1649 .desc("number of " + cstr + " miss cycles") 1650 .flags(total | nozero | nonan) 1651 ; 1652 for (int i = 0; i < system->maxMasters(); i++) { 1653 missLatency[access_idx].subname(i, system->getMasterName(i)); 1654 } 1655 } 1656 1657 demandMissLatency 1658 .name(name() + ".demand_miss_latency") 1659 .desc("number of demand (read+write) miss cycles") 1660 .flags(total | nozero | nonan) 1661 ; 1662 demandMissLatency = SUM_DEMAND(missLatency); 1663 for (int i = 0; i < system->maxMasters(); i++) { 1664 demandMissLatency.subname(i, system->getMasterName(i)); 1665 } 1666 1667 overallMissLatency 1668 .name(name() + ".overall_miss_latency") 1669 .desc("number of overall miss cycles") 1670 .flags(total | nozero | nonan) 1671 ; 1672 overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency); 1673 for (int i = 0; i < system->maxMasters(); i++) { 1674 overallMissLatency.subname(i, system->getMasterName(i)); 1675 } 1676 1677 // access formulas 1678 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1679 MemCmd cmd(access_idx); 1680 const string &cstr = cmd.toString(); 1681 1682 accesses[access_idx] 1683 .name(name() + "." + cstr + "_accesses") 1684 .desc("number of " + cstr + " accesses(hits+misses)") 1685 .flags(total | nozero | nonan) 1686 ; 1687 accesses[access_idx] = hits[access_idx] + misses[access_idx]; 1688 1689 for (int i = 0; i < system->maxMasters(); i++) { 1690 accesses[access_idx].subname(i, system->getMasterName(i)); 1691 } 1692 } 1693 1694 demandAccesses 1695 .name(name() + ".demand_accesses") 1696 .desc("number of demand (read+write) accesses") 1697 .flags(total | nozero | nonan) 1698 ; 1699 demandAccesses = demandHits + demandMisses; 1700 for (int i = 0; i < system->maxMasters(); i++) { 1701 demandAccesses.subname(i, system->getMasterName(i)); 1702 } 1703 1704 overallAccesses 1705 .name(name() + ".overall_accesses") 1706 .desc("number of overall (read+write) accesses") 1707 .flags(total | nozero | nonan) 1708 ; 1709 overallAccesses = overallHits + overallMisses; 1710 for (int i = 0; i < system->maxMasters(); i++) { 1711 overallAccesses.subname(i, system->getMasterName(i)); 1712 } 1713 1714 // miss rate formulas 1715 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1716 MemCmd cmd(access_idx); 1717 const string &cstr = cmd.toString(); 1718 1719 missRate[access_idx] 1720 .name(name() + "." + cstr + "_miss_rate") 1721 .desc("miss rate for " + cstr + " accesses") 1722 .flags(total | nozero | nonan) 1723 ; 1724 missRate[access_idx] = misses[access_idx] / accesses[access_idx]; 1725 1726 for (int i = 0; i < system->maxMasters(); i++) { 1727 missRate[access_idx].subname(i, system->getMasterName(i)); 1728 } 1729 } 1730 1731 demandMissRate 1732 .name(name() + ".demand_miss_rate") 1733 .desc("miss rate for demand accesses") 1734 .flags(total | nozero | nonan) 1735 ; 1736 demandMissRate = demandMisses / demandAccesses; 1737 for (int i = 0; i < system->maxMasters(); i++) { 1738 demandMissRate.subname(i, system->getMasterName(i)); 1739 } 1740 1741 overallMissRate 1742 .name(name() + ".overall_miss_rate") 1743 .desc("miss rate for overall accesses") 1744 .flags(total | nozero | nonan) 1745 ; 1746 overallMissRate = overallMisses / overallAccesses; 1747 for (int i = 0; i < system->maxMasters(); i++) { 1748 overallMissRate.subname(i, system->getMasterName(i)); 1749 } 1750 1751 // miss latency formulas 1752 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1753 MemCmd cmd(access_idx); 1754 const string &cstr = cmd.toString(); 1755 1756 avgMissLatency[access_idx] 1757 .name(name() + "." + cstr + "_avg_miss_latency") 1758 .desc("average " + cstr + " miss latency") 1759 .flags(total | nozero | nonan) 1760 ; 1761 avgMissLatency[access_idx] = 1762 missLatency[access_idx] / misses[access_idx]; 1763 1764 for (int i = 0; i < system->maxMasters(); i++) { 1765 avgMissLatency[access_idx].subname(i, system->getMasterName(i)); 1766 } 1767 } 1768 1769 demandAvgMissLatency 1770 .name(name() + ".demand_avg_miss_latency") 1771 .desc("average overall miss latency") 1772 .flags(total | nozero | nonan) 1773 ; 1774 demandAvgMissLatency = demandMissLatency / demandMisses; 1775 for (int i = 0; i < system->maxMasters(); i++) { 1776 demandAvgMissLatency.subname(i, system->getMasterName(i)); 1777 } 1778 1779 overallAvgMissLatency 1780 .name(name() + ".overall_avg_miss_latency") 1781 .desc("average overall miss latency") 1782 .flags(total | nozero | nonan) 1783 ; 1784 overallAvgMissLatency = overallMissLatency / overallMisses; 1785 for (int i = 0; i < system->maxMasters(); i++) { 1786 overallAvgMissLatency.subname(i, system->getMasterName(i)); 1787 } 1788 1789 blocked_cycles.init(NUM_BLOCKED_CAUSES); 1790 blocked_cycles 1791 .name(name() + ".blocked_cycles") 1792 .desc("number of cycles access was blocked") 1793 .subname(Blocked_NoMSHRs, "no_mshrs") 1794 .subname(Blocked_NoTargets, "no_targets") 1795 ; 1796 1797 1798 blocked_causes.init(NUM_BLOCKED_CAUSES); 1799 blocked_causes 1800 .name(name() + ".blocked") 1801 .desc("number of cycles access was blocked") 1802 .subname(Blocked_NoMSHRs, "no_mshrs") 1803 .subname(Blocked_NoTargets, "no_targets") 1804 ; 1805 1806 avg_blocked 1807 .name(name() + ".avg_blocked_cycles") 1808 .desc("average number of cycles each access was blocked") 1809 .subname(Blocked_NoMSHRs, "no_mshrs") 1810 .subname(Blocked_NoTargets, "no_targets") 1811 ; 1812 1813 avg_blocked = blocked_cycles / blocked_causes; 1814 1815 unusedPrefetches 1816 .name(name() + ".unused_prefetches") 1817 .desc("number of HardPF blocks evicted w/o reference") 1818 .flags(nozero) 1819 ; 1820 1821 writebacks 1822 .init(system->maxMasters()) 1823 .name(name() + ".writebacks") 1824 .desc("number of writebacks") 1825 .flags(total | nozero | nonan) 1826 ; 1827 for (int i = 0; i < system->maxMasters(); i++) { 1828 writebacks.subname(i, system->getMasterName(i)); 1829 } 1830 1831 // MSHR statistics 1832 // MSHR hit statistics 1833 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1834 MemCmd cmd(access_idx); 1835 const string &cstr = cmd.toString(); 1836 1837 mshr_hits[access_idx] 1838 .init(system->maxMasters()) 1839 .name(name() + "." + cstr + "_mshr_hits") 1840 .desc("number of " + cstr + " MSHR hits") 1841 .flags(total | nozero | nonan) 1842 ; 1843 for (int i = 0; i < system->maxMasters(); i++) { 1844 mshr_hits[access_idx].subname(i, system->getMasterName(i)); 1845 } 1846 } 1847 1848 demandMshrHits 1849 .name(name() + ".demand_mshr_hits") 1850 .desc("number of demand (read+write) MSHR hits") 1851 .flags(total | nozero | nonan) 1852 ; 1853 demandMshrHits = SUM_DEMAND(mshr_hits); 1854 for (int i = 0; i < system->maxMasters(); i++) { 1855 demandMshrHits.subname(i, system->getMasterName(i)); 1856 } 1857 1858 overallMshrHits 1859 .name(name() + ".overall_mshr_hits") 1860 .desc("number of overall MSHR hits") 1861 .flags(total | nozero | nonan) 1862 ; 1863 overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits); 1864 for (int i = 0; i < system->maxMasters(); i++) { 1865 overallMshrHits.subname(i, system->getMasterName(i)); 1866 } 1867 1868 // MSHR miss statistics 1869 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1870 MemCmd cmd(access_idx); 1871 const string &cstr = cmd.toString(); 1872 1873 mshr_misses[access_idx] 1874 .init(system->maxMasters()) 1875 .name(name() + "." + cstr + "_mshr_misses") 1876 .desc("number of " + cstr + " MSHR misses") 1877 .flags(total | nozero | nonan) 1878 ; 1879 for (int i = 0; i < system->maxMasters(); i++) { 1880 mshr_misses[access_idx].subname(i, system->getMasterName(i)); 1881 } 1882 } 1883 1884 demandMshrMisses 1885 .name(name() + ".demand_mshr_misses") 1886 .desc("number of demand (read+write) MSHR misses") 1887 .flags(total | nozero | nonan) 1888 ; 1889 demandMshrMisses = SUM_DEMAND(mshr_misses); 1890 for (int i = 0; i < system->maxMasters(); i++) { 1891 demandMshrMisses.subname(i, system->getMasterName(i)); 1892 } 1893 1894 overallMshrMisses 1895 .name(name() + ".overall_mshr_misses") 1896 .desc("number of overall MSHR misses") 1897 .flags(total | nozero | nonan) 1898 ; 1899 overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses); 1900 for (int i = 0; i < system->maxMasters(); i++) { 1901 overallMshrMisses.subname(i, system->getMasterName(i)); 1902 } 1903 1904 // MSHR miss latency statistics 1905 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1906 MemCmd cmd(access_idx); 1907 const string &cstr = cmd.toString(); 1908 1909 mshr_miss_latency[access_idx] 1910 .init(system->maxMasters()) 1911 .name(name() + "." + cstr + "_mshr_miss_latency") 1912 .desc("number of " + cstr + " MSHR miss cycles") 1913 .flags(total | nozero | nonan) 1914 ; 1915 for (int i = 0; i < system->maxMasters(); i++) { 1916 mshr_miss_latency[access_idx].subname(i, system->getMasterName(i)); 1917 } 1918 } 1919 1920 demandMshrMissLatency 1921 .name(name() + ".demand_mshr_miss_latency") 1922 .desc("number of demand (read+write) MSHR miss cycles") 1923 .flags(total | nozero | nonan) 1924 ; 1925 demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency); 1926 for (int i = 0; i < system->maxMasters(); i++) { 1927 demandMshrMissLatency.subname(i, system->getMasterName(i)); 1928 } 1929 1930 overallMshrMissLatency 1931 .name(name() + ".overall_mshr_miss_latency") 1932 .desc("number of overall MSHR miss cycles") 1933 .flags(total | nozero | nonan) 1934 ; 1935 overallMshrMissLatency = 1936 demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency); 1937 for (int i = 0; i < system->maxMasters(); i++) { 1938 overallMshrMissLatency.subname(i, system->getMasterName(i)); 1939 } 1940 1941 // MSHR uncacheable statistics 1942 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1943 MemCmd cmd(access_idx); 1944 const string &cstr = cmd.toString(); 1945 1946 mshr_uncacheable[access_idx] 1947 .init(system->maxMasters()) 1948 .name(name() + "." + cstr + "_mshr_uncacheable") 1949 .desc("number of " + cstr + " MSHR uncacheable") 1950 .flags(total | nozero | nonan) 1951 ; 1952 for (int i = 0; i < system->maxMasters(); i++) { 1953 mshr_uncacheable[access_idx].subname(i, system->getMasterName(i)); 1954 } 1955 } 1956 1957 overallMshrUncacheable 1958 .name(name() + ".overall_mshr_uncacheable_misses") 1959 .desc("number of overall MSHR uncacheable misses") 1960 .flags(total | nozero | nonan) 1961 ; 1962 overallMshrUncacheable = 1963 SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable); 1964 for (int i = 0; i < system->maxMasters(); i++) { 1965 overallMshrUncacheable.subname(i, system->getMasterName(i)); 1966 } 1967 1968 // MSHR miss latency statistics 1969 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1970 MemCmd cmd(access_idx); 1971 const string &cstr = cmd.toString(); 1972 1973 mshr_uncacheable_lat[access_idx] 1974 .init(system->maxMasters()) 1975 .name(name() + "." + cstr + "_mshr_uncacheable_latency") 1976 .desc("number of " + cstr + " MSHR uncacheable cycles") 1977 .flags(total | nozero | nonan) 1978 ; 1979 for (int i = 0; i < system->maxMasters(); i++) { 1980 mshr_uncacheable_lat[access_idx].subname( 1981 i, system->getMasterName(i)); 1982 } 1983 } 1984 1985 overallMshrUncacheableLatency 1986 .name(name() + ".overall_mshr_uncacheable_latency") 1987 .desc("number of overall MSHR uncacheable cycles") 1988 .flags(total | nozero | nonan) 1989 ; 1990 overallMshrUncacheableLatency = 1991 SUM_DEMAND(mshr_uncacheable_lat) + 1992 SUM_NON_DEMAND(mshr_uncacheable_lat); 1993 for (int i = 0; i < system->maxMasters(); i++) { 1994 overallMshrUncacheableLatency.subname(i, system->getMasterName(i)); 1995 } 1996 1997#if 0 1998 // MSHR access formulas 1999 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2000 MemCmd cmd(access_idx); 2001 const string &cstr = cmd.toString(); 2002 2003 mshrAccesses[access_idx] 2004 .name(name() + "." + cstr + "_mshr_accesses") 2005 .desc("number of " + cstr + " mshr accesses(hits+misses)") 2006 .flags(total | nozero | nonan) 2007 ; 2008 mshrAccesses[access_idx] = 2009 mshr_hits[access_idx] + mshr_misses[access_idx] 2010 + mshr_uncacheable[access_idx]; 2011 } 2012 2013 demandMshrAccesses 2014 .name(name() + ".demand_mshr_accesses") 2015 .desc("number of demand (read+write) mshr accesses") 2016 .flags(total | nozero | nonan) 2017 ; 2018 demandMshrAccesses = demandMshrHits + demandMshrMisses; 2019 2020 overallMshrAccesses 2021 .name(name() + ".overall_mshr_accesses") 2022 .desc("number of overall (read+write) mshr accesses") 2023 .flags(total | nozero | nonan) 2024 ; 2025 overallMshrAccesses = overallMshrHits + overallMshrMisses 2026 + overallMshrUncacheable; 2027#endif 2028 2029 // MSHR miss rate formulas 2030 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2031 MemCmd cmd(access_idx); 2032 const string &cstr = cmd.toString(); 2033 2034 mshrMissRate[access_idx] 2035 .name(name() + "." + cstr + "_mshr_miss_rate") 2036 .desc("mshr miss rate for " + cstr + " accesses") 2037 .flags(total | nozero | nonan) 2038 ; 2039 mshrMissRate[access_idx] = 2040 mshr_misses[access_idx] / accesses[access_idx]; 2041 2042 for (int i = 0; i < system->maxMasters(); i++) { 2043 mshrMissRate[access_idx].subname(i, system->getMasterName(i)); 2044 } 2045 } 2046 2047 demandMshrMissRate 2048 .name(name() + ".demand_mshr_miss_rate") 2049 .desc("mshr miss rate for demand accesses") 2050 .flags(total | nozero | nonan) 2051 ; 2052 demandMshrMissRate = demandMshrMisses / demandAccesses; 2053 for (int i = 0; i < system->maxMasters(); i++) { 2054 demandMshrMissRate.subname(i, system->getMasterName(i)); 2055 } 2056 2057 overallMshrMissRate 2058 .name(name() + ".overall_mshr_miss_rate") 2059 .desc("mshr miss rate for overall accesses") 2060 .flags(total | nozero | nonan) 2061 ; 2062 overallMshrMissRate = overallMshrMisses / overallAccesses; 2063 for (int i = 0; i < system->maxMasters(); i++) { 2064 overallMshrMissRate.subname(i, system->getMasterName(i)); 2065 } 2066 2067 // mshrMiss latency formulas 2068 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2069 MemCmd cmd(access_idx); 2070 const string &cstr = cmd.toString(); 2071 2072 avgMshrMissLatency[access_idx] 2073 .name(name() + "." + cstr + "_avg_mshr_miss_latency") 2074 .desc("average " + cstr + " mshr miss latency") 2075 .flags(total | nozero | nonan) 2076 ; 2077 avgMshrMissLatency[access_idx] = 2078 mshr_miss_latency[access_idx] / mshr_misses[access_idx]; 2079 2080 for (int i = 0; i < system->maxMasters(); i++) { 2081 avgMshrMissLatency[access_idx].subname( 2082 i, system->getMasterName(i)); 2083 } 2084 } 2085 2086 demandAvgMshrMissLatency 2087 .name(name() + ".demand_avg_mshr_miss_latency") 2088 .desc("average overall mshr miss latency") 2089 .flags(total | nozero | nonan) 2090 ; 2091 demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses; 2092 for (int i = 0; i < system->maxMasters(); i++) { 2093 demandAvgMshrMissLatency.subname(i, system->getMasterName(i)); 2094 } 2095 2096 overallAvgMshrMissLatency 2097 .name(name() + ".overall_avg_mshr_miss_latency") 2098 .desc("average overall mshr miss latency") 2099 .flags(total | nozero | nonan) 2100 ; 2101 overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses; 2102 for (int i = 0; i < system->maxMasters(); i++) { 2103 overallAvgMshrMissLatency.subname(i, system->getMasterName(i)); 2104 } 2105 2106 // mshrUncacheable latency formulas 2107 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2108 MemCmd cmd(access_idx); 2109 const string &cstr = cmd.toString(); 2110 2111 avgMshrUncacheableLatency[access_idx] 2112 .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency") 2113 .desc("average " + cstr + " mshr uncacheable latency") 2114 .flags(total | nozero | nonan) 2115 ; 2116 avgMshrUncacheableLatency[access_idx] = 2117 mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx]; 2118 2119 for (int i = 0; i < system->maxMasters(); i++) { 2120 avgMshrUncacheableLatency[access_idx].subname( 2121 i, system->getMasterName(i)); 2122 } 2123 } 2124 2125 overallAvgMshrUncacheableLatency 2126 .name(name() + ".overall_avg_mshr_uncacheable_latency") 2127 .desc("average overall mshr uncacheable latency") 2128 .flags(total | nozero | nonan) 2129 ; 2130 overallAvgMshrUncacheableLatency = 2131 overallMshrUncacheableLatency / overallMshrUncacheable; 2132 for (int i = 0; i < system->maxMasters(); i++) { 2133 overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i)); 2134 } 2135 2136 replacements 2137 .name(name() + ".replacements") 2138 .desc("number of replacements") 2139 ; 2140} 2141 2142/////////////// 2143// 2144// CpuSidePort 2145// 2146/////////////// 2147bool 2148BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt) 2149{ 2150 // Express snoop responses from master to slave, e.g., from L1 to L2 2151 cache->recvTimingSnoopResp(pkt); 2152 return true; 2153} 2154 2155 2156bool 2157BaseCache::CpuSidePort::tryTiming(PacketPtr pkt) 2158{ 2159 if (pkt->isExpressSnoop()) { 2160 // always let express snoop packets through even if blocked 2161 return true; 2162 } else if (blocked || mustSendRetry) { 2163 // either already committed to send a retry, or blocked 2164 mustSendRetry = true; 2165 return false; 2166 } 2167 mustSendRetry = false; 2168 return true; 2169} 2170 2171bool 2172BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt) 2173{ 2174 if (tryTiming(pkt)) { 2175 cache->recvTimingReq(pkt); 2176 return true; 2177 } 2178 return false; 2179} 2180 2181Tick 2182BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt) 2183{ 2184 return cache->recvAtomic(pkt); 2185} 2186 2187void 2188BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt) 2189{ 2190 // functional request 2191 cache->functionalAccess(pkt, true); 2192} 2193 2194AddrRangeList 2195BaseCache::CpuSidePort::getAddrRanges() const 2196{ 2197 return cache->getAddrRanges(); 2198} 2199 2200 2201BaseCache:: 2202CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache, 2203 const std::string &_label) 2204 : CacheSlavePort(_name, _cache, _label), cache(_cache) 2205{ 2206} 2207 2208/////////////// 2209// 2210// MemSidePort 2211// 2212/////////////// 2213bool 2214BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt) 2215{ 2216 cache->recvTimingResp(pkt); 2217 return true; 2218} 2219 2220// Express snooping requests to memside port 2221void 2222BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt) 2223{ 2224 // handle snooping requests 2225 cache->recvTimingSnoopReq(pkt); 2226} 2227 2228Tick 2229BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt) 2230{ 2231 return cache->recvAtomicSnoop(pkt); 2232} 2233 2234void 2235BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt) 2236{ 2237 // functional snoop (note that in contrast to atomic we don't have 2238 // a specific functionalSnoop method, as they have the same 2239 // behaviour regardless) 2240 cache->functionalAccess(pkt, false); 2241} 2242 2243void 2244BaseCache::CacheReqPacketQueue::sendDeferredPacket() 2245{ 2246 // sanity check 2247 assert(!waitingOnRetry); 2248 2249 // there should never be any deferred request packets in the 2250 // queue, instead we resly on the cache to provide the packets 2251 // from the MSHR queue or write queue 2252 assert(deferredPacketReadyTime() == MaxTick); 2253 2254 // check for request packets (requests & writebacks) 2255 QueueEntry* entry = cache.getNextQueueEntry(); 2256 2257 if (!entry) { 2258 // can happen if e.g. we attempt a writeback and fail, but 2259 // before the retry, the writeback is eliminated because 2260 // we snoop another cache's ReadEx. 2261 } else { 2262 // let our snoop responses go first if there are responses to 2263 // the same addresses 2264 if (checkConflictingSnoop(entry->blkAddr)) { 2265 return; 2266 } 2267 waitingOnRetry = entry->sendPacket(cache); 2268 } 2269 2270 // if we succeeded and are not waiting for a retry, schedule the 2271 // next send considering when the next queue is ready, note that 2272 // snoop responses have their own packet queue and thus schedule 2273 // their own events 2274 if (!waitingOnRetry) { 2275 schedSendEvent(cache.nextQueueReadyTime()); 2276 } 2277} 2278 2279BaseCache::MemSidePort::MemSidePort(const std::string &_name, 2280 BaseCache *_cache, 2281 const std::string &_label) 2282 : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue), 2283 _reqQueue(*_cache, *this, _snoopRespQueue, _label), 2284 _snoopRespQueue(*_cache, *this, _label), cache(_cache) 2285{ 2286} 2287