base.cc revision 13862
110553Salexandru.dutu@amd.com/* 210553Salexandru.dutu@amd.com * Copyright (c) 2012-2013, 2018 ARM Limited 310553Salexandru.dutu@amd.com * All rights reserved. 410553Salexandru.dutu@amd.com * 510553Salexandru.dutu@amd.com * The license below extends only to copyright in the software and shall 610553Salexandru.dutu@amd.com * not be construed as granting a license to any other intellectual 710553Salexandru.dutu@amd.com * property including but not limited to intellectual property relating 810553Salexandru.dutu@amd.com * to a hardware implementation of the functionality of the software 910553Salexandru.dutu@amd.com * licensed hereunder. You may use the software subject to the license 1010553Salexandru.dutu@amd.com * terms below provided that you ensure that this notice is replicated 1110553Salexandru.dutu@amd.com * unmodified and in its entirety in all distributions of the software, 1210553Salexandru.dutu@amd.com * modified or unmodified, in source code or in binary form. 1310553Salexandru.dutu@amd.com * 1410553Salexandru.dutu@amd.com * Copyright (c) 2003-2005 The Regents of The University of Michigan 1510553Salexandru.dutu@amd.com * All rights reserved. 1610553Salexandru.dutu@amd.com * 1710553Salexandru.dutu@amd.com * Redistribution and use in source and binary forms, with or without 1810553Salexandru.dutu@amd.com * modification, are permitted provided that the following conditions are 1910553Salexandru.dutu@amd.com * met: redistributions of source code must retain the above copyright 2010553Salexandru.dutu@amd.com * notice, this list of conditions and the following disclaimer; 2110553Salexandru.dutu@amd.com * redistributions in binary form must reproduce the above copyright 2210553Salexandru.dutu@amd.com * notice, this list of conditions and the following disclaimer in the 2310553Salexandru.dutu@amd.com * documentation and/or other materials provided with the distribution; 2410553Salexandru.dutu@amd.com * neither the name of the copyright holders nor the names of its 2510553Salexandru.dutu@amd.com * contributors may be used to endorse or promote products derived from 2610553Salexandru.dutu@amd.com * this software without specific prior written permission. 2710553Salexandru.dutu@amd.com * 2810553Salexandru.dutu@amd.com * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 2910553Salexandru.dutu@amd.com * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 3010553Salexandru.dutu@amd.com * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 3110553Salexandru.dutu@amd.com * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 3210553Salexandru.dutu@amd.com * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 3310553Salexandru.dutu@amd.com * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 3410553Salexandru.dutu@amd.com * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 3510553Salexandru.dutu@amd.com * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 3610553Salexandru.dutu@amd.com * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 3710553Salexandru.dutu@amd.com * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 3810553Salexandru.dutu@amd.com * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 3910553Salexandru.dutu@amd.com * 4010553Salexandru.dutu@amd.com * Authors: Erik Hallnor 4110553Salexandru.dutu@amd.com * Nikos Nikoleris 4210553Salexandru.dutu@amd.com */ 4310553Salexandru.dutu@amd.com 4410553Salexandru.dutu@amd.com/** 4510553Salexandru.dutu@amd.com * @file 46 * Definition of BaseCache functions. 47 */ 48 49#include "mem/cache/base.hh" 50 51#include "base/compiler.hh" 52#include "base/logging.hh" 53#include "debug/Cache.hh" 54#include "debug/CachePort.hh" 55#include "debug/CacheRepl.hh" 56#include "debug/CacheVerbose.hh" 57#include "mem/cache/mshr.hh" 58#include "mem/cache/prefetch/base.hh" 59#include "mem/cache/queue_entry.hh" 60#include "params/BaseCache.hh" 61#include "params/WriteAllocator.hh" 62#include "sim/core.hh" 63 64class BaseMasterPort; 65class BaseSlavePort; 66 67using namespace std; 68 69BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name, 70 BaseCache *_cache, 71 const std::string &_label) 72 : QueuedSlavePort(_name, _cache, queue), 73 queue(*_cache, *this, true, _label), 74 blocked(false), mustSendRetry(false), 75 sendRetryEvent([this]{ processSendRetry(); }, _name) 76{ 77} 78 79BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size) 80 : MemObject(p), 81 cpuSidePort (p->name + ".cpu_side", this, "CpuSidePort"), 82 memSidePort(p->name + ".mem_side", this, "MemSidePort"), 83 mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below 84 writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below 85 tags(p->tags), 86 prefetcher(p->prefetcher), 87 writeAllocator(p->write_allocator), 88 writebackClean(p->writeback_clean), 89 tempBlockWriteback(nullptr), 90 writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); }, 91 name(), false, 92 EventBase::Delayed_Writeback_Pri), 93 blkSize(blk_size), 94 lookupLatency(p->tag_latency), 95 dataLatency(p->data_latency), 96 forwardLatency(p->tag_latency), 97 fillLatency(p->data_latency), 98 responseLatency(p->response_latency), 99 sequentialAccess(p->sequential_access), 100 numTarget(p->tgts_per_mshr), 101 forwardSnoops(true), 102 clusivity(p->clusivity), 103 isReadOnly(p->is_read_only), 104 blocked(0), 105 order(0), 106 noTargetMSHR(nullptr), 107 missCount(p->max_miss_count), 108 addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()), 109 system(p->system) 110{ 111 // the MSHR queue has no reserve entries as we check the MSHR 112 // queue on every single allocation, whereas the write queue has 113 // as many reserve entries as we have MSHRs, since every MSHR may 114 // eventually require a writeback, and we do not check the write 115 // buffer before committing to an MSHR 116 117 // forward snoops is overridden in init() once we can query 118 // whether the connected master is actually snooping or not 119 120 tempBlock = new TempCacheBlk(blkSize); 121 122 tags->tagsInit(); 123 if (prefetcher) 124 prefetcher->setCache(this); 125} 126 127BaseCache::~BaseCache() 128{ 129 delete tempBlock; 130} 131 132void 133BaseCache::CacheSlavePort::setBlocked() 134{ 135 assert(!blocked); 136 DPRINTF(CachePort, "Port is blocking new requests\n"); 137 blocked = true; 138 // if we already scheduled a retry in this cycle, but it has not yet 139 // happened, cancel it 140 if (sendRetryEvent.scheduled()) { 141 owner.deschedule(sendRetryEvent); 142 DPRINTF(CachePort, "Port descheduled retry\n"); 143 mustSendRetry = true; 144 } 145} 146 147void 148BaseCache::CacheSlavePort::clearBlocked() 149{ 150 assert(blocked); 151 DPRINTF(CachePort, "Port is accepting new requests\n"); 152 blocked = false; 153 if (mustSendRetry) { 154 // @TODO: need to find a better time (next cycle?) 155 owner.schedule(sendRetryEvent, curTick() + 1); 156 } 157} 158 159void 160BaseCache::CacheSlavePort::processSendRetry() 161{ 162 DPRINTF(CachePort, "Port is sending retry\n"); 163 164 // reset the flag and call retry 165 mustSendRetry = false; 166 sendRetryReq(); 167} 168 169Addr 170BaseCache::regenerateBlkAddr(CacheBlk* blk) 171{ 172 if (blk != tempBlock) { 173 return tags->regenerateBlkAddr(blk); 174 } else { 175 return tempBlock->getAddr(); 176 } 177} 178 179void 180BaseCache::init() 181{ 182 if (!cpuSidePort.isConnected() || !memSidePort.isConnected()) 183 fatal("Cache ports on %s are not connected\n", name()); 184 cpuSidePort.sendRangeChange(); 185 forwardSnoops = cpuSidePort.isSnooping(); 186} 187 188Port & 189BaseCache::getPort(const std::string &if_name, PortID idx) 190{ 191 if (if_name == "mem_side") { 192 return memSidePort; 193 } else if (if_name == "cpu_side") { 194 return cpuSidePort; 195 } else { 196 return MemObject::getPort(if_name, idx); 197 } 198} 199 200bool 201BaseCache::inRange(Addr addr) const 202{ 203 for (const auto& r : addrRanges) { 204 if (r.contains(addr)) { 205 return true; 206 } 207 } 208 return false; 209} 210 211void 212BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time) 213{ 214 if (pkt->needsResponse()) { 215 // These delays should have been consumed by now 216 assert(pkt->headerDelay == 0); 217 assert(pkt->payloadDelay == 0); 218 219 pkt->makeTimingResponse(); 220 221 // In this case we are considering request_time that takes 222 // into account the delay of the xbar, if any, and just 223 // lat, neglecting responseLatency, modelling hit latency 224 // just as the value of lat overriden by access(), which calls 225 // the calculateAccessLatency() function. 226 cpuSidePort.schedTimingResp(pkt, request_time); 227 } else { 228 DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__, 229 pkt->print()); 230 231 // queue the packet for deletion, as the sending cache is 232 // still relying on it; if the block is found in access(), 233 // CleanEvict and Writeback messages will be deleted 234 // here as well 235 pendingDelete.reset(pkt); 236 } 237} 238 239void 240BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk, 241 Tick forward_time, Tick request_time) 242{ 243 if (writeAllocator && 244 pkt && pkt->isWrite() && !pkt->req->isUncacheable()) { 245 writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(), 246 pkt->getBlockAddr(blkSize)); 247 } 248 249 if (mshr) { 250 /// MSHR hit 251 /// @note writebacks will be checked in getNextMSHR() 252 /// for any conflicting requests to the same block 253 254 //@todo remove hw_pf here 255 256 // Coalesce unless it was a software prefetch (see above). 257 if (pkt) { 258 assert(!pkt->isWriteback()); 259 // CleanEvicts corresponding to blocks which have 260 // outstanding requests in MSHRs are simply sunk here 261 if (pkt->cmd == MemCmd::CleanEvict) { 262 pendingDelete.reset(pkt); 263 } else if (pkt->cmd == MemCmd::WriteClean) { 264 // A WriteClean should never coalesce with any 265 // outstanding cache maintenance requests. 266 267 // We use forward_time here because there is an 268 // uncached memory write, forwarded to WriteBuffer. 269 allocateWriteBuffer(pkt, forward_time); 270 } else { 271 DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__, 272 pkt->print()); 273 274 assert(pkt->req->masterId() < system->maxMasters()); 275 mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++; 276 277 // We use forward_time here because it is the same 278 // considering new targets. We have multiple 279 // requests for the same address here. It 280 // specifies the latency to allocate an internal 281 // buffer and to schedule an event to the queued 282 // port and also takes into account the additional 283 // delay of the xbar. 284 mshr->allocateTarget(pkt, forward_time, order++, 285 allocOnFill(pkt->cmd)); 286 if (mshr->getNumTargets() == numTarget) { 287 noTargetMSHR = mshr; 288 setBlocked(Blocked_NoTargets); 289 // need to be careful with this... if this mshr isn't 290 // ready yet (i.e. time > curTick()), we don't want to 291 // move it ahead of mshrs that are ready 292 // mshrQueue.moveToFront(mshr); 293 } 294 } 295 } 296 } else { 297 // no MSHR 298 assert(pkt->req->masterId() < system->maxMasters()); 299 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++; 300 301 if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) { 302 // We use forward_time here because there is an 303 // writeback or writeclean, forwarded to WriteBuffer. 304 allocateWriteBuffer(pkt, forward_time); 305 } else { 306 if (blk && blk->isValid()) { 307 // If we have a write miss to a valid block, we 308 // need to mark the block non-readable. Otherwise 309 // if we allow reads while there's an outstanding 310 // write miss, the read could return stale data 311 // out of the cache block... a more aggressive 312 // system could detect the overlap (if any) and 313 // forward data out of the MSHRs, but we don't do 314 // that yet. Note that we do need to leave the 315 // block valid so that it stays in the cache, in 316 // case we get an upgrade response (and hence no 317 // new data) when the write miss completes. 318 // As long as CPUs do proper store/load forwarding 319 // internally, and have a sufficiently weak memory 320 // model, this is probably unnecessary, but at some 321 // point it must have seemed like we needed it... 322 assert((pkt->needsWritable() && !blk->isWritable()) || 323 pkt->req->isCacheMaintenance()); 324 blk->status &= ~BlkReadable; 325 } 326 // Here we are using forward_time, modelling the latency of 327 // a miss (outbound) just as forwardLatency, neglecting the 328 // lookupLatency component. 329 allocateMissBuffer(pkt, forward_time); 330 } 331 } 332} 333 334void 335BaseCache::recvTimingReq(PacketPtr pkt) 336{ 337 // anything that is merely forwarded pays for the forward latency and 338 // the delay provided by the crossbar 339 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 340 341 Cycles lat; 342 CacheBlk *blk = nullptr; 343 bool satisfied = false; 344 { 345 PacketList writebacks; 346 // Note that lat is passed by reference here. The function 347 // access() will set the lat value. 348 satisfied = access(pkt, blk, lat, writebacks); 349 350 // After the evicted blocks are selected, they must be forwarded 351 // to the write buffer to ensure they logically precede anything 352 // happening below 353 doWritebacks(writebacks, clockEdge(lat + forwardLatency)); 354 } 355 356 // Here we charge the headerDelay that takes into account the latencies 357 // of the bus, if the packet comes from it. 358 // The latency charged is just the value set by the access() function. 359 // In case of a hit we are neglecting response latency. 360 // In case of a miss we are neglecting forward latency. 361 Tick request_time = clockEdge(lat); 362 // Here we reset the timing of the packet. 363 pkt->headerDelay = pkt->payloadDelay = 0; 364 365 if (satisfied) { 366 // notify before anything else as later handleTimingReqHit might turn 367 // the packet in a response 368 ppHit->notify(pkt); 369 370 if (prefetcher && blk && blk->wasPrefetched()) { 371 blk->status &= ~BlkHWPrefetched; 372 } 373 374 handleTimingReqHit(pkt, blk, request_time); 375 } else { 376 handleTimingReqMiss(pkt, blk, forward_time, request_time); 377 378 ppMiss->notify(pkt); 379 } 380 381 if (prefetcher) { 382 // track time of availability of next prefetch, if any 383 Tick next_pf_time = prefetcher->nextPrefetchReadyTime(); 384 if (next_pf_time != MaxTick) { 385 schedMemSideSendEvent(next_pf_time); 386 } 387 } 388} 389 390void 391BaseCache::handleUncacheableWriteResp(PacketPtr pkt) 392{ 393 Tick completion_time = clockEdge(responseLatency) + 394 pkt->headerDelay + pkt->payloadDelay; 395 396 // Reset the bus additional time as it is now accounted for 397 pkt->headerDelay = pkt->payloadDelay = 0; 398 399 cpuSidePort.schedTimingResp(pkt, completion_time); 400} 401 402void 403BaseCache::recvTimingResp(PacketPtr pkt) 404{ 405 assert(pkt->isResponse()); 406 407 // all header delay should be paid for by the crossbar, unless 408 // this is a prefetch response from above 409 panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp, 410 "%s saw a non-zero packet delay\n", name()); 411 412 const bool is_error = pkt->isError(); 413 414 if (is_error) { 415 DPRINTF(Cache, "%s: Cache received %s with error\n", __func__, 416 pkt->print()); 417 } 418 419 DPRINTF(Cache, "%s: Handling response %s\n", __func__, 420 pkt->print()); 421 422 // if this is a write, we should be looking at an uncacheable 423 // write 424 if (pkt->isWrite()) { 425 assert(pkt->req->isUncacheable()); 426 handleUncacheableWriteResp(pkt); 427 return; 428 } 429 430 // we have dealt with any (uncacheable) writes above, from here on 431 // we know we are dealing with an MSHR due to a miss or a prefetch 432 MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState()); 433 assert(mshr); 434 435 if (mshr == noTargetMSHR) { 436 // we always clear at least one target 437 clearBlocked(Blocked_NoTargets); 438 noTargetMSHR = nullptr; 439 } 440 441 // Initial target is used just for stats 442 QueueEntry::Target *initial_tgt = mshr->getTarget(); 443 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex(); 444 Tick miss_latency = curTick() - initial_tgt->recvTime; 445 446 if (pkt->req->isUncacheable()) { 447 assert(pkt->req->masterId() < system->maxMasters()); 448 mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] += 449 miss_latency; 450 } else { 451 assert(pkt->req->masterId() < system->maxMasters()); 452 mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] += 453 miss_latency; 454 } 455 456 PacketList writebacks; 457 458 bool is_fill = !mshr->isForward && 459 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp || 460 mshr->wasWholeLineWrite); 461 462 // make sure that if the mshr was due to a whole line write then 463 // the response is an invalidation 464 assert(!mshr->wasWholeLineWrite || pkt->isInvalidate()); 465 466 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure()); 467 468 if (is_fill && !is_error) { 469 DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n", 470 pkt->getAddr()); 471 472 const bool allocate = (writeAllocator && mshr->wasWholeLineWrite) ? 473 writeAllocator->allocate() : mshr->allocOnFill(); 474 blk = handleFill(pkt, blk, writebacks, allocate); 475 assert(blk != nullptr); 476 ppFill->notify(pkt); 477 } 478 479 if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) { 480 // The block was marked not readable while there was a pending 481 // cache maintenance operation, restore its flag. 482 blk->status |= BlkReadable; 483 484 // This was a cache clean operation (without invalidate) 485 // and we have a copy of the block already. Since there 486 // is no invalidation, we can promote targets that don't 487 // require a writable copy 488 mshr->promoteReadable(); 489 } 490 491 if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) { 492 // If at this point the referenced block is writable and the 493 // response is not a cache invalidate, we promote targets that 494 // were deferred as we couldn't guarrantee a writable copy 495 mshr->promoteWritable(); 496 } 497 498 serviceMSHRTargets(mshr, pkt, blk); 499 500 if (mshr->promoteDeferredTargets()) { 501 // avoid later read getting stale data while write miss is 502 // outstanding.. see comment in timingAccess() 503 if (blk) { 504 blk->status &= ~BlkReadable; 505 } 506 mshrQueue.markPending(mshr); 507 schedMemSideSendEvent(clockEdge() + pkt->payloadDelay); 508 } else { 509 // while we deallocate an mshr from the queue we still have to 510 // check the isFull condition before and after as we might 511 // have been using the reserved entries already 512 const bool was_full = mshrQueue.isFull(); 513 mshrQueue.deallocate(mshr); 514 if (was_full && !mshrQueue.isFull()) { 515 clearBlocked(Blocked_NoMSHRs); 516 } 517 518 // Request the bus for a prefetch if this deallocation freed enough 519 // MSHRs for a prefetch to take place 520 if (prefetcher && mshrQueue.canPrefetch()) { 521 Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(), 522 clockEdge()); 523 if (next_pf_time != MaxTick) 524 schedMemSideSendEvent(next_pf_time); 525 } 526 } 527 528 // if we used temp block, check to see if its valid and then clear it out 529 if (blk == tempBlock && tempBlock->isValid()) { 530 evictBlock(blk, writebacks); 531 } 532 533 const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 534 // copy writebacks to write buffer 535 doWritebacks(writebacks, forward_time); 536 537 DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print()); 538 delete pkt; 539} 540 541 542Tick 543BaseCache::recvAtomic(PacketPtr pkt) 544{ 545 // should assert here that there are no outstanding MSHRs or 546 // writebacks... that would mean that someone used an atomic 547 // access in timing mode 548 549 // We use lookupLatency here because it is used to specify the latency 550 // to access. 551 Cycles lat = lookupLatency; 552 553 CacheBlk *blk = nullptr; 554 PacketList writebacks; 555 bool satisfied = access(pkt, blk, lat, writebacks); 556 557 if (pkt->isClean() && blk && blk->isDirty()) { 558 // A cache clean opearation is looking for a dirty 559 // block. If a dirty block is encountered a WriteClean 560 // will update any copies to the path to the memory 561 // until the point of reference. 562 DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n", 563 __func__, pkt->print(), blk->print()); 564 PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id); 565 writebacks.push_back(wb_pkt); 566 pkt->setSatisfied(); 567 } 568 569 // handle writebacks resulting from the access here to ensure they 570 // logically precede anything happening below 571 doWritebacksAtomic(writebacks); 572 assert(writebacks.empty()); 573 574 if (!satisfied) { 575 lat += handleAtomicReqMiss(pkt, blk, writebacks); 576 } 577 578 // Note that we don't invoke the prefetcher at all in atomic mode. 579 // It's not clear how to do it properly, particularly for 580 // prefetchers that aggressively generate prefetch candidates and 581 // rely on bandwidth contention to throttle them; these will tend 582 // to pollute the cache in atomic mode since there is no bandwidth 583 // contention. If we ever do want to enable prefetching in atomic 584 // mode, though, this is the place to do it... see timingAccess() 585 // for an example (though we'd want to issue the prefetch(es) 586 // immediately rather than calling requestMemSideBus() as we do 587 // there). 588 589 // do any writebacks resulting from the response handling 590 doWritebacksAtomic(writebacks); 591 592 // if we used temp block, check to see if its valid and if so 593 // clear it out, but only do so after the call to recvAtomic is 594 // finished so that any downstream observers (such as a snoop 595 // filter), first see the fill, and only then see the eviction 596 if (blk == tempBlock && tempBlock->isValid()) { 597 // the atomic CPU calls recvAtomic for fetch and load/store 598 // sequentuially, and we may already have a tempBlock 599 // writeback from the fetch that we have not yet sent 600 if (tempBlockWriteback) { 601 // if that is the case, write the prevoius one back, and 602 // do not schedule any new event 603 writebackTempBlockAtomic(); 604 } else { 605 // the writeback/clean eviction happens after the call to 606 // recvAtomic has finished (but before any successive 607 // calls), so that the response handling from the fill is 608 // allowed to happen first 609 schedule(writebackTempBlockAtomicEvent, curTick()); 610 } 611 612 tempBlockWriteback = evictBlock(blk); 613 } 614 615 if (pkt->needsResponse()) { 616 pkt->makeAtomicResponse(); 617 } 618 619 return lat * clockPeriod(); 620} 621 622void 623BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side) 624{ 625 Addr blk_addr = pkt->getBlockAddr(blkSize); 626 bool is_secure = pkt->isSecure(); 627 CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure); 628 MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure); 629 630 pkt->pushLabel(name()); 631 632 CacheBlkPrintWrapper cbpw(blk); 633 634 // Note that just because an L2/L3 has valid data doesn't mean an 635 // L1 doesn't have a more up-to-date modified copy that still 636 // needs to be found. As a result we always update the request if 637 // we have it, but only declare it satisfied if we are the owner. 638 639 // see if we have data at all (owned or otherwise) 640 bool have_data = blk && blk->isValid() 641 && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize, 642 blk->data); 643 644 // data we have is dirty if marked as such or if we have an 645 // in-service MSHR that is pending a modified line 646 bool have_dirty = 647 have_data && (blk->isDirty() || 648 (mshr && mshr->inService && mshr->isPendingModified())); 649 650 bool done = have_dirty || 651 cpuSidePort.trySatisfyFunctional(pkt) || 652 mshrQueue.trySatisfyFunctional(pkt) || 653 writeBuffer.trySatisfyFunctional(pkt) || 654 memSidePort.trySatisfyFunctional(pkt); 655 656 DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(), 657 (blk && blk->isValid()) ? "valid " : "", 658 have_data ? "data " : "", done ? "done " : ""); 659 660 // We're leaving the cache, so pop cache->name() label 661 pkt->popLabel(); 662 663 if (done) { 664 pkt->makeResponse(); 665 } else { 666 // if it came as a request from the CPU side then make sure it 667 // continues towards the memory side 668 if (from_cpu_side) { 669 memSidePort.sendFunctional(pkt); 670 } else if (cpuSidePort.isSnooping()) { 671 // if it came from the memory side, it must be a snoop request 672 // and we should only forward it if we are forwarding snoops 673 cpuSidePort.sendFunctionalSnoop(pkt); 674 } 675 } 676} 677 678 679void 680BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt) 681{ 682 assert(pkt->isRequest()); 683 684 uint64_t overwrite_val; 685 bool overwrite_mem; 686 uint64_t condition_val64; 687 uint32_t condition_val32; 688 689 int offset = pkt->getOffset(blkSize); 690 uint8_t *blk_data = blk->data + offset; 691 692 assert(sizeof(uint64_t) >= pkt->getSize()); 693 694 overwrite_mem = true; 695 // keep a copy of our possible write value, and copy what is at the 696 // memory address into the packet 697 pkt->writeData((uint8_t *)&overwrite_val); 698 pkt->setData(blk_data); 699 700 if (pkt->req->isCondSwap()) { 701 if (pkt->getSize() == sizeof(uint64_t)) { 702 condition_val64 = pkt->req->getExtraData(); 703 overwrite_mem = !std::memcmp(&condition_val64, blk_data, 704 sizeof(uint64_t)); 705 } else if (pkt->getSize() == sizeof(uint32_t)) { 706 condition_val32 = (uint32_t)pkt->req->getExtraData(); 707 overwrite_mem = !std::memcmp(&condition_val32, blk_data, 708 sizeof(uint32_t)); 709 } else 710 panic("Invalid size for conditional read/write\n"); 711 } 712 713 if (overwrite_mem) { 714 std::memcpy(blk_data, &overwrite_val, pkt->getSize()); 715 blk->status |= BlkDirty; 716 } 717} 718 719QueueEntry* 720BaseCache::getNextQueueEntry() 721{ 722 // Check both MSHR queue and write buffer for potential requests, 723 // note that null does not mean there is no request, it could 724 // simply be that it is not ready 725 MSHR *miss_mshr = mshrQueue.getNext(); 726 WriteQueueEntry *wq_entry = writeBuffer.getNext(); 727 728 // If we got a write buffer request ready, first priority is a 729 // full write buffer, otherwise we favour the miss requests 730 if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) { 731 // need to search MSHR queue for conflicting earlier miss. 732 MSHR *conflict_mshr = mshrQueue.findPending(wq_entry); 733 734 if (conflict_mshr && conflict_mshr->order < wq_entry->order) { 735 // Service misses in order until conflict is cleared. 736 return conflict_mshr; 737 738 // @todo Note that we ignore the ready time of the conflict here 739 } 740 741 // No conflicts; issue write 742 return wq_entry; 743 } else if (miss_mshr) { 744 // need to check for conflicting earlier writeback 745 WriteQueueEntry *conflict_mshr = writeBuffer.findPending(miss_mshr); 746 if (conflict_mshr) { 747 // not sure why we don't check order here... it was in the 748 // original code but commented out. 749 750 // The only way this happens is if we are 751 // doing a write and we didn't have permissions 752 // then subsequently saw a writeback (owned got evicted) 753 // We need to make sure to perform the writeback first 754 // To preserve the dirty data, then we can issue the write 755 756 // should we return wq_entry here instead? I.e. do we 757 // have to flush writes in order? I don't think so... not 758 // for Alpha anyway. Maybe for x86? 759 return conflict_mshr; 760 761 // @todo Note that we ignore the ready time of the conflict here 762 } 763 764 // No conflicts; issue read 765 return miss_mshr; 766 } 767 768 // fall through... no pending requests. Try a prefetch. 769 assert(!miss_mshr && !wq_entry); 770 if (prefetcher && mshrQueue.canPrefetch()) { 771 // If we have a miss queue slot, we can try a prefetch 772 PacketPtr pkt = prefetcher->getPacket(); 773 if (pkt) { 774 Addr pf_addr = pkt->getBlockAddr(blkSize); 775 if (!tags->findBlock(pf_addr, pkt->isSecure()) && 776 !mshrQueue.findMatch(pf_addr, pkt->isSecure()) && 777 !writeBuffer.findMatch(pf_addr, pkt->isSecure())) { 778 // Update statistic on number of prefetches issued 779 // (hwpf_mshr_misses) 780 assert(pkt->req->masterId() < system->maxMasters()); 781 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++; 782 783 // allocate an MSHR and return it, note 784 // that we send the packet straight away, so do not 785 // schedule the send 786 return allocateMissBuffer(pkt, curTick(), false); 787 } else { 788 // free the request and packet 789 delete pkt; 790 } 791 } 792 } 793 794 return nullptr; 795} 796 797void 798BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool) 799{ 800 assert(pkt->isRequest()); 801 802 assert(blk && blk->isValid()); 803 // Occasionally this is not true... if we are a lower-level cache 804 // satisfying a string of Read and ReadEx requests from 805 // upper-level caches, a Read will mark the block as shared but we 806 // can satisfy a following ReadEx anyway since we can rely on the 807 // Read requester(s) to have buffered the ReadEx snoop and to 808 // invalidate their blocks after receiving them. 809 // assert(!pkt->needsWritable() || blk->isWritable()); 810 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize); 811 812 // Check RMW operations first since both isRead() and 813 // isWrite() will be true for them 814 if (pkt->cmd == MemCmd::SwapReq) { 815 if (pkt->isAtomicOp()) { 816 // extract data from cache and save it into the data field in 817 // the packet as a return value from this atomic op 818 int offset = tags->extractBlkOffset(pkt->getAddr()); 819 uint8_t *blk_data = blk->data + offset; 820 pkt->setData(blk_data); 821 822 // execute AMO operation 823 (*(pkt->getAtomicOp()))(blk_data); 824 825 // set block status to dirty 826 blk->status |= BlkDirty; 827 } else { 828 cmpAndSwap(blk, pkt); 829 } 830 } else if (pkt->isWrite()) { 831 // we have the block in a writable state and can go ahead, 832 // note that the line may be also be considered writable in 833 // downstream caches along the path to memory, but always 834 // Exclusive, and never Modified 835 assert(blk->isWritable()); 836 // Write or WriteLine at the first cache with block in writable state 837 if (blk->checkWrite(pkt)) { 838 pkt->writeDataToBlock(blk->data, blkSize); 839 } 840 // Always mark the line as dirty (and thus transition to the 841 // Modified state) even if we are a failed StoreCond so we 842 // supply data to any snoops that have appended themselves to 843 // this cache before knowing the store will fail. 844 blk->status |= BlkDirty; 845 DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print()); 846 } else if (pkt->isRead()) { 847 if (pkt->isLLSC()) { 848 blk->trackLoadLocked(pkt); 849 } 850 851 // all read responses have a data payload 852 assert(pkt->hasRespData()); 853 pkt->setDataFromBlock(blk->data, blkSize); 854 } else if (pkt->isUpgrade()) { 855 // sanity check 856 assert(!pkt->hasSharers()); 857 858 if (blk->isDirty()) { 859 // we were in the Owned state, and a cache above us that 860 // has the line in Shared state needs to be made aware 861 // that the data it already has is in fact dirty 862 pkt->setCacheResponding(); 863 blk->status &= ~BlkDirty; 864 } 865 } else if (pkt->isClean()) { 866 blk->status &= ~BlkDirty; 867 } else { 868 assert(pkt->isInvalidate()); 869 invalidateBlock(blk); 870 DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__, 871 pkt->print()); 872 } 873} 874 875///////////////////////////////////////////////////// 876// 877// Access path: requests coming in from the CPU side 878// 879///////////////////////////////////////////////////// 880Cycles 881BaseCache::calculateTagOnlyLatency(const uint32_t delay, 882 const Cycles lookup_lat) const 883{ 884 // A tag-only access has to wait for the packet to arrive in order to 885 // perform the tag lookup. 886 return ticksToCycles(delay) + lookup_lat; 887} 888 889Cycles 890BaseCache::calculateAccessLatency(const CacheBlk* blk, const uint32_t delay, 891 const Cycles lookup_lat) const 892{ 893 Cycles lat(0); 894 895 if (blk != nullptr) { 896 // As soon as the access arrives, for sequential accesses first access 897 // tags, then the data entry. In the case of parallel accesses the 898 // latency is dictated by the slowest of tag and data latencies. 899 if (sequentialAccess) { 900 lat = ticksToCycles(delay) + lookup_lat + dataLatency; 901 } else { 902 lat = ticksToCycles(delay) + std::max(lookup_lat, dataLatency); 903 } 904 905 // Check if the block to be accessed is available. If not, apply the 906 // access latency on top of when the block is ready to be accessed. 907 const Tick tick = curTick() + delay; 908 const Tick when_ready = blk->getWhenReady(); 909 if (when_ready > tick && 910 ticksToCycles(when_ready - tick) > lat) { 911 lat += ticksToCycles(when_ready - tick); 912 } 913 } else { 914 // In case of a miss, we neglect the data access in a parallel 915 // configuration (i.e., the data access will be stopped as soon as 916 // we find out it is a miss), and use the tag-only latency. 917 lat = calculateTagOnlyLatency(delay, lookup_lat); 918 } 919 920 return lat; 921} 922 923bool 924BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, 925 PacketList &writebacks) 926{ 927 // sanity check 928 assert(pkt->isRequest()); 929 930 chatty_assert(!(isReadOnly && pkt->isWrite()), 931 "Should never see a write in a read-only cache %s\n", 932 name()); 933 934 // Access block in the tags 935 Cycles tag_latency(0); 936 blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), tag_latency); 937 938 DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(), 939 blk ? "hit " + blk->print() : "miss"); 940 941 if (pkt->req->isCacheMaintenance()) { 942 // A cache maintenance operation is always forwarded to the 943 // memory below even if the block is found in dirty state. 944 945 // We defer any changes to the state of the block until we 946 // create and mark as in service the mshr for the downstream 947 // packet. 948 949 // Calculate access latency on top of when the packet arrives. This 950 // takes into account the bus delay. 951 lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 952 953 return false; 954 } 955 956 if (pkt->isEviction()) { 957 // We check for presence of block in above caches before issuing 958 // Writeback or CleanEvict to write buffer. Therefore the only 959 // possible cases can be of a CleanEvict packet coming from above 960 // encountering a Writeback generated in this cache peer cache and 961 // waiting in the write buffer. Cases of upper level peer caches 962 // generating CleanEvict and Writeback or simply CleanEvict and 963 // CleanEvict almost simultaneously will be caught by snoops sent out 964 // by crossbar. 965 WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(), 966 pkt->isSecure()); 967 if (wb_entry) { 968 assert(wb_entry->getNumTargets() == 1); 969 PacketPtr wbPkt = wb_entry->getTarget()->pkt; 970 assert(wbPkt->isWriteback()); 971 972 if (pkt->isCleanEviction()) { 973 // The CleanEvict and WritebackClean snoops into other 974 // peer caches of the same level while traversing the 975 // crossbar. If a copy of the block is found, the 976 // packet is deleted in the crossbar. Hence, none of 977 // the other upper level caches connected to this 978 // cache have the block, so we can clear the 979 // BLOCK_CACHED flag in the Writeback if set and 980 // discard the CleanEvict by returning true. 981 wbPkt->clearBlockCached(); 982 983 // A clean evict does not need to access the data array 984 lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 985 986 return true; 987 } else { 988 assert(pkt->cmd == MemCmd::WritebackDirty); 989 // Dirty writeback from above trumps our clean 990 // writeback... discard here 991 // Note: markInService will remove entry from writeback buffer. 992 markInService(wb_entry); 993 delete wbPkt; 994 } 995 } 996 } 997 998 // Writeback handling is special case. We can write the block into 999 // the cache without having a writeable copy (or any copy at all). 1000 if (pkt->isWriteback()) { 1001 assert(blkSize == pkt->getSize()); 1002 1003 // we could get a clean writeback while we are having 1004 // outstanding accesses to a block, do the simple thing for 1005 // now and drop the clean writeback so that we do not upset 1006 // any ordering/decisions about ownership already taken 1007 if (pkt->cmd == MemCmd::WritebackClean && 1008 mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) { 1009 DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, " 1010 "dropping\n", pkt->getAddr()); 1011 1012 // A writeback searches for the block, then writes the data. 1013 // As the writeback is being dropped, the data is not touched, 1014 // and we just had to wait for the time to find a match in the 1015 // MSHR. As of now assume a mshr queue search takes as long as 1016 // a tag lookup for simplicity. 1017 lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 1018 1019 return true; 1020 } 1021 1022 if (!blk) { 1023 // need to do a replacement 1024 blk = allocateBlock(pkt, writebacks); 1025 if (!blk) { 1026 // no replaceable block available: give up, fwd to next level. 1027 incMissCount(pkt); 1028 1029 // A writeback searches for the block, then writes the data. 1030 // As the block could not be found, it was a tag-only access. 1031 lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 1032 1033 return false; 1034 } 1035 1036 blk->status |= BlkReadable; 1037 } 1038 // only mark the block dirty if we got a writeback command, 1039 // and leave it as is for a clean writeback 1040 if (pkt->cmd == MemCmd::WritebackDirty) { 1041 // TODO: the coherent cache can assert(!blk->isDirty()); 1042 blk->status |= BlkDirty; 1043 } 1044 // if the packet does not have sharers, it is passing 1045 // writable, and we got the writeback in Modified or Exclusive 1046 // state, if not we are in the Owned or Shared state 1047 if (!pkt->hasSharers()) { 1048 blk->status |= BlkWritable; 1049 } 1050 // nothing else to do; writeback doesn't expect response 1051 assert(!pkt->needsResponse()); 1052 pkt->writeDataToBlock(blk->data, blkSize); 1053 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print()); 1054 incHitCount(pkt); 1055 1056 // A writeback searches for the block, then writes the data 1057 lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency); 1058 1059 // When the packet metadata arrives, the tag lookup will be done while 1060 // the payload is arriving. Then the block will be ready to access as 1061 // soon as the fill is done 1062 blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay + 1063 std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay)); 1064 1065 return true; 1066 } else if (pkt->cmd == MemCmd::CleanEvict) { 1067 // A CleanEvict does not need to access the data array 1068 lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 1069 1070 if (blk) { 1071 // Found the block in the tags, need to stop CleanEvict from 1072 // propagating further down the hierarchy. Returning true will 1073 // treat the CleanEvict like a satisfied write request and delete 1074 // it. 1075 return true; 1076 } 1077 // We didn't find the block here, propagate the CleanEvict further 1078 // down the memory hierarchy. Returning false will treat the CleanEvict 1079 // like a Writeback which could not find a replaceable block so has to 1080 // go to next level. 1081 return false; 1082 } else if (pkt->cmd == MemCmd::WriteClean) { 1083 // WriteClean handling is a special case. We can allocate a 1084 // block directly if it doesn't exist and we can update the 1085 // block immediately. The WriteClean transfers the ownership 1086 // of the block as well. 1087 assert(blkSize == pkt->getSize()); 1088 1089 if (!blk) { 1090 if (pkt->writeThrough()) { 1091 // A writeback searches for the block, then writes the data. 1092 // As the block could not be found, it was a tag-only access. 1093 lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 1094 1095 // if this is a write through packet, we don't try to 1096 // allocate if the block is not present 1097 return false; 1098 } else { 1099 // a writeback that misses needs to allocate a new block 1100 blk = allocateBlock(pkt, writebacks); 1101 if (!blk) { 1102 // no replaceable block available: give up, fwd to 1103 // next level. 1104 incMissCount(pkt); 1105 1106 // A writeback searches for the block, then writes the 1107 // data. As the block could not be found, it was a tag-only 1108 // access. 1109 lat = calculateTagOnlyLatency(pkt->headerDelay, 1110 tag_latency); 1111 1112 return false; 1113 } 1114 1115 blk->status |= BlkReadable; 1116 } 1117 } 1118 1119 // at this point either this is a writeback or a write-through 1120 // write clean operation and the block is already in this 1121 // cache, we need to update the data and the block flags 1122 assert(blk); 1123 // TODO: the coherent cache can assert(!blk->isDirty()); 1124 if (!pkt->writeThrough()) { 1125 blk->status |= BlkDirty; 1126 } 1127 // nothing else to do; writeback doesn't expect response 1128 assert(!pkt->needsResponse()); 1129 pkt->writeDataToBlock(blk->data, blkSize); 1130 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print()); 1131 1132 incHitCount(pkt); 1133 1134 // A writeback searches for the block, then writes the data 1135 lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency); 1136 1137 // When the packet metadata arrives, the tag lookup will be done while 1138 // the payload is arriving. Then the block will be ready to access as 1139 // soon as the fill is done 1140 blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay + 1141 std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay)); 1142 1143 // if this a write-through packet it will be sent to cache 1144 // below 1145 return !pkt->writeThrough(); 1146 } else if (blk && (pkt->needsWritable() ? blk->isWritable() : 1147 blk->isReadable())) { 1148 // OK to satisfy access 1149 incHitCount(pkt); 1150 1151 // Calculate access latency based on the need to access the data array 1152 if (pkt->isRead() || pkt->isWrite()) { 1153 lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency); 1154 } else { 1155 lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency); 1156 } 1157 1158 satisfyRequest(pkt, blk); 1159 maintainClusivity(pkt->fromCache(), blk); 1160 1161 return true; 1162 } 1163 1164 // Can't satisfy access normally... either no block (blk == nullptr) 1165 // or have block but need writable 1166 1167 incMissCount(pkt); 1168 1169 lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency); 1170 1171 if (!blk && pkt->isLLSC() && pkt->isWrite()) { 1172 // complete miss on store conditional... just give up now 1173 pkt->req->setExtraData(0); 1174 return true; 1175 } 1176 1177 return false; 1178} 1179 1180void 1181BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk) 1182{ 1183 if (from_cache && blk && blk->isValid() && !blk->isDirty() && 1184 clusivity == Enums::mostly_excl) { 1185 // if we have responded to a cache, and our block is still 1186 // valid, but not dirty, and this cache is mostly exclusive 1187 // with respect to the cache above, drop the block 1188 invalidateBlock(blk); 1189 } 1190} 1191 1192CacheBlk* 1193BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks, 1194 bool allocate) 1195{ 1196 assert(pkt->isResponse()); 1197 Addr addr = pkt->getAddr(); 1198 bool is_secure = pkt->isSecure(); 1199#if TRACING_ON 1200 CacheBlk::State old_state = blk ? blk->status : 0; 1201#endif 1202 1203 // When handling a fill, we should have no writes to this line. 1204 assert(addr == pkt->getBlockAddr(blkSize)); 1205 assert(!writeBuffer.findMatch(addr, is_secure)); 1206 1207 if (!blk) { 1208 // better have read new data... 1209 assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp); 1210 1211 // need to do a replacement if allocating, otherwise we stick 1212 // with the temporary storage 1213 blk = allocate ? allocateBlock(pkt, writebacks) : nullptr; 1214 1215 if (!blk) { 1216 // No replaceable block or a mostly exclusive 1217 // cache... just use temporary storage to complete the 1218 // current request and then get rid of it 1219 blk = tempBlock; 1220 tempBlock->insert(addr, is_secure); 1221 DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr, 1222 is_secure ? "s" : "ns"); 1223 } 1224 } else { 1225 // existing block... probably an upgrade 1226 // don't clear block status... if block is already dirty we 1227 // don't want to lose that 1228 } 1229 1230 // Block is guaranteed to be valid at this point 1231 assert(blk->isValid()); 1232 assert(blk->isSecure() == is_secure); 1233 assert(regenerateBlkAddr(blk) == addr); 1234 1235 blk->status |= BlkReadable; 1236 1237 // sanity check for whole-line writes, which should always be 1238 // marked as writable as part of the fill, and then later marked 1239 // dirty as part of satisfyRequest 1240 if (pkt->cmd == MemCmd::InvalidateResp) { 1241 assert(!pkt->hasSharers()); 1242 } 1243 1244 // here we deal with setting the appropriate state of the line, 1245 // and we start by looking at the hasSharers flag, and ignore the 1246 // cacheResponding flag (normally signalling dirty data) if the 1247 // packet has sharers, thus the line is never allocated as Owned 1248 // (dirty but not writable), and always ends up being either 1249 // Shared, Exclusive or Modified, see Packet::setCacheResponding 1250 // for more details 1251 if (!pkt->hasSharers()) { 1252 // we could get a writable line from memory (rather than a 1253 // cache) even in a read-only cache, note that we set this bit 1254 // even for a read-only cache, possibly revisit this decision 1255 blk->status |= BlkWritable; 1256 1257 // check if we got this via cache-to-cache transfer (i.e., from a 1258 // cache that had the block in Modified or Owned state) 1259 if (pkt->cacheResponding()) { 1260 // we got the block in Modified state, and invalidated the 1261 // owners copy 1262 blk->status |= BlkDirty; 1263 1264 chatty_assert(!isReadOnly, "Should never see dirty snoop response " 1265 "in read-only cache %s\n", name()); 1266 } 1267 } 1268 1269 DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n", 1270 addr, is_secure ? "s" : "ns", old_state, blk->print()); 1271 1272 // if we got new data, copy it in (checking for a read response 1273 // and a response that has data is the same in the end) 1274 if (pkt->isRead()) { 1275 // sanity checks 1276 assert(pkt->hasData()); 1277 assert(pkt->getSize() == blkSize); 1278 1279 pkt->writeDataToBlock(blk->data, blkSize); 1280 } 1281 // The block will be ready when the payload arrives and the fill is done 1282 blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay + 1283 pkt->payloadDelay); 1284 1285 return blk; 1286} 1287 1288CacheBlk* 1289BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks) 1290{ 1291 // Get address 1292 const Addr addr = pkt->getAddr(); 1293 1294 // Get secure bit 1295 const bool is_secure = pkt->isSecure(); 1296 1297 // Find replacement victim 1298 std::vector<CacheBlk*> evict_blks; 1299 CacheBlk *victim = tags->findVictim(addr, is_secure, evict_blks); 1300 1301 // It is valid to return nullptr if there is no victim 1302 if (!victim) 1303 return nullptr; 1304 1305 // Print victim block's information 1306 DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print()); 1307 1308 // Check for transient state allocations. If any of the entries listed 1309 // for eviction has a transient state, the allocation fails 1310 for (const auto& blk : evict_blks) { 1311 if (blk->isValid()) { 1312 Addr repl_addr = regenerateBlkAddr(blk); 1313 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure()); 1314 if (repl_mshr) { 1315 // must be an outstanding upgrade or clean request 1316 // on a block we're about to replace... 1317 assert((!blk->isWritable() && repl_mshr->needsWritable()) || 1318 repl_mshr->isCleaning()); 1319 1320 // too hard to replace block with transient state 1321 // allocation failed, block not inserted 1322 return nullptr; 1323 } 1324 } 1325 } 1326 1327 // The victim will be replaced by a new entry, so increase the replacement 1328 // counter if a valid block is being replaced 1329 if (victim->isValid()) { 1330 DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx " 1331 "(%s): %s\n", regenerateBlkAddr(victim), 1332 victim->isSecure() ? "s" : "ns", 1333 addr, is_secure ? "s" : "ns", 1334 victim->isDirty() ? "writeback" : "clean"); 1335 1336 replacements++; 1337 } 1338 1339 // Evict valid blocks associated to this victim block 1340 for (const auto& blk : evict_blks) { 1341 if (blk->isValid()) { 1342 if (blk->wasPrefetched()) { 1343 unusedPrefetches++; 1344 } 1345 1346 evictBlock(blk, writebacks); 1347 } 1348 } 1349 1350 // Insert new block at victimized entry 1351 tags->insertBlock(pkt, victim); 1352 1353 return victim; 1354} 1355 1356void 1357BaseCache::invalidateBlock(CacheBlk *blk) 1358{ 1359 // If handling a block present in the Tags, let it do its invalidation 1360 // process, which will update stats and invalidate the block itself 1361 if (blk != tempBlock) { 1362 tags->invalidate(blk); 1363 } else { 1364 tempBlock->invalidate(); 1365 } 1366} 1367 1368void 1369BaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks) 1370{ 1371 PacketPtr pkt = evictBlock(blk); 1372 if (pkt) { 1373 writebacks.push_back(pkt); 1374 } 1375} 1376 1377PacketPtr 1378BaseCache::writebackBlk(CacheBlk *blk) 1379{ 1380 chatty_assert(!isReadOnly || writebackClean, 1381 "Writeback from read-only cache"); 1382 assert(blk && blk->isValid() && (blk->isDirty() || writebackClean)); 1383 1384 writebacks[Request::wbMasterId]++; 1385 1386 RequestPtr req = std::make_shared<Request>( 1387 regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId); 1388 1389 if (blk->isSecure()) 1390 req->setFlags(Request::SECURE); 1391 1392 req->taskId(blk->task_id); 1393 1394 PacketPtr pkt = 1395 new Packet(req, blk->isDirty() ? 1396 MemCmd::WritebackDirty : MemCmd::WritebackClean); 1397 1398 DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n", 1399 pkt->print(), blk->isWritable(), blk->isDirty()); 1400 1401 if (blk->isWritable()) { 1402 // not asserting shared means we pass the block in modified 1403 // state, mark our own block non-writeable 1404 blk->status &= ~BlkWritable; 1405 } else { 1406 // we are in the Owned state, tell the receiver 1407 pkt->setHasSharers(); 1408 } 1409 1410 // make sure the block is not marked dirty 1411 blk->status &= ~BlkDirty; 1412 1413 pkt->allocate(); 1414 pkt->setDataFromBlock(blk->data, blkSize); 1415 1416 return pkt; 1417} 1418 1419PacketPtr 1420BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id) 1421{ 1422 RequestPtr req = std::make_shared<Request>( 1423 regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId); 1424 1425 if (blk->isSecure()) { 1426 req->setFlags(Request::SECURE); 1427 } 1428 req->taskId(blk->task_id); 1429 1430 PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id); 1431 1432 if (dest) { 1433 req->setFlags(dest); 1434 pkt->setWriteThrough(); 1435 } 1436 1437 DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(), 1438 blk->isWritable(), blk->isDirty()); 1439 1440 if (blk->isWritable()) { 1441 // not asserting shared means we pass the block in modified 1442 // state, mark our own block non-writeable 1443 blk->status &= ~BlkWritable; 1444 } else { 1445 // we are in the Owned state, tell the receiver 1446 pkt->setHasSharers(); 1447 } 1448 1449 // make sure the block is not marked dirty 1450 blk->status &= ~BlkDirty; 1451 1452 pkt->allocate(); 1453 pkt->setDataFromBlock(blk->data, blkSize); 1454 1455 return pkt; 1456} 1457 1458 1459void 1460BaseCache::memWriteback() 1461{ 1462 tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); }); 1463} 1464 1465void 1466BaseCache::memInvalidate() 1467{ 1468 tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); }); 1469} 1470 1471bool 1472BaseCache::isDirty() const 1473{ 1474 return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); }); 1475} 1476 1477bool 1478BaseCache::coalesce() const 1479{ 1480 return writeAllocator && writeAllocator->coalesce(); 1481} 1482 1483void 1484BaseCache::writebackVisitor(CacheBlk &blk) 1485{ 1486 if (blk.isDirty()) { 1487 assert(blk.isValid()); 1488 1489 RequestPtr request = std::make_shared<Request>( 1490 regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId); 1491 1492 request->taskId(blk.task_id); 1493 if (blk.isSecure()) { 1494 request->setFlags(Request::SECURE); 1495 } 1496 1497 Packet packet(request, MemCmd::WriteReq); 1498 packet.dataStatic(blk.data); 1499 1500 memSidePort.sendFunctional(&packet); 1501 1502 blk.status &= ~BlkDirty; 1503 } 1504} 1505 1506void 1507BaseCache::invalidateVisitor(CacheBlk &blk) 1508{ 1509 if (blk.isDirty()) 1510 warn_once("Invalidating dirty cache lines. " \ 1511 "Expect things to break.\n"); 1512 1513 if (blk.isValid()) { 1514 assert(!blk.isDirty()); 1515 invalidateBlock(&blk); 1516 } 1517} 1518 1519Tick 1520BaseCache::nextQueueReadyTime() const 1521{ 1522 Tick nextReady = std::min(mshrQueue.nextReadyTime(), 1523 writeBuffer.nextReadyTime()); 1524 1525 // Don't signal prefetch ready time if no MSHRs available 1526 // Will signal once enoguh MSHRs are deallocated 1527 if (prefetcher && mshrQueue.canPrefetch()) { 1528 nextReady = std::min(nextReady, 1529 prefetcher->nextPrefetchReadyTime()); 1530 } 1531 1532 return nextReady; 1533} 1534 1535 1536bool 1537BaseCache::sendMSHRQueuePacket(MSHR* mshr) 1538{ 1539 assert(mshr); 1540 1541 // use request from 1st target 1542 PacketPtr tgt_pkt = mshr->getTarget()->pkt; 1543 1544 DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print()); 1545 1546 // if the cache is in write coalescing mode or (additionally) in 1547 // no allocation mode, and we have a write packet with an MSHR 1548 // that is not a whole-line write (due to incompatible flags etc), 1549 // then reset the write mode 1550 if (writeAllocator && writeAllocator->coalesce() && tgt_pkt->isWrite()) { 1551 if (!mshr->isWholeLineWrite()) { 1552 // if we are currently write coalescing, hold on the 1553 // MSHR as many cycles extra as we need to completely 1554 // write a cache line 1555 if (writeAllocator->delay(mshr->blkAddr)) { 1556 Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod(); 1557 DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow " 1558 "for write coalescing\n", tgt_pkt->print(), delay); 1559 mshrQueue.delay(mshr, delay); 1560 return false; 1561 } else { 1562 writeAllocator->reset(); 1563 } 1564 } else { 1565 writeAllocator->resetDelay(mshr->blkAddr); 1566 } 1567 } 1568 1569 CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure); 1570 1571 // either a prefetch that is not present upstream, or a normal 1572 // MSHR request, proceed to get the packet to send downstream 1573 PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable(), 1574 mshr->isWholeLineWrite()); 1575 1576 mshr->isForward = (pkt == nullptr); 1577 1578 if (mshr->isForward) { 1579 // not a cache block request, but a response is expected 1580 // make copy of current packet to forward, keep current 1581 // copy for response handling 1582 pkt = new Packet(tgt_pkt, false, true); 1583 assert(!pkt->isWrite()); 1584 } 1585 1586 // play it safe and append (rather than set) the sender state, 1587 // as forwarded packets may already have existing state 1588 pkt->pushSenderState(mshr); 1589 1590 if (pkt->isClean() && blk && blk->isDirty()) { 1591 // A cache clean opearation is looking for a dirty block. Mark 1592 // the packet so that the destination xbar can determine that 1593 // there will be a follow-up write packet as well. 1594 pkt->setSatisfied(); 1595 } 1596 1597 if (!memSidePort.sendTimingReq(pkt)) { 1598 // we are awaiting a retry, but we 1599 // delete the packet and will be creating a new packet 1600 // when we get the opportunity 1601 delete pkt; 1602 1603 // note that we have now masked any requestBus and 1604 // schedSendEvent (we will wait for a retry before 1605 // doing anything), and this is so even if we do not 1606 // care about this packet and might override it before 1607 // it gets retried 1608 return true; 1609 } else { 1610 // As part of the call to sendTimingReq the packet is 1611 // forwarded to all neighbouring caches (and any caches 1612 // above them) as a snoop. Thus at this point we know if 1613 // any of the neighbouring caches are responding, and if 1614 // so, we know it is dirty, and we can determine if it is 1615 // being passed as Modified, making our MSHR the ordering 1616 // point 1617 bool pending_modified_resp = !pkt->hasSharers() && 1618 pkt->cacheResponding(); 1619 markInService(mshr, pending_modified_resp); 1620 1621 if (pkt->isClean() && blk && blk->isDirty()) { 1622 // A cache clean opearation is looking for a dirty 1623 // block. If a dirty block is encountered a WriteClean 1624 // will update any copies to the path to the memory 1625 // until the point of reference. 1626 DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n", 1627 __func__, pkt->print(), blk->print()); 1628 PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), 1629 pkt->id); 1630 PacketList writebacks; 1631 writebacks.push_back(wb_pkt); 1632 doWritebacks(writebacks, 0); 1633 } 1634 1635 return false; 1636 } 1637} 1638 1639bool 1640BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry) 1641{ 1642 assert(wq_entry); 1643 1644 // always a single target for write queue entries 1645 PacketPtr tgt_pkt = wq_entry->getTarget()->pkt; 1646 1647 DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print()); 1648 1649 // forward as is, both for evictions and uncacheable writes 1650 if (!memSidePort.sendTimingReq(tgt_pkt)) { 1651 // note that we have now masked any requestBus and 1652 // schedSendEvent (we will wait for a retry before 1653 // doing anything), and this is so even if we do not 1654 // care about this packet and might override it before 1655 // it gets retried 1656 return true; 1657 } else { 1658 markInService(wq_entry); 1659 return false; 1660 } 1661} 1662 1663void 1664BaseCache::serialize(CheckpointOut &cp) const 1665{ 1666 bool dirty(isDirty()); 1667 1668 if (dirty) { 1669 warn("*** The cache still contains dirty data. ***\n"); 1670 warn(" Make sure to drain the system using the correct flags.\n"); 1671 warn(" This checkpoint will not restore correctly " \ 1672 "and dirty data in the cache will be lost!\n"); 1673 } 1674 1675 // Since we don't checkpoint the data in the cache, any dirty data 1676 // will be lost when restoring from a checkpoint of a system that 1677 // wasn't drained properly. Flag the checkpoint as invalid if the 1678 // cache contains dirty data. 1679 bool bad_checkpoint(dirty); 1680 SERIALIZE_SCALAR(bad_checkpoint); 1681} 1682 1683void 1684BaseCache::unserialize(CheckpointIn &cp) 1685{ 1686 bool bad_checkpoint; 1687 UNSERIALIZE_SCALAR(bad_checkpoint); 1688 if (bad_checkpoint) { 1689 fatal("Restoring from checkpoints with dirty caches is not " 1690 "supported in the classic memory system. Please remove any " 1691 "caches or drain them properly before taking checkpoints.\n"); 1692 } 1693} 1694 1695void 1696BaseCache::regStats() 1697{ 1698 MemObject::regStats(); 1699 1700 using namespace Stats; 1701 1702 // Hit statistics 1703 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1704 MemCmd cmd(access_idx); 1705 const string &cstr = cmd.toString(); 1706 1707 hits[access_idx] 1708 .init(system->maxMasters()) 1709 .name(name() + "." + cstr + "_hits") 1710 .desc("number of " + cstr + " hits") 1711 .flags(total | nozero | nonan) 1712 ; 1713 for (int i = 0; i < system->maxMasters(); i++) { 1714 hits[access_idx].subname(i, system->getMasterName(i)); 1715 } 1716 } 1717 1718// These macros make it easier to sum the right subset of commands and 1719// to change the subset of commands that are considered "demand" vs 1720// "non-demand" 1721#define SUM_DEMAND(s) \ 1722 (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \ 1723 s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq]) 1724 1725// should writebacks be included here? prior code was inconsistent... 1726#define SUM_NON_DEMAND(s) \ 1727 (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq] + s[MemCmd::SoftPFExReq]) 1728 1729 demandHits 1730 .name(name() + ".demand_hits") 1731 .desc("number of demand (read+write) hits") 1732 .flags(total | nozero | nonan) 1733 ; 1734 demandHits = SUM_DEMAND(hits); 1735 for (int i = 0; i < system->maxMasters(); i++) { 1736 demandHits.subname(i, system->getMasterName(i)); 1737 } 1738 1739 overallHits 1740 .name(name() + ".overall_hits") 1741 .desc("number of overall hits") 1742 .flags(total | nozero | nonan) 1743 ; 1744 overallHits = demandHits + SUM_NON_DEMAND(hits); 1745 for (int i = 0; i < system->maxMasters(); i++) { 1746 overallHits.subname(i, system->getMasterName(i)); 1747 } 1748 1749 // Miss statistics 1750 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1751 MemCmd cmd(access_idx); 1752 const string &cstr = cmd.toString(); 1753 1754 misses[access_idx] 1755 .init(system->maxMasters()) 1756 .name(name() + "." + cstr + "_misses") 1757 .desc("number of " + cstr + " misses") 1758 .flags(total | nozero | nonan) 1759 ; 1760 for (int i = 0; i < system->maxMasters(); i++) { 1761 misses[access_idx].subname(i, system->getMasterName(i)); 1762 } 1763 } 1764 1765 demandMisses 1766 .name(name() + ".demand_misses") 1767 .desc("number of demand (read+write) misses") 1768 .flags(total | nozero | nonan) 1769 ; 1770 demandMisses = SUM_DEMAND(misses); 1771 for (int i = 0; i < system->maxMasters(); i++) { 1772 demandMisses.subname(i, system->getMasterName(i)); 1773 } 1774 1775 overallMisses 1776 .name(name() + ".overall_misses") 1777 .desc("number of overall misses") 1778 .flags(total | nozero | nonan) 1779 ; 1780 overallMisses = demandMisses + SUM_NON_DEMAND(misses); 1781 for (int i = 0; i < system->maxMasters(); i++) { 1782 overallMisses.subname(i, system->getMasterName(i)); 1783 } 1784 1785 // Miss latency statistics 1786 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1787 MemCmd cmd(access_idx); 1788 const string &cstr = cmd.toString(); 1789 1790 missLatency[access_idx] 1791 .init(system->maxMasters()) 1792 .name(name() + "." + cstr + "_miss_latency") 1793 .desc("number of " + cstr + " miss cycles") 1794 .flags(total | nozero | nonan) 1795 ; 1796 for (int i = 0; i < system->maxMasters(); i++) { 1797 missLatency[access_idx].subname(i, system->getMasterName(i)); 1798 } 1799 } 1800 1801 demandMissLatency 1802 .name(name() + ".demand_miss_latency") 1803 .desc("number of demand (read+write) miss cycles") 1804 .flags(total | nozero | nonan) 1805 ; 1806 demandMissLatency = SUM_DEMAND(missLatency); 1807 for (int i = 0; i < system->maxMasters(); i++) { 1808 demandMissLatency.subname(i, system->getMasterName(i)); 1809 } 1810 1811 overallMissLatency 1812 .name(name() + ".overall_miss_latency") 1813 .desc("number of overall miss cycles") 1814 .flags(total | nozero | nonan) 1815 ; 1816 overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency); 1817 for (int i = 0; i < system->maxMasters(); i++) { 1818 overallMissLatency.subname(i, system->getMasterName(i)); 1819 } 1820 1821 // access formulas 1822 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1823 MemCmd cmd(access_idx); 1824 const string &cstr = cmd.toString(); 1825 1826 accesses[access_idx] 1827 .name(name() + "." + cstr + "_accesses") 1828 .desc("number of " + cstr + " accesses(hits+misses)") 1829 .flags(total | nozero | nonan) 1830 ; 1831 accesses[access_idx] = hits[access_idx] + misses[access_idx]; 1832 1833 for (int i = 0; i < system->maxMasters(); i++) { 1834 accesses[access_idx].subname(i, system->getMasterName(i)); 1835 } 1836 } 1837 1838 demandAccesses 1839 .name(name() + ".demand_accesses") 1840 .desc("number of demand (read+write) accesses") 1841 .flags(total | nozero | nonan) 1842 ; 1843 demandAccesses = demandHits + demandMisses; 1844 for (int i = 0; i < system->maxMasters(); i++) { 1845 demandAccesses.subname(i, system->getMasterName(i)); 1846 } 1847 1848 overallAccesses 1849 .name(name() + ".overall_accesses") 1850 .desc("number of overall (read+write) accesses") 1851 .flags(total | nozero | nonan) 1852 ; 1853 overallAccesses = overallHits + overallMisses; 1854 for (int i = 0; i < system->maxMasters(); i++) { 1855 overallAccesses.subname(i, system->getMasterName(i)); 1856 } 1857 1858 // miss rate formulas 1859 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1860 MemCmd cmd(access_idx); 1861 const string &cstr = cmd.toString(); 1862 1863 missRate[access_idx] 1864 .name(name() + "." + cstr + "_miss_rate") 1865 .desc("miss rate for " + cstr + " accesses") 1866 .flags(total | nozero | nonan) 1867 ; 1868 missRate[access_idx] = misses[access_idx] / accesses[access_idx]; 1869 1870 for (int i = 0; i < system->maxMasters(); i++) { 1871 missRate[access_idx].subname(i, system->getMasterName(i)); 1872 } 1873 } 1874 1875 demandMissRate 1876 .name(name() + ".demand_miss_rate") 1877 .desc("miss rate for demand accesses") 1878 .flags(total | nozero | nonan) 1879 ; 1880 demandMissRate = demandMisses / demandAccesses; 1881 for (int i = 0; i < system->maxMasters(); i++) { 1882 demandMissRate.subname(i, system->getMasterName(i)); 1883 } 1884 1885 overallMissRate 1886 .name(name() + ".overall_miss_rate") 1887 .desc("miss rate for overall accesses") 1888 .flags(total | nozero | nonan) 1889 ; 1890 overallMissRate = overallMisses / overallAccesses; 1891 for (int i = 0; i < system->maxMasters(); i++) { 1892 overallMissRate.subname(i, system->getMasterName(i)); 1893 } 1894 1895 // miss latency formulas 1896 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1897 MemCmd cmd(access_idx); 1898 const string &cstr = cmd.toString(); 1899 1900 avgMissLatency[access_idx] 1901 .name(name() + "." + cstr + "_avg_miss_latency") 1902 .desc("average " + cstr + " miss latency") 1903 .flags(total | nozero | nonan) 1904 ; 1905 avgMissLatency[access_idx] = 1906 missLatency[access_idx] / misses[access_idx]; 1907 1908 for (int i = 0; i < system->maxMasters(); i++) { 1909 avgMissLatency[access_idx].subname(i, system->getMasterName(i)); 1910 } 1911 } 1912 1913 demandAvgMissLatency 1914 .name(name() + ".demand_avg_miss_latency") 1915 .desc("average overall miss latency") 1916 .flags(total | nozero | nonan) 1917 ; 1918 demandAvgMissLatency = demandMissLatency / demandMisses; 1919 for (int i = 0; i < system->maxMasters(); i++) { 1920 demandAvgMissLatency.subname(i, system->getMasterName(i)); 1921 } 1922 1923 overallAvgMissLatency 1924 .name(name() + ".overall_avg_miss_latency") 1925 .desc("average overall miss latency") 1926 .flags(total | nozero | nonan) 1927 ; 1928 overallAvgMissLatency = overallMissLatency / overallMisses; 1929 for (int i = 0; i < system->maxMasters(); i++) { 1930 overallAvgMissLatency.subname(i, system->getMasterName(i)); 1931 } 1932 1933 blocked_cycles.init(NUM_BLOCKED_CAUSES); 1934 blocked_cycles 1935 .name(name() + ".blocked_cycles") 1936 .desc("number of cycles access was blocked") 1937 .subname(Blocked_NoMSHRs, "no_mshrs") 1938 .subname(Blocked_NoTargets, "no_targets") 1939 ; 1940 1941 1942 blocked_causes.init(NUM_BLOCKED_CAUSES); 1943 blocked_causes 1944 .name(name() + ".blocked") 1945 .desc("number of cycles access was blocked") 1946 .subname(Blocked_NoMSHRs, "no_mshrs") 1947 .subname(Blocked_NoTargets, "no_targets") 1948 ; 1949 1950 avg_blocked 1951 .name(name() + ".avg_blocked_cycles") 1952 .desc("average number of cycles each access was blocked") 1953 .subname(Blocked_NoMSHRs, "no_mshrs") 1954 .subname(Blocked_NoTargets, "no_targets") 1955 ; 1956 1957 avg_blocked = blocked_cycles / blocked_causes; 1958 1959 unusedPrefetches 1960 .name(name() + ".unused_prefetches") 1961 .desc("number of HardPF blocks evicted w/o reference") 1962 .flags(nozero) 1963 ; 1964 1965 writebacks 1966 .init(system->maxMasters()) 1967 .name(name() + ".writebacks") 1968 .desc("number of writebacks") 1969 .flags(total | nozero | nonan) 1970 ; 1971 for (int i = 0; i < system->maxMasters(); i++) { 1972 writebacks.subname(i, system->getMasterName(i)); 1973 } 1974 1975 // MSHR statistics 1976 // MSHR hit statistics 1977 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 1978 MemCmd cmd(access_idx); 1979 const string &cstr = cmd.toString(); 1980 1981 mshr_hits[access_idx] 1982 .init(system->maxMasters()) 1983 .name(name() + "." + cstr + "_mshr_hits") 1984 .desc("number of " + cstr + " MSHR hits") 1985 .flags(total | nozero | nonan) 1986 ; 1987 for (int i = 0; i < system->maxMasters(); i++) { 1988 mshr_hits[access_idx].subname(i, system->getMasterName(i)); 1989 } 1990 } 1991 1992 demandMshrHits 1993 .name(name() + ".demand_mshr_hits") 1994 .desc("number of demand (read+write) MSHR hits") 1995 .flags(total | nozero | nonan) 1996 ; 1997 demandMshrHits = SUM_DEMAND(mshr_hits); 1998 for (int i = 0; i < system->maxMasters(); i++) { 1999 demandMshrHits.subname(i, system->getMasterName(i)); 2000 } 2001 2002 overallMshrHits 2003 .name(name() + ".overall_mshr_hits") 2004 .desc("number of overall MSHR hits") 2005 .flags(total | nozero | nonan) 2006 ; 2007 overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits); 2008 for (int i = 0; i < system->maxMasters(); i++) { 2009 overallMshrHits.subname(i, system->getMasterName(i)); 2010 } 2011 2012 // MSHR miss statistics 2013 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2014 MemCmd cmd(access_idx); 2015 const string &cstr = cmd.toString(); 2016 2017 mshr_misses[access_idx] 2018 .init(system->maxMasters()) 2019 .name(name() + "." + cstr + "_mshr_misses") 2020 .desc("number of " + cstr + " MSHR misses") 2021 .flags(total | nozero | nonan) 2022 ; 2023 for (int i = 0; i < system->maxMasters(); i++) { 2024 mshr_misses[access_idx].subname(i, system->getMasterName(i)); 2025 } 2026 } 2027 2028 demandMshrMisses 2029 .name(name() + ".demand_mshr_misses") 2030 .desc("number of demand (read+write) MSHR misses") 2031 .flags(total | nozero | nonan) 2032 ; 2033 demandMshrMisses = SUM_DEMAND(mshr_misses); 2034 for (int i = 0; i < system->maxMasters(); i++) { 2035 demandMshrMisses.subname(i, system->getMasterName(i)); 2036 } 2037 2038 overallMshrMisses 2039 .name(name() + ".overall_mshr_misses") 2040 .desc("number of overall MSHR misses") 2041 .flags(total | nozero | nonan) 2042 ; 2043 overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses); 2044 for (int i = 0; i < system->maxMasters(); i++) { 2045 overallMshrMisses.subname(i, system->getMasterName(i)); 2046 } 2047 2048 // MSHR miss latency statistics 2049 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2050 MemCmd cmd(access_idx); 2051 const string &cstr = cmd.toString(); 2052 2053 mshr_miss_latency[access_idx] 2054 .init(system->maxMasters()) 2055 .name(name() + "." + cstr + "_mshr_miss_latency") 2056 .desc("number of " + cstr + " MSHR miss cycles") 2057 .flags(total | nozero | nonan) 2058 ; 2059 for (int i = 0; i < system->maxMasters(); i++) { 2060 mshr_miss_latency[access_idx].subname(i, system->getMasterName(i)); 2061 } 2062 } 2063 2064 demandMshrMissLatency 2065 .name(name() + ".demand_mshr_miss_latency") 2066 .desc("number of demand (read+write) MSHR miss cycles") 2067 .flags(total | nozero | nonan) 2068 ; 2069 demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency); 2070 for (int i = 0; i < system->maxMasters(); i++) { 2071 demandMshrMissLatency.subname(i, system->getMasterName(i)); 2072 } 2073 2074 overallMshrMissLatency 2075 .name(name() + ".overall_mshr_miss_latency") 2076 .desc("number of overall MSHR miss cycles") 2077 .flags(total | nozero | nonan) 2078 ; 2079 overallMshrMissLatency = 2080 demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency); 2081 for (int i = 0; i < system->maxMasters(); i++) { 2082 overallMshrMissLatency.subname(i, system->getMasterName(i)); 2083 } 2084 2085 // MSHR uncacheable statistics 2086 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2087 MemCmd cmd(access_idx); 2088 const string &cstr = cmd.toString(); 2089 2090 mshr_uncacheable[access_idx] 2091 .init(system->maxMasters()) 2092 .name(name() + "." + cstr + "_mshr_uncacheable") 2093 .desc("number of " + cstr + " MSHR uncacheable") 2094 .flags(total | nozero | nonan) 2095 ; 2096 for (int i = 0; i < system->maxMasters(); i++) { 2097 mshr_uncacheable[access_idx].subname(i, system->getMasterName(i)); 2098 } 2099 } 2100 2101 overallMshrUncacheable 2102 .name(name() + ".overall_mshr_uncacheable_misses") 2103 .desc("number of overall MSHR uncacheable misses") 2104 .flags(total | nozero | nonan) 2105 ; 2106 overallMshrUncacheable = 2107 SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable); 2108 for (int i = 0; i < system->maxMasters(); i++) { 2109 overallMshrUncacheable.subname(i, system->getMasterName(i)); 2110 } 2111 2112 // MSHR miss latency statistics 2113 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2114 MemCmd cmd(access_idx); 2115 const string &cstr = cmd.toString(); 2116 2117 mshr_uncacheable_lat[access_idx] 2118 .init(system->maxMasters()) 2119 .name(name() + "." + cstr + "_mshr_uncacheable_latency") 2120 .desc("number of " + cstr + " MSHR uncacheable cycles") 2121 .flags(total | nozero | nonan) 2122 ; 2123 for (int i = 0; i < system->maxMasters(); i++) { 2124 mshr_uncacheable_lat[access_idx].subname( 2125 i, system->getMasterName(i)); 2126 } 2127 } 2128 2129 overallMshrUncacheableLatency 2130 .name(name() + ".overall_mshr_uncacheable_latency") 2131 .desc("number of overall MSHR uncacheable cycles") 2132 .flags(total | nozero | nonan) 2133 ; 2134 overallMshrUncacheableLatency = 2135 SUM_DEMAND(mshr_uncacheable_lat) + 2136 SUM_NON_DEMAND(mshr_uncacheable_lat); 2137 for (int i = 0; i < system->maxMasters(); i++) { 2138 overallMshrUncacheableLatency.subname(i, system->getMasterName(i)); 2139 } 2140 2141#if 0 2142 // MSHR access formulas 2143 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2144 MemCmd cmd(access_idx); 2145 const string &cstr = cmd.toString(); 2146 2147 mshrAccesses[access_idx] 2148 .name(name() + "." + cstr + "_mshr_accesses") 2149 .desc("number of " + cstr + " mshr accesses(hits+misses)") 2150 .flags(total | nozero | nonan) 2151 ; 2152 mshrAccesses[access_idx] = 2153 mshr_hits[access_idx] + mshr_misses[access_idx] 2154 + mshr_uncacheable[access_idx]; 2155 } 2156 2157 demandMshrAccesses 2158 .name(name() + ".demand_mshr_accesses") 2159 .desc("number of demand (read+write) mshr accesses") 2160 .flags(total | nozero | nonan) 2161 ; 2162 demandMshrAccesses = demandMshrHits + demandMshrMisses; 2163 2164 overallMshrAccesses 2165 .name(name() + ".overall_mshr_accesses") 2166 .desc("number of overall (read+write) mshr accesses") 2167 .flags(total | nozero | nonan) 2168 ; 2169 overallMshrAccesses = overallMshrHits + overallMshrMisses 2170 + overallMshrUncacheable; 2171#endif 2172 2173 // MSHR miss rate formulas 2174 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2175 MemCmd cmd(access_idx); 2176 const string &cstr = cmd.toString(); 2177 2178 mshrMissRate[access_idx] 2179 .name(name() + "." + cstr + "_mshr_miss_rate") 2180 .desc("mshr miss rate for " + cstr + " accesses") 2181 .flags(total | nozero | nonan) 2182 ; 2183 mshrMissRate[access_idx] = 2184 mshr_misses[access_idx] / accesses[access_idx]; 2185 2186 for (int i = 0; i < system->maxMasters(); i++) { 2187 mshrMissRate[access_idx].subname(i, system->getMasterName(i)); 2188 } 2189 } 2190 2191 demandMshrMissRate 2192 .name(name() + ".demand_mshr_miss_rate") 2193 .desc("mshr miss rate for demand accesses") 2194 .flags(total | nozero | nonan) 2195 ; 2196 demandMshrMissRate = demandMshrMisses / demandAccesses; 2197 for (int i = 0; i < system->maxMasters(); i++) { 2198 demandMshrMissRate.subname(i, system->getMasterName(i)); 2199 } 2200 2201 overallMshrMissRate 2202 .name(name() + ".overall_mshr_miss_rate") 2203 .desc("mshr miss rate for overall accesses") 2204 .flags(total | nozero | nonan) 2205 ; 2206 overallMshrMissRate = overallMshrMisses / overallAccesses; 2207 for (int i = 0; i < system->maxMasters(); i++) { 2208 overallMshrMissRate.subname(i, system->getMasterName(i)); 2209 } 2210 2211 // mshrMiss latency formulas 2212 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2213 MemCmd cmd(access_idx); 2214 const string &cstr = cmd.toString(); 2215 2216 avgMshrMissLatency[access_idx] 2217 .name(name() + "." + cstr + "_avg_mshr_miss_latency") 2218 .desc("average " + cstr + " mshr miss latency") 2219 .flags(total | nozero | nonan) 2220 ; 2221 avgMshrMissLatency[access_idx] = 2222 mshr_miss_latency[access_idx] / mshr_misses[access_idx]; 2223 2224 for (int i = 0; i < system->maxMasters(); i++) { 2225 avgMshrMissLatency[access_idx].subname( 2226 i, system->getMasterName(i)); 2227 } 2228 } 2229 2230 demandAvgMshrMissLatency 2231 .name(name() + ".demand_avg_mshr_miss_latency") 2232 .desc("average overall mshr miss latency") 2233 .flags(total | nozero | nonan) 2234 ; 2235 demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses; 2236 for (int i = 0; i < system->maxMasters(); i++) { 2237 demandAvgMshrMissLatency.subname(i, system->getMasterName(i)); 2238 } 2239 2240 overallAvgMshrMissLatency 2241 .name(name() + ".overall_avg_mshr_miss_latency") 2242 .desc("average overall mshr miss latency") 2243 .flags(total | nozero | nonan) 2244 ; 2245 overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses; 2246 for (int i = 0; i < system->maxMasters(); i++) { 2247 overallAvgMshrMissLatency.subname(i, system->getMasterName(i)); 2248 } 2249 2250 // mshrUncacheable latency formulas 2251 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 2252 MemCmd cmd(access_idx); 2253 const string &cstr = cmd.toString(); 2254 2255 avgMshrUncacheableLatency[access_idx] 2256 .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency") 2257 .desc("average " + cstr + " mshr uncacheable latency") 2258 .flags(total | nozero | nonan) 2259 ; 2260 avgMshrUncacheableLatency[access_idx] = 2261 mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx]; 2262 2263 for (int i = 0; i < system->maxMasters(); i++) { 2264 avgMshrUncacheableLatency[access_idx].subname( 2265 i, system->getMasterName(i)); 2266 } 2267 } 2268 2269 overallAvgMshrUncacheableLatency 2270 .name(name() + ".overall_avg_mshr_uncacheable_latency") 2271 .desc("average overall mshr uncacheable latency") 2272 .flags(total | nozero | nonan) 2273 ; 2274 overallAvgMshrUncacheableLatency = 2275 overallMshrUncacheableLatency / overallMshrUncacheable; 2276 for (int i = 0; i < system->maxMasters(); i++) { 2277 overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i)); 2278 } 2279 2280 replacements 2281 .name(name() + ".replacements") 2282 .desc("number of replacements") 2283 ; 2284} 2285 2286void 2287BaseCache::regProbePoints() 2288{ 2289 ppHit = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Hit"); 2290 ppMiss = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Miss"); 2291 ppFill = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Fill"); 2292} 2293 2294/////////////// 2295// 2296// CpuSidePort 2297// 2298/////////////// 2299bool 2300BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt) 2301{ 2302 // Snoops shouldn't happen when bypassing caches 2303 assert(!cache->system->bypassCaches()); 2304 2305 assert(pkt->isResponse()); 2306 2307 // Express snoop responses from master to slave, e.g., from L1 to L2 2308 cache->recvTimingSnoopResp(pkt); 2309 return true; 2310} 2311 2312 2313bool 2314BaseCache::CpuSidePort::tryTiming(PacketPtr pkt) 2315{ 2316 if (cache->system->bypassCaches() || pkt->isExpressSnoop()) { 2317 // always let express snoop packets through even if blocked 2318 return true; 2319 } else if (blocked || mustSendRetry) { 2320 // either already committed to send a retry, or blocked 2321 mustSendRetry = true; 2322 return false; 2323 } 2324 mustSendRetry = false; 2325 return true; 2326} 2327 2328bool 2329BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt) 2330{ 2331 assert(pkt->isRequest()); 2332 2333 if (cache->system->bypassCaches()) { 2334 // Just forward the packet if caches are disabled. 2335 // @todo This should really enqueue the packet rather 2336 bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt); 2337 assert(success); 2338 return true; 2339 } else if (tryTiming(pkt)) { 2340 cache->recvTimingReq(pkt); 2341 return true; 2342 } 2343 return false; 2344} 2345 2346Tick 2347BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt) 2348{ 2349 if (cache->system->bypassCaches()) { 2350 // Forward the request if the system is in cache bypass mode. 2351 return cache->memSidePort.sendAtomic(pkt); 2352 } else { 2353 return cache->recvAtomic(pkt); 2354 } 2355} 2356 2357void 2358BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt) 2359{ 2360 if (cache->system->bypassCaches()) { 2361 // The cache should be flushed if we are in cache bypass mode, 2362 // so we don't need to check if we need to update anything. 2363 cache->memSidePort.sendFunctional(pkt); 2364 return; 2365 } 2366 2367 // functional request 2368 cache->functionalAccess(pkt, true); 2369} 2370 2371AddrRangeList 2372BaseCache::CpuSidePort::getAddrRanges() const 2373{ 2374 return cache->getAddrRanges(); 2375} 2376 2377 2378BaseCache:: 2379CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache, 2380 const std::string &_label) 2381 : CacheSlavePort(_name, _cache, _label), cache(_cache) 2382{ 2383} 2384 2385/////////////// 2386// 2387// MemSidePort 2388// 2389/////////////// 2390bool 2391BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt) 2392{ 2393 cache->recvTimingResp(pkt); 2394 return true; 2395} 2396 2397// Express snooping requests to memside port 2398void 2399BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt) 2400{ 2401 // Snoops shouldn't happen when bypassing caches 2402 assert(!cache->system->bypassCaches()); 2403 2404 // handle snooping requests 2405 cache->recvTimingSnoopReq(pkt); 2406} 2407 2408Tick 2409BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt) 2410{ 2411 // Snoops shouldn't happen when bypassing caches 2412 assert(!cache->system->bypassCaches()); 2413 2414 return cache->recvAtomicSnoop(pkt); 2415} 2416 2417void 2418BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt) 2419{ 2420 // Snoops shouldn't happen when bypassing caches 2421 assert(!cache->system->bypassCaches()); 2422 2423 // functional snoop (note that in contrast to atomic we don't have 2424 // a specific functionalSnoop method, as they have the same 2425 // behaviour regardless) 2426 cache->functionalAccess(pkt, false); 2427} 2428 2429void 2430BaseCache::CacheReqPacketQueue::sendDeferredPacket() 2431{ 2432 // sanity check 2433 assert(!waitingOnRetry); 2434 2435 // there should never be any deferred request packets in the 2436 // queue, instead we resly on the cache to provide the packets 2437 // from the MSHR queue or write queue 2438 assert(deferredPacketReadyTime() == MaxTick); 2439 2440 // check for request packets (requests & writebacks) 2441 QueueEntry* entry = cache.getNextQueueEntry(); 2442 2443 if (!entry) { 2444 // can happen if e.g. we attempt a writeback and fail, but 2445 // before the retry, the writeback is eliminated because 2446 // we snoop another cache's ReadEx. 2447 } else { 2448 // let our snoop responses go first if there are responses to 2449 // the same addresses 2450 if (checkConflictingSnoop(entry->getTarget()->pkt)) { 2451 return; 2452 } 2453 waitingOnRetry = entry->sendPacket(cache); 2454 } 2455 2456 // if we succeeded and are not waiting for a retry, schedule the 2457 // next send considering when the next queue is ready, note that 2458 // snoop responses have their own packet queue and thus schedule 2459 // their own events 2460 if (!waitingOnRetry) { 2461 schedSendEvent(cache.nextQueueReadyTime()); 2462 } 2463} 2464 2465BaseCache::MemSidePort::MemSidePort(const std::string &_name, 2466 BaseCache *_cache, 2467 const std::string &_label) 2468 : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue), 2469 _reqQueue(*_cache, *this, _snoopRespQueue, _label), 2470 _snoopRespQueue(*_cache, *this, true, _label), cache(_cache) 2471{ 2472} 2473 2474void 2475WriteAllocator::updateMode(Addr write_addr, unsigned write_size, 2476 Addr blk_addr) 2477{ 2478 // check if we are continuing where the last write ended 2479 if (nextAddr == write_addr) { 2480 delayCtr[blk_addr] = delayThreshold; 2481 // stop if we have already saturated 2482 if (mode != WriteMode::NO_ALLOCATE) { 2483 byteCount += write_size; 2484 // switch to streaming mode if we have passed the lower 2485 // threshold 2486 if (mode == WriteMode::ALLOCATE && 2487 byteCount > coalesceLimit) { 2488 mode = WriteMode::COALESCE; 2489 DPRINTF(Cache, "Switched to write coalescing\n"); 2490 } else if (mode == WriteMode::COALESCE && 2491 byteCount > noAllocateLimit) { 2492 // and continue and switch to non-allocating mode if we 2493 // pass the upper threshold 2494 mode = WriteMode::NO_ALLOCATE; 2495 DPRINTF(Cache, "Switched to write-no-allocate\n"); 2496 } 2497 } 2498 } else { 2499 // we did not see a write matching the previous one, start 2500 // over again 2501 byteCount = write_size; 2502 mode = WriteMode::ALLOCATE; 2503 resetDelay(blk_addr); 2504 } 2505 nextAddr = write_addr + write_size; 2506} 2507 2508WriteAllocator* 2509WriteAllocatorParams::create() 2510{ 2511 return new WriteAllocator(this); 2512} 2513