cache.cc revision 11749
1/* 2 * Copyright (c) 2010-2016 ARM Limited 3 * All rights reserved. 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2002-2005 The Regents of The University of Michigan 15 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc. 16 * All rights reserved. 17 * 18 * Redistribution and use in source and binary forms, with or without 19 * modification, are permitted provided that the following conditions are 20 * met: redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer; 22 * redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution; 25 * neither the name of the copyright holders nor the names of its 26 * contributors may be used to endorse or promote products derived from 27 * this software without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 40 * 41 * Authors: Erik Hallnor 42 * Dave Greene 43 * Nathan Binkert 44 * Steve Reinhardt 45 * Ron Dreslinski 46 * Andreas Sandberg 47 */ 48 49/** 50 * @file 51 * Cache definitions. 
52 */ 53 54#include "mem/cache/cache.hh" 55 56#include "base/misc.hh" 57#include "base/types.hh" 58#include "debug/Cache.hh" 59#include "debug/CachePort.hh" 60#include "debug/CacheTags.hh" 61#include "debug/CacheVerbose.hh" 62#include "mem/cache/blk.hh" 63#include "mem/cache/mshr.hh" 64#include "mem/cache/prefetch/base.hh" 65#include "sim/sim_exit.hh" 66 67Cache::Cache(const CacheParams *p) 68 : BaseCache(p, p->system->cacheLineSize()), 69 tags(p->tags), 70 prefetcher(p->prefetcher), 71 doFastWrites(true), 72 prefetchOnAccess(p->prefetch_on_access), 73 clusivity(p->clusivity), 74 writebackClean(p->writeback_clean), 75 tempBlockWriteback(nullptr), 76 writebackTempBlockAtomicEvent(this, false, 77 EventBase::Delayed_Writeback_Pri) 78{ 79 tempBlock = new CacheBlk(); 80 tempBlock->data = new uint8_t[blkSize]; 81 82 cpuSidePort = new CpuSidePort(p->name + ".cpu_side", this, 83 "CpuSidePort"); 84 memSidePort = new MemSidePort(p->name + ".mem_side", this, 85 "MemSidePort"); 86 87 tags->setCache(this); 88 if (prefetcher) 89 prefetcher->setCache(this); 90} 91 92Cache::~Cache() 93{ 94 delete [] tempBlock->data; 95 delete tempBlock; 96 97 delete cpuSidePort; 98 delete memSidePort; 99} 100 101void 102Cache::regStats() 103{ 104 BaseCache::regStats(); 105} 106 107void 108Cache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt) 109{ 110 assert(pkt->isRequest()); 111 112 uint64_t overwrite_val; 113 bool overwrite_mem; 114 uint64_t condition_val64; 115 uint32_t condition_val32; 116 117 int offset = tags->extractBlkOffset(pkt->getAddr()); 118 uint8_t *blk_data = blk->data + offset; 119 120 assert(sizeof(uint64_t) >= pkt->getSize()); 121 122 overwrite_mem = true; 123 // keep a copy of our possible write value, and copy what is at the 124 // memory address into the packet 125 pkt->writeData((uint8_t *)&overwrite_val); 126 pkt->setData(blk_data); 127 128 if (pkt->req->isCondSwap()) { 129 if (pkt->getSize() == sizeof(uint64_t)) { 130 condition_val64 = pkt->req->getExtraData(); 131 overwrite_mem = !std::memcmp(&condition_val64, blk_data, 132 sizeof(uint64_t)); 133 } else if (pkt->getSize() == sizeof(uint32_t)) { 134 condition_val32 = (uint32_t)pkt->req->getExtraData(); 135 overwrite_mem = !std::memcmp(&condition_val32, blk_data, 136 sizeof(uint32_t)); 137 } else 138 panic("Invalid size for conditional read/write\n"); 139 } 140 141 if (overwrite_mem) { 142 std::memcpy(blk_data, &overwrite_val, pkt->getSize()); 143 blk->status |= BlkDirty; 144 } 145} 146 147 148void 149Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, 150 bool deferred_response, bool pending_downgrade) 151{ 152 assert(pkt->isRequest()); 153 154 assert(blk && blk->isValid()); 155 // Occasionally this is not true... if we are a lower-level cache 156 // satisfying a string of Read and ReadEx requests from 157 // upper-level caches, a Read will mark the block as shared but we 158 // can satisfy a following ReadEx anyway since we can rely on the 159 // Read requester(s) to have buffered the ReadEx snoop and to 160 // invalidate their blocks after receiving them. 
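    // A concrete instance of the scenario above (illustrative note, not
    // part of the original file): core A's Read downgrades our copy to a
    // non-writable state while core B's ReadEx is already queued behind
    // it; we may still satisfy that ReadEx here because A is guaranteed
    // to have buffered the ReadEx snoop and will invalidate its copy
    // after receiving its data. This is why the writable-state assertion
    // below is left commented out.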
161 // assert(!pkt->needsWritable() || blk->isWritable()); 162 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize); 163 164 // Check RMW operations first since both isRead() and 165 // isWrite() will be true for them 166 if (pkt->cmd == MemCmd::SwapReq) { 167 cmpAndSwap(blk, pkt); 168 } else if (pkt->isWrite()) { 169 // we have the block in a writable state and can go ahead, 170 // note that the line may be also be considered writable in 171 // downstream caches along the path to memory, but always 172 // Exclusive, and never Modified 173 assert(blk->isWritable()); 174 // Write or WriteLine at the first cache with block in writable state 175 if (blk->checkWrite(pkt)) { 176 pkt->writeDataToBlock(blk->data, blkSize); 177 } 178 // Always mark the line as dirty (and thus transition to the 179 // Modified state) even if we are a failed StoreCond so we 180 // supply data to any snoops that have appended themselves to 181 // this cache before knowing the store will fail. 182 blk->status |= BlkDirty; 183 DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print()); 184 } else if (pkt->isRead()) { 185 if (pkt->isLLSC()) { 186 blk->trackLoadLocked(pkt); 187 } 188 189 // all read responses have a data payload 190 assert(pkt->hasRespData()); 191 pkt->setDataFromBlock(blk->data, blkSize); 192 193 // determine if this read is from a (coherent) cache or not 194 if (pkt->fromCache()) { 195 assert(pkt->getSize() == blkSize); 196 // special handling for coherent block requests from 197 // upper-level caches 198 if (pkt->needsWritable()) { 199 // sanity check 200 assert(pkt->cmd == MemCmd::ReadExReq || 201 pkt->cmd == MemCmd::SCUpgradeFailReq); 202 assert(!pkt->hasSharers()); 203 204 // if we have a dirty copy, make sure the recipient 205 // keeps it marked dirty (in the modified state) 206 if (blk->isDirty()) { 207 pkt->setCacheResponding(); 208 blk->status &= ~BlkDirty; 209 } 210 } else if (blk->isWritable() && !pending_downgrade && 211 !pkt->hasSharers() && 212 pkt->cmd != MemCmd::ReadCleanReq) { 213 // we can give the requester a writable copy on a read 214 // request if: 215 // - we have a writable copy at this level (& below) 216 // - we don't have a pending snoop from below 217 // signaling another read request 218 // - no other cache above has a copy (otherwise it 219 // would have set hasSharers flag when 220 // snooping the packet) 221 // - the read has explicitly asked for a clean 222 // copy of the line 223 if (blk->isDirty()) { 224 // special considerations if we're owner: 225 if (!deferred_response) { 226 // respond with the line in Modified state 227 // (cacheResponding set, hasSharers not set) 228 pkt->setCacheResponding(); 229 230 // if this cache is mostly inclusive, we 231 // keep the block in the Exclusive state, 232 // and pass it upwards as Modified 233 // (writable and dirty), hence we have 234 // multiple caches, all on the same path 235 // towards memory, all considering the 236 // same block writable, but only one 237 // considering it Modified 238 239 // we get away with multiple caches (on 240 // the same path to memory) considering 241 // the block writeable as we always enter 242 // the cache hierarchy through a cache, 243 // and first snoop upwards in all other 244 // branches 245 blk->status &= ~BlkDirty; 246 } else { 247 // if we're responding after our own miss, 248 // there's a window where the recipient didn't 249 // know it was getting ownership and may not 250 // have responded to snoops correctly, so we 251 // have to respond with a shared line 252 
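                        // (concretely: hasSharers is set on the response
                        // below and BlkDirty is deliberately not cleared,
                        // so this cache keeps the dirty copy and the
                        // requester only receives a non-writable copy)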
pkt->setHasSharers(); 253 } 254 } 255 } else { 256 // otherwise only respond with a shared copy 257 pkt->setHasSharers(); 258 } 259 } 260 } else if (pkt->isUpgrade()) { 261 // sanity check 262 assert(!pkt->hasSharers()); 263 264 if (blk->isDirty()) { 265 // we were in the Owned state, and a cache above us that 266 // has the line in Shared state needs to be made aware 267 // that the data it already has is in fact dirty 268 pkt->setCacheResponding(); 269 blk->status &= ~BlkDirty; 270 } 271 } else { 272 assert(pkt->isInvalidate()); 273 invalidateBlock(blk); 274 DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__, 275 pkt->print()); 276 } 277} 278 279///////////////////////////////////////////////////// 280// 281// Access path: requests coming in from the CPU side 282// 283///////////////////////////////////////////////////// 284 285bool 286Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, 287 PacketList &writebacks) 288{ 289 // sanity check 290 assert(pkt->isRequest()); 291 292 chatty_assert(!(isReadOnly && pkt->isWrite()), 293 "Should never see a write in a read-only cache %s\n", 294 name()); 295 296 DPRINTF(CacheVerbose, "%s for %s\n", __func__, pkt->print()); 297 298 if (pkt->req->isUncacheable()) { 299 DPRINTF(Cache, "uncacheable: %s\n", pkt->print()); 300 301 // flush and invalidate any existing block 302 CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure())); 303 if (old_blk && old_blk->isValid()) { 304 if (old_blk->isDirty() || writebackClean) 305 writebacks.push_back(writebackBlk(old_blk)); 306 else 307 writebacks.push_back(cleanEvictBlk(old_blk)); 308 tags->invalidate(old_blk); 309 old_blk->invalidate(); 310 } 311 312 blk = nullptr; 313 // lookupLatency is the latency in case the request is uncacheable. 314 lat = lookupLatency; 315 return false; 316 } 317 318 ContextID id = pkt->req->hasContextId() ? 319 pkt->req->contextId() : InvalidContextID; 320 // Here lat is the value passed as parameter to accessBlock() function 321 // that can modify its value. 322 blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat, id); 323 324 DPRINTF(Cache, "%s %s\n", pkt->print(), 325 blk ? "hit " + blk->print() : "miss"); 326 327 328 if (pkt->isEviction()) { 329 // We check for presence of block in above caches before issuing 330 // Writeback or CleanEvict to write buffer. Therefore the only 331 // possible cases can be of a CleanEvict packet coming from above 332 // encountering a Writeback generated in this cache peer cache and 333 // waiting in the write buffer. Cases of upper level peer caches 334 // generating CleanEvict and Writeback or simply CleanEvict and 335 // CleanEvict almost simultaneously will be caught by snoops sent out 336 // by crossbar. 337 WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(), 338 pkt->isSecure()); 339 if (wb_entry) { 340 assert(wb_entry->getNumTargets() == 1); 341 PacketPtr wbPkt = wb_entry->getTarget()->pkt; 342 assert(wbPkt->isWriteback()); 343 344 if (pkt->isCleanEviction()) { 345 // The CleanEvict and WritebackClean snoops into other 346 // peer caches of the same level while traversing the 347 // crossbar. If a copy of the block is found, the 348 // packet is deleted in the crossbar. Hence, none of 349 // the other upper level caches connected to this 350 // cache have the block, so we can clear the 351 // BLOCK_CACHED flag in the Writeback if set and 352 // discard the CleanEvict by returning true. 
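                // Summary of the two cases handled in this branch and the
                // one below (descriptive note):
                //   incoming CleanEvict/WritebackClean -> clear
                //     BLOCK_CACHED on our queued writeback and sink the
                //     incoming packet by returning true;
                //   incoming WritebackDirty -> the dirty data from above
                //     supersedes our queued clean writeback, so the queued
                //     entry is marked in service and its packet deleted
                //     instead.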
353 wbPkt->clearBlockCached(); 354 return true; 355 } else { 356 assert(pkt->cmd == MemCmd::WritebackDirty); 357 // Dirty writeback from above trumps our clean 358 // writeback... discard here 359 // Note: markInService will remove entry from writeback buffer. 360 markInService(wb_entry); 361 delete wbPkt; 362 } 363 } 364 } 365 366 // Writeback handling is special case. We can write the block into 367 // the cache without having a writeable copy (or any copy at all). 368 if (pkt->isWriteback()) { 369 assert(blkSize == pkt->getSize()); 370 371 // we could get a clean writeback while we are having 372 // outstanding accesses to a block, do the simple thing for 373 // now and drop the clean writeback so that we do not upset 374 // any ordering/decisions about ownership already taken 375 if (pkt->cmd == MemCmd::WritebackClean && 376 mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) { 377 DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, " 378 "dropping\n", pkt->getAddr()); 379 return true; 380 } 381 382 if (blk == nullptr) { 383 // need to do a replacement 384 blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks); 385 if (blk == nullptr) { 386 // no replaceable block available: give up, fwd to next level. 387 incMissCount(pkt); 388 return false; 389 } 390 tags->insertBlock(pkt, blk); 391 392 blk->status = (BlkValid | BlkReadable); 393 if (pkt->isSecure()) { 394 blk->status |= BlkSecure; 395 } 396 } 397 // only mark the block dirty if we got a writeback command, 398 // and leave it as is for a clean writeback 399 if (pkt->cmd == MemCmd::WritebackDirty) { 400 blk->status |= BlkDirty; 401 } 402 // if the packet does not have sharers, it is passing 403 // writable, and we got the writeback in Modified or Exclusive 404 // state, if not we are in the Owned or Shared state 405 if (!pkt->hasSharers()) { 406 blk->status |= BlkWritable; 407 } 408 // nothing else to do; writeback doesn't expect response 409 assert(!pkt->needsResponse()); 410 std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize); 411 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print()); 412 incHitCount(pkt); 413 return true; 414 } else if (pkt->cmd == MemCmd::CleanEvict) { 415 if (blk != nullptr) { 416 // Found the block in the tags, need to stop CleanEvict from 417 // propagating further down the hierarchy. Returning true will 418 // treat the CleanEvict like a satisfied write request and delete 419 // it. 420 return true; 421 } 422 // We didn't find the block here, propagate the CleanEvict further 423 // down the memory hierarchy. Returning false will treat the CleanEvict 424 // like a Writeback which could not find a replaceable block so has to 425 // go to next level. 426 return false; 427 } else if (blk && (pkt->needsWritable() ? blk->isWritable() : 428 blk->isReadable())) { 429 // OK to satisfy access 430 incHitCount(pkt); 431 satisfyRequest(pkt, blk); 432 maintainClusivity(pkt->fromCache(), blk); 433 434 return true; 435 } 436 437 // Can't satisfy access normally... either no block (blk == nullptr) 438 // or have block but need writable 439 440 incMissCount(pkt); 441 442 if (blk == nullptr && pkt->isLLSC() && pkt->isWrite()) { 443 // complete miss on store conditional... 
just give up now 444 pkt->req->setExtraData(0); 445 return true; 446 } 447 448 return false; 449} 450 451void 452Cache::maintainClusivity(bool from_cache, CacheBlk *blk) 453{ 454 if (from_cache && blk && blk->isValid() && !blk->isDirty() && 455 clusivity == Enums::mostly_excl) { 456 // if we have responded to a cache, and our block is still 457 // valid, but not dirty, and this cache is mostly exclusive 458 // with respect to the cache above, drop the block 459 invalidateBlock(blk); 460 } 461} 462 463void 464Cache::doWritebacks(PacketList& writebacks, Tick forward_time) 465{ 466 while (!writebacks.empty()) { 467 PacketPtr wbPkt = writebacks.front(); 468 // We use forwardLatency here because we are copying writebacks to 469 // write buffer. Call isCachedAbove for both Writebacks and 470 // CleanEvicts. If isCachedAbove returns true we set BLOCK_CACHED flag 471 // in Writebacks and discard CleanEvicts. 472 if (isCachedAbove(wbPkt)) { 473 if (wbPkt->cmd == MemCmd::CleanEvict) { 474 // Delete CleanEvict because cached copies exist above. The 475 // packet destructor will delete the request object because 476 // this is a non-snoop request packet which does not require a 477 // response. 478 delete wbPkt; 479 } else if (wbPkt->cmd == MemCmd::WritebackClean) { 480 // clean writeback, do not send since the block is 481 // still cached above 482 assert(writebackClean); 483 delete wbPkt; 484 } else { 485 assert(wbPkt->cmd == MemCmd::WritebackDirty); 486 // Set BLOCK_CACHED flag in Writeback and send below, so that 487 // the Writeback does not reset the bit corresponding to this 488 // address in the snoop filter below. 489 wbPkt->setBlockCached(); 490 allocateWriteBuffer(wbPkt, forward_time); 491 } 492 } else { 493 // If the block is not cached above, send packet below. Both 494 // CleanEvict and Writeback with BLOCK_CACHED flag cleared will 495 // reset the bit corresponding to this address in the snoop filter 496 // below. 497 allocateWriteBuffer(wbPkt, forward_time); 498 } 499 writebacks.pop_front(); 500 } 501} 502 503void 504Cache::doWritebacksAtomic(PacketList& writebacks) 505{ 506 while (!writebacks.empty()) { 507 PacketPtr wbPkt = writebacks.front(); 508 // Call isCachedAbove for both Writebacks and CleanEvicts. If 509 // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks 510 // and discard CleanEvicts. 511 if (isCachedAbove(wbPkt, false)) { 512 if (wbPkt->cmd == MemCmd::WritebackDirty) { 513 // Set BLOCK_CACHED flag in Writeback and send below, 514 // so that the Writeback does not reset the bit 515 // corresponding to this address in the snoop filter 516 // below. We can discard CleanEvicts because cached 517 // copies exist above. Atomic mode isCachedAbove 518 // modifies packet to set BLOCK_CACHED flag 519 memSidePort->sendAtomic(wbPkt); 520 } 521 } else { 522 // If the block is not cached above, send packet below. Both 523 // CleanEvict and Writeback with BLOCK_CACHED flag cleared will 524 // reset the bit corresponding to this address in the snoop filter 525 // below. 526 memSidePort->sendAtomic(wbPkt); 527 } 528 writebacks.pop_front(); 529 // In case of CleanEvicts, the packet destructor will delete the 530 // request object because this is a non-snoop request packet which 531 // does not require a response. 
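        // (note that the packet itself is deleted unconditionally at the
        // end of every loop iteration, whether it was sent below or
        // silently dropped above)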
532 delete wbPkt; 533 } 534} 535 536 537void 538Cache::recvTimingSnoopResp(PacketPtr pkt) 539{ 540 DPRINTF(Cache, "%s for %s\n", __func__, pkt->print()); 541 542 assert(pkt->isResponse()); 543 assert(!system->bypassCaches()); 544 545 // determine if the response is from a snoop request we created 546 // (in which case it should be in the outstandingSnoop), or if we 547 // merely forwarded someone else's snoop request 548 const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) == 549 outstandingSnoop.end(); 550 551 if (!forwardAsSnoop) { 552 // the packet came from this cache, so sink it here and do not 553 // forward it 554 assert(pkt->cmd == MemCmd::HardPFResp); 555 556 outstandingSnoop.erase(pkt->req); 557 558 DPRINTF(Cache, "Got prefetch response from above for addr " 559 "%#llx (%s)\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns"); 560 recvTimingResp(pkt); 561 return; 562 } 563 564 // forwardLatency is set here because there is a response from an 565 // upper level cache. 566 // To pay the delay that occurs if the packet comes from the bus, 567 // we charge also headerDelay. 568 Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay; 569 // Reset the timing of the packet. 570 pkt->headerDelay = pkt->payloadDelay = 0; 571 memSidePort->schedTimingSnoopResp(pkt, snoop_resp_time); 572} 573 574void 575Cache::promoteWholeLineWrites(PacketPtr pkt) 576{ 577 // Cache line clearing instructions 578 if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) && 579 (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) { 580 pkt->cmd = MemCmd::WriteLineReq; 581 DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n"); 582 } 583} 584 585bool 586Cache::recvTimingReq(PacketPtr pkt) 587{ 588 DPRINTF(CacheTags, "%s tags: %s\n", __func__, tags->print()); 589 590 assert(pkt->isRequest()); 591 592 // Just forward the packet if caches are disabled. 
    if (system->bypassCaches()) {
        // @todo This should really enqueue the packet rather
        bool M5_VAR_USED success = memSidePort->sendTimingReq(pkt);
        assert(success);
        return true;
    }

    promoteWholeLineWrites(pkt);

    if (pkt->cacheResponding()) {
        // a cache above us (but not where the packet came from) is
        // responding to the request, in other words it has the line
        // in Modified or Owned state
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if the packet needs the block to be writable, and the cache
        // that has promised to respond (setting the cache responding
        // flag) is not providing writable (it is in Owned rather than
        // the Modified state), we know that there may be other Shared
        // copies in the system; go out and invalidate them all
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        // an upstream cache that had the line in Owned state
        // (dirty, but not writable), is responding and thus
        // transferring the dirty line from one branch of the
        // cache hierarchy to another

        // send out an express snoop and invalidate all other
        // copies (snooping a packet that needs writable is the
        // same as an invalidation), thus turning the Owned line
        // into a Modified line, note that we don't invalidate the
        // block in the current cache or any other cache on the
        // path to memory

        // create a downstream express snoop with cleared packet
        // flags, there is no need to allocate any data as the
        // packet is merely used to co-ordinate state transitions
        Packet *snoop_pkt = new Packet(pkt, true, false);

        // also reset the bus time that the original packet has
        // not yet paid for
        snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;

        // make this an instantaneous express snoop, and let the
        // other caches in the system know that another cache
        // is responding, because we have found the authoritative
        // copy (Modified or Owned) that will supply the right
        // data
        snoop_pkt->setExpressSnoop();
        snoop_pkt->setCacheResponding();

        // this express snoop travels towards the memory, and at
        // every crossbar it is snooped upwards thus reaching
        // every cache in the system
        bool M5_VAR_USED success = memSidePort->sendTimingReq(snoop_pkt);
        // express snoops always succeed
        assert(success);

        // main memory will delete the snoop packet

        // queue for deletion, as opposed to immediate deletion, as
        // the sending cache is still relying on the packet
        pendingDelete.reset(pkt);

        // no need to take any further action in this particular cache
        // as an upstream cache has already committed to responding,
        // and we have already sent out any express snoops in the
        // section above to ensure all other copies in the system are
        // invalidated
        return true;
    }

    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    // We use lookupLatency here because it specifies the latency of the
    // tag lookup performed for this access.
    Cycles lat = lookupLatency;
    CacheBlk *blk = nullptr;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() calls accessBlock(), which can modify the value of lat.
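        // For example (illustrative numbers only): with a lookupLatency
        // of 4 cycles, if accessBlock() adds another cycle for the
        // tag/data access, lat comes back as 5 and the hit response
        // below ends up scheduled at clockEdge(Cycles(5)) plus whatever
        // headerDelay the packet carried in from the crossbar.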
679 satisfied = access(pkt, blk, lat, writebacks); 680 681 // copy writebacks to write buffer here to ensure they logically 682 // proceed anything happening below 683 doWritebacks(writebacks, forward_time); 684 } 685 686 // Here we charge the headerDelay that takes into account the latencies 687 // of the bus, if the packet comes from it. 688 // The latency charged it is just lat that is the value of lookupLatency 689 // modified by access() function, or if not just lookupLatency. 690 // In case of a hit we are neglecting response latency. 691 // In case of a miss we are neglecting forward latency. 692 Tick request_time = clockEdge(lat) + pkt->headerDelay; 693 // Here we reset the timing of the packet. 694 pkt->headerDelay = pkt->payloadDelay = 0; 695 696 // track time of availability of next prefetch, if any 697 Tick next_pf_time = MaxTick; 698 699 bool needsResponse = pkt->needsResponse(); 700 701 if (satisfied) { 702 // should never be satisfying an uncacheable access as we 703 // flush and invalidate any existing block as part of the 704 // lookup 705 assert(!pkt->req->isUncacheable()); 706 707 // hit (for all other request types) 708 709 if (prefetcher && (prefetchOnAccess || 710 (blk && blk->wasPrefetched()))) { 711 if (blk) 712 blk->status &= ~BlkHWPrefetched; 713 714 // Don't notify on SWPrefetch 715 if (!pkt->cmd.isSWPrefetch()) 716 next_pf_time = prefetcher->notify(pkt); 717 } 718 719 if (needsResponse) { 720 pkt->makeTimingResponse(); 721 // @todo: Make someone pay for this 722 pkt->headerDelay = pkt->payloadDelay = 0; 723 724 // In this case we are considering request_time that takes 725 // into account the delay of the xbar, if any, and just 726 // lat, neglecting responseLatency, modelling hit latency 727 // just as lookupLatency or or the value of lat overriden 728 // by access(), that calls accessBlock() function. 729 cpuSidePort->schedTimingResp(pkt, request_time, true); 730 } else { 731 DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__, 732 pkt->print()); 733 734 // queue the packet for deletion, as the sending cache is 735 // still relying on it; if the block is found in access(), 736 // CleanEvict and Writeback messages will be deleted 737 // here as well 738 pendingDelete.reset(pkt); 739 } 740 } else { 741 // miss 742 743 Addr blk_addr = blockAlign(pkt->getAddr()); 744 745 // ignore any existing MSHR if we are dealing with an 746 // uncacheable request 747 MSHR *mshr = pkt->req->isUncacheable() ? nullptr : 748 mshrQueue.findMatch(blk_addr, pkt->isSecure()); 749 750 // Software prefetch handling: 751 // To keep the core from waiting on data it won't look at 752 // anyway, send back a response with dummy data. Miss handling 753 // will continue asynchronously. Unfortunately, the core will 754 // insist upon freeing original Packet/Request, so we have to 755 // create a new pair with a different lifecycle. Note that this 756 // processing happens before any MSHR munging on the behalf of 757 // this request because this new Request will be the one stored 758 // into the MSHRs, not the original. 759 if (pkt->cmd.isSWPrefetch()) { 760 assert(needsResponse); 761 assert(pkt->req->hasPaddr()); 762 assert(!pkt->req->isUncacheable()); 763 764 // There's no reason to add a prefetch as an additional target 765 // to an existing MSHR. If an outstanding request is already 766 // in progress, there is nothing for the prefetch to do. 767 // If this is the case, we don't even create a request at all. 
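            // The two outcomes of the block below, in short: with no
            // matching MSHR we clone the request into a new prefetch
            // packet (pf) that continues down the miss path while the
            // original packet is answered immediately; with a matching
            // MSHR, pf stays null, pkt is set to null after the response
            // is scheduled, and the prefetch is simply dropped.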
            PacketPtr pf = nullptr;

            if (!mshr) {
                // copy the request and create a new SoftPFReq packet
                RequestPtr req = new Request(pkt->req->getPaddr(),
                                             pkt->req->getSize(),
                                             pkt->req->getFlags(),
                                             pkt->req->masterId());
                pf = new Packet(req, pkt->cmd);
                pf->allocate();
                assert(pf->getAddr() == pkt->getAddr());
                assert(pf->getSize() == pkt->getSize());
            }

            pkt->makeTimingResponse();

            // request_time is used here, taking into account lat and the
            // delay charged if the packet comes from the xbar.
            cpuSidePort->schedTimingResp(pkt, request_time, true);

            // If an outstanding request is in progress (we found an
            // MSHR) this is set to null
            pkt = pf;
        }

        if (mshr) {
            /// MSHR hit
            /// @note writebacks will be checked in getNextMSHR()
            /// for any conflicting requests to the same block

            //@todo remove hw_pf here

            // Coalesce unless it was a software prefetch (see above).
            if (pkt) {
                assert(!pkt->isWriteback());
                // CleanEvicts corresponding to blocks which have
                // outstanding requests in MSHRs are simply sunk here
                if (pkt->cmd == MemCmd::CleanEvict) {
                    pendingDelete.reset(pkt);
                } else {
                    DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
                            pkt->print());

                    assert(pkt->req->masterId() < system->maxMasters());
                    mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
                    // We use forward_time here because it is the same
                    // considering new targets. We have multiple
                    // requests for the same address here. It
                    // specifies the latency to allocate an internal
                    // buffer and to schedule an event to the queued
                    // port and also takes into account the additional
                    // delay of the xbar.
                    mshr->allocateTarget(pkt, forward_time, order++,
                                         allocOnFill(pkt->cmd));
                    if (mshr->getNumTargets() == numTarget) {
                        noTargetMSHR = mshr;
                        setBlocked(Blocked_NoTargets);
                        // need to be careful with this... if this mshr isn't
                        // ready yet (i.e. time > curTick()), we don't want to
                        // move it ahead of mshrs that are ready
                        // mshrQueue.moveToFront(mshr);
                    }
                }
                // We should notify the prefetcher regardless of whether
                // the request is satisfied, and regardless of whether it
                // is in the MSHR or not. The request could be a ReadReq
                // hit, but still not satisfied (potentially because of a
                // prior write to the same cache line). So, even when not
                // satisfied, there is an MSHR already allocated for this
                // request, and we need to let the prefetcher know about it.
                if (prefetcher) {
                    // Don't notify on SWPrefetch
                    if (!pkt->cmd.isSWPrefetch())
                        next_pf_time = prefetcher->notify(pkt);
                }
            }
        } else {
            // no MSHR
            assert(pkt->req->masterId() < system->maxMasters());
            if (pkt->req->isUncacheable()) {
                mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;
            } else {
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
            }

            if (pkt->isEviction() ||
                (pkt->req->isUncacheable() && pkt->isWrite())) {
                // We use forward_time here because there is an
                // uncached memory write, forwarded to WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                if (blk && blk->isValid()) {
                    // should have flushed and have no valid block
                    assert(!pkt->req->isUncacheable());

                    // If we have a write miss to a valid block, we
                    // need to mark the block non-readable.
Otherwise 865 // if we allow reads while there's an outstanding 866 // write miss, the read could return stale data 867 // out of the cache block... a more aggressive 868 // system could detect the overlap (if any) and 869 // forward data out of the MSHRs, but we don't do 870 // that yet. Note that we do need to leave the 871 // block valid so that it stays in the cache, in 872 // case we get an upgrade response (and hence no 873 // new data) when the write miss completes. 874 // As long as CPUs do proper store/load forwarding 875 // internally, and have a sufficiently weak memory 876 // model, this is probably unnecessary, but at some 877 // point it must have seemed like we needed it... 878 assert(pkt->needsWritable()); 879 assert(!blk->isWritable()); 880 blk->status &= ~BlkReadable; 881 } 882 // Here we are using forward_time, modelling the latency of 883 // a miss (outbound) just as forwardLatency, neglecting the 884 // lookupLatency component. 885 allocateMissBuffer(pkt, forward_time); 886 } 887 888 if (prefetcher) { 889 // Don't notify on SWPrefetch 890 if (!pkt->cmd.isSWPrefetch()) 891 next_pf_time = prefetcher->notify(pkt); 892 } 893 } 894 } 895 896 if (next_pf_time != MaxTick) 897 schedMemSideSendEvent(next_pf_time); 898 899 return true; 900} 901 902PacketPtr 903Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk, 904 bool needsWritable) const 905{ 906 // should never see evictions here 907 assert(!cpu_pkt->isEviction()); 908 909 bool blkValid = blk && blk->isValid(); 910 911 if (cpu_pkt->req->isUncacheable() || 912 (!blkValid && cpu_pkt->isUpgrade()) || 913 cpu_pkt->cmd == MemCmd::InvalidateReq) { 914 // uncacheable requests and upgrades from upper-level caches 915 // that missed completely just go through as is 916 return nullptr; 917 } 918 919 assert(cpu_pkt->needsResponse()); 920 921 MemCmd cmd; 922 // @TODO make useUpgrades a parameter. 923 // Note that ownership protocols require upgrade, otherwise a 924 // write miss on a shared owned block will generate a ReadExcl, 925 // which will clobber the owned copy. 926 const bool useUpgrades = true; 927 if (cpu_pkt->cmd == MemCmd::WriteLineReq) { 928 assert(!blkValid || !blk->isWritable()); 929 // forward as invalidate to all other caches, this gives us 930 // the line in Exclusive state, and invalidates all other 931 // copies 932 cmd = MemCmd::InvalidateReq; 933 } else if (blkValid && useUpgrades) { 934 // only reason to be here is that blk is read only and we need 935 // it to be writable 936 assert(needsWritable); 937 assert(!blk->isWritable()); 938 cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq; 939 } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq || 940 cpu_pkt->cmd == MemCmd::StoreCondFailReq) { 941 // Even though this SC will fail, we still need to send out the 942 // request and get the data to supply it to other snoopers in the case 943 // where the determination the StoreCond fails is delayed due to 944 // all caches not being on the same local bus. 945 cmd = MemCmd::SCUpgradeFailReq; 946 } else { 947 // block is invalid 948 cmd = needsWritable ? MemCmd::ReadExReq : 949 (isReadOnly ? 
MemCmd::ReadCleanReq : MemCmd::ReadSharedReq); 950 } 951 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize); 952 953 // if there are upstream caches that have already marked the 954 // packet as having sharers (not passing writable), pass that info 955 // downstream 956 if (cpu_pkt->hasSharers() && !needsWritable) { 957 // note that cpu_pkt may have spent a considerable time in the 958 // MSHR queue and that the information could possibly be out 959 // of date, however, there is no harm in conservatively 960 // assuming the block has sharers 961 pkt->setHasSharers(); 962 DPRINTF(Cache, "%s: passing hasSharers from %s to %s\n", 963 __func__, cpu_pkt->print(), pkt->print()); 964 } 965 966 // the packet should be block aligned 967 assert(pkt->getAddr() == blockAlign(pkt->getAddr())); 968 969 pkt->allocate(); 970 DPRINTF(Cache, "%s: created %s from %s\n", __func__, pkt->print(), 971 cpu_pkt->print()); 972 return pkt; 973} 974 975 976Tick 977Cache::recvAtomic(PacketPtr pkt) 978{ 979 // We are in atomic mode so we pay just for lookupLatency here. 980 Cycles lat = lookupLatency; 981 982 // Forward the request if the system is in cache bypass mode. 983 if (system->bypassCaches()) 984 return ticksToCycles(memSidePort->sendAtomic(pkt)); 985 986 promoteWholeLineWrites(pkt); 987 988 // follow the same flow as in recvTimingReq, and check if a cache 989 // above us is responding 990 if (pkt->cacheResponding()) { 991 DPRINTF(Cache, "Cache above responding to %s: not responding\n", 992 pkt->print()); 993 994 // if a cache is responding, and it had the line in Owned 995 // rather than Modified state, we need to invalidate any 996 // copies that are not on the same path to memory 997 assert(pkt->needsWritable() && !pkt->responderHadWritable()); 998 lat += ticksToCycles(memSidePort->sendAtomic(pkt)); 999 1000 return lat * clockPeriod(); 1001 } 1002 1003 // should assert here that there are no outstanding MSHRs or 1004 // writebacks... that would mean that someone used an atomic 1005 // access in timing mode 1006 1007 CacheBlk *blk = nullptr; 1008 PacketList writebacks; 1009 bool satisfied = access(pkt, blk, lat, writebacks); 1010 1011 // handle writebacks resulting from the access here to ensure they 1012 // logically proceed anything happening below 1013 doWritebacksAtomic(writebacks); 1014 1015 if (!satisfied) { 1016 // MISS 1017 1018 // deal with the packets that go through the write path of 1019 // the cache, i.e. any evictions and uncacheable writes 1020 if (pkt->isEviction() || 1021 (pkt->req->isUncacheable() && pkt->isWrite())) { 1022 lat += ticksToCycles(memSidePort->sendAtomic(pkt)); 1023 return lat * clockPeriod(); 1024 } 1025 // only misses left 1026 1027 PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable()); 1028 1029 bool is_forward = (bus_pkt == nullptr); 1030 1031 if (is_forward) { 1032 // just forwarding the same request to the next level 1033 // no local cache operation involved 1034 bus_pkt = pkt; 1035 } 1036 1037 DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__, 1038 bus_pkt->print()); 1039 1040#if TRACING_ON 1041 CacheBlk::State old_state = blk ? 
blk->status : 0; 1042#endif 1043 1044 lat += ticksToCycles(memSidePort->sendAtomic(bus_pkt)); 1045 1046 bool is_invalidate = bus_pkt->isInvalidate(); 1047 1048 // We are now dealing with the response handling 1049 DPRINTF(Cache, "%s: Receive response: %s in state %i\n", __func__, 1050 bus_pkt->print(), old_state); 1051 1052 // If packet was a forward, the response (if any) is already 1053 // in place in the bus_pkt == pkt structure, so we don't need 1054 // to do anything. Otherwise, use the separate bus_pkt to 1055 // generate response to pkt and then delete it. 1056 if (!is_forward) { 1057 if (pkt->needsResponse()) { 1058 assert(bus_pkt->isResponse()); 1059 if (bus_pkt->isError()) { 1060 pkt->makeAtomicResponse(); 1061 pkt->copyError(bus_pkt); 1062 } else if (pkt->cmd == MemCmd::WriteLineReq) { 1063 // note the use of pkt, not bus_pkt here. 1064 1065 // write-line request to the cache that promoted 1066 // the write to a whole line 1067 blk = handleFill(pkt, blk, writebacks, 1068 allocOnFill(pkt->cmd)); 1069 assert(blk != NULL); 1070 is_invalidate = false; 1071 satisfyRequest(pkt, blk); 1072 } else if (bus_pkt->isRead() || 1073 bus_pkt->cmd == MemCmd::UpgradeResp) { 1074 // we're updating cache state to allow us to 1075 // satisfy the upstream request from the cache 1076 blk = handleFill(bus_pkt, blk, writebacks, 1077 allocOnFill(pkt->cmd)); 1078 satisfyRequest(pkt, blk); 1079 maintainClusivity(pkt->fromCache(), blk); 1080 } else { 1081 // we're satisfying the upstream request without 1082 // modifying cache state, e.g., a write-through 1083 pkt->makeAtomicResponse(); 1084 } 1085 } 1086 delete bus_pkt; 1087 } 1088 1089 if (is_invalidate && blk && blk->isValid()) { 1090 invalidateBlock(blk); 1091 } 1092 } 1093 1094 // Note that we don't invoke the prefetcher at all in atomic mode. 1095 // It's not clear how to do it properly, particularly for 1096 // prefetchers that aggressively generate prefetch candidates and 1097 // rely on bandwidth contention to throttle them; these will tend 1098 // to pollute the cache in atomic mode since there is no bandwidth 1099 // contention. If we ever do want to enable prefetching in atomic 1100 // mode, though, this is the place to do it... see timingAccess() 1101 // for an example (though we'd want to issue the prefetch(es) 1102 // immediately rather than calling requestMemSideBus() as we do 1103 // there). 1104 1105 // do any writebacks resulting from the response handling 1106 doWritebacksAtomic(writebacks); 1107 1108 // if we used temp block, check to see if its valid and if so 1109 // clear it out, but only do so after the call to recvAtomic is 1110 // finished so that any downstream observers (such as a snoop 1111 // filter), first see the fill, and only then see the eviction 1112 if (blk == tempBlock && tempBlock->isValid()) { 1113 // the atomic CPU calls recvAtomic for fetch and load/store 1114 // sequentuially, and we may already have a tempBlock 1115 // writeback from the fetch that we have not yet sent 1116 if (tempBlockWriteback) { 1117 // if that is the case, write the prevoius one back, and 1118 // do not schedule any new event 1119 writebackTempBlockAtomic(); 1120 } else { 1121 // the writeback/clean eviction happens after the call to 1122 // recvAtomic has finished (but before any successive 1123 // calls), so that the response handling from the fill is 1124 // allowed to happen first 1125 schedule(writebackTempBlockAtomicEvent, curTick()); 1126 } 1127 1128 tempBlockWriteback = (blk->isDirty() || writebackClean) ? 
1129 writebackBlk(blk) : cleanEvictBlk(blk); 1130 blk->invalidate(); 1131 } 1132 1133 if (pkt->needsResponse()) { 1134 pkt->makeAtomicResponse(); 1135 } 1136 1137 return lat * clockPeriod(); 1138} 1139 1140 1141void 1142Cache::functionalAccess(PacketPtr pkt, bool fromCpuSide) 1143{ 1144 if (system->bypassCaches()) { 1145 // Packets from the memory side are snoop request and 1146 // shouldn't happen in bypass mode. 1147 assert(fromCpuSide); 1148 1149 // The cache should be flushed if we are in cache bypass mode, 1150 // so we don't need to check if we need to update anything. 1151 memSidePort->sendFunctional(pkt); 1152 return; 1153 } 1154 1155 Addr blk_addr = blockAlign(pkt->getAddr()); 1156 bool is_secure = pkt->isSecure(); 1157 CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure); 1158 MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure); 1159 1160 pkt->pushLabel(name()); 1161 1162 CacheBlkPrintWrapper cbpw(blk); 1163 1164 // Note that just because an L2/L3 has valid data doesn't mean an 1165 // L1 doesn't have a more up-to-date modified copy that still 1166 // needs to be found. As a result we always update the request if 1167 // we have it, but only declare it satisfied if we are the owner. 1168 1169 // see if we have data at all (owned or otherwise) 1170 bool have_data = blk && blk->isValid() 1171 && pkt->checkFunctional(&cbpw, blk_addr, is_secure, blkSize, 1172 blk->data); 1173 1174 // data we have is dirty if marked as such or if we have an 1175 // in-service MSHR that is pending a modified line 1176 bool have_dirty = 1177 have_data && (blk->isDirty() || 1178 (mshr && mshr->inService && mshr->isPendingModified())); 1179 1180 bool done = have_dirty 1181 || cpuSidePort->checkFunctional(pkt) 1182 || mshrQueue.checkFunctional(pkt, blk_addr) 1183 || writeBuffer.checkFunctional(pkt, blk_addr) 1184 || memSidePort->checkFunctional(pkt); 1185 1186 DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(), 1187 (blk && blk->isValid()) ? "valid " : "", 1188 have_data ? "data " : "", done ? 
"done " : ""); 1189 1190 // We're leaving the cache, so pop cache->name() label 1191 pkt->popLabel(); 1192 1193 if (done) { 1194 pkt->makeResponse(); 1195 } else { 1196 // if it came as a request from the CPU side then make sure it 1197 // continues towards the memory side 1198 if (fromCpuSide) { 1199 memSidePort->sendFunctional(pkt); 1200 } else if (cpuSidePort->isSnooping()) { 1201 // if it came from the memory side, it must be a snoop request 1202 // and we should only forward it if we are forwarding snoops 1203 cpuSidePort->sendFunctionalSnoop(pkt); 1204 } 1205 } 1206} 1207 1208 1209///////////////////////////////////////////////////// 1210// 1211// Response handling: responses from the memory side 1212// 1213///////////////////////////////////////////////////// 1214 1215 1216void 1217Cache::handleUncacheableWriteResp(PacketPtr pkt) 1218{ 1219 Tick completion_time = clockEdge(responseLatency) + 1220 pkt->headerDelay + pkt->payloadDelay; 1221 1222 // Reset the bus additional time as it is now accounted for 1223 pkt->headerDelay = pkt->payloadDelay = 0; 1224 1225 cpuSidePort->schedTimingResp(pkt, completion_time, true); 1226} 1227 1228void 1229Cache::recvTimingResp(PacketPtr pkt) 1230{ 1231 assert(pkt->isResponse()); 1232 1233 // all header delay should be paid for by the crossbar, unless 1234 // this is a prefetch response from above 1235 panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp, 1236 "%s saw a non-zero packet delay\n", name()); 1237 1238 bool is_error = pkt->isError(); 1239 1240 if (is_error) { 1241 DPRINTF(Cache, "%s: Cache received %s with error\n", __func__, 1242 pkt->print()); 1243 } 1244 1245 DPRINTF(Cache, "%s: Handling response %s\n", __func__, 1246 pkt->print()); 1247 1248 // if this is a write, we should be looking at an uncacheable 1249 // write 1250 if (pkt->isWrite()) { 1251 assert(pkt->req->isUncacheable()); 1252 handleUncacheableWriteResp(pkt); 1253 return; 1254 } 1255 1256 // we have dealt with any (uncacheable) writes above, from here on 1257 // we know we are dealing with an MSHR due to a miss or a prefetch 1258 MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState()); 1259 assert(mshr); 1260 1261 if (mshr == noTargetMSHR) { 1262 // we always clear at least one target 1263 clearBlocked(Blocked_NoTargets); 1264 noTargetMSHR = nullptr; 1265 } 1266 1267 // Initial target is used just for stats 1268 MSHR::Target *initial_tgt = mshr->getTarget(); 1269 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex(); 1270 Tick miss_latency = curTick() - initial_tgt->recvTime; 1271 1272 if (pkt->req->isUncacheable()) { 1273 assert(pkt->req->masterId() < system->maxMasters()); 1274 mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] += 1275 miss_latency; 1276 } else { 1277 assert(pkt->req->masterId() < system->maxMasters()); 1278 mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] += 1279 miss_latency; 1280 } 1281 1282 bool wasFull = mshrQueue.isFull(); 1283 1284 PacketList writebacks; 1285 1286 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 1287 1288 // upgrade deferred targets if the response has no sharers, and is 1289 // thus passing writable 1290 if (!pkt->hasSharers()) { 1291 mshr->promoteWritable(); 1292 } 1293 1294 bool is_fill = !mshr->isForward && 1295 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp); 1296 1297 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure()); 1298 1299 if (is_fill && !is_error) { 1300 DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n", 1301 pkt->getAddr()); 1302 1303 blk = 
handleFill(pkt, blk, writebacks, mshr->allocOnFill()); 1304 assert(blk != nullptr); 1305 } 1306 1307 // allow invalidation responses originating from write-line 1308 // requests to be discarded 1309 bool is_invalidate = pkt->isInvalidate(); 1310 1311 // First offset for critical word first calculations 1312 int initial_offset = initial_tgt->pkt->getOffset(blkSize); 1313 1314 bool from_cache = false; 1315 MSHR::TargetList targets = mshr->extractServiceableTargets(pkt); 1316 for (auto &target: targets) { 1317 Packet *tgt_pkt = target.pkt; 1318 switch (target.source) { 1319 case MSHR::Target::FromCPU: 1320 Tick completion_time; 1321 // Here we charge on completion_time the delay of the xbar if the 1322 // packet comes from it, charged on headerDelay. 1323 completion_time = pkt->headerDelay; 1324 1325 // Software prefetch handling for cache closest to core 1326 if (tgt_pkt->cmd.isSWPrefetch()) { 1327 // a software prefetch would have already been ack'd 1328 // immediately with dummy data so the core would be able to 1329 // retire it. This request completes right here, so we 1330 // deallocate it. 1331 delete tgt_pkt->req; 1332 delete tgt_pkt; 1333 break; // skip response 1334 } 1335 1336 // keep track of whether we have responded to another 1337 // cache 1338 from_cache = from_cache || tgt_pkt->fromCache(); 1339 1340 // unlike the other packet flows, where data is found in other 1341 // caches or memory and brought back, write-line requests always 1342 // have the data right away, so the above check for "is fill?" 1343 // cannot actually be determined until examining the stored MSHR 1344 // state. We "catch up" with that logic here, which is duplicated 1345 // from above. 1346 if (tgt_pkt->cmd == MemCmd::WriteLineReq) { 1347 assert(!is_error); 1348 // we got the block in a writable state, so promote 1349 // any deferred targets if possible 1350 mshr->promoteWritable(); 1351 // NB: we use the original packet here and not the response! 1352 blk = handleFill(tgt_pkt, blk, writebacks, 1353 targets.allocOnFill); 1354 assert(blk != nullptr); 1355 1356 // treat as a fill, and discard the invalidation 1357 // response 1358 is_fill = true; 1359 is_invalidate = false; 1360 } 1361 1362 if (is_fill) { 1363 satisfyRequest(tgt_pkt, blk, true, mshr->hasPostDowngrade()); 1364 1365 // How many bytes past the first request is this one 1366 int transfer_offset = 1367 tgt_pkt->getOffset(blkSize) - initial_offset; 1368 if (transfer_offset < 0) { 1369 transfer_offset += blkSize; 1370 } 1371 1372 // If not critical word (offset) return payloadDelay. 1373 // responseLatency is the latency of the return path 1374 // from lower level caches/memory to an upper level cache or 1375 // the core. 1376 completion_time += clockEdge(responseLatency) + 1377 (transfer_offset ? pkt->payloadDelay : 0); 1378 1379 assert(!tgt_pkt->req->isUncacheable()); 1380 1381 assert(tgt_pkt->req->masterId() < system->maxMasters()); 1382 missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] += 1383 completion_time - target.recvTime; 1384 } else if (pkt->cmd == MemCmd::UpgradeFailResp) { 1385 // failed StoreCond upgrade 1386 assert(tgt_pkt->cmd == MemCmd::StoreCondReq || 1387 tgt_pkt->cmd == MemCmd::StoreCondFailReq || 1388 tgt_pkt->cmd == MemCmd::SCUpgradeFailReq); 1389 // responseLatency is the latency of the return path 1390 // from lower level caches/memory to an upper level cache or 1391 // the core. 
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                tgt_pkt->req->setExtraData(0);
            } else {
                // not a cache fill, just forwarding response
                // responseLatency is the latency of the return path
                // from lower level caches/memory to the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                if (pkt->isRead() && !is_error) {
                    // sanity check
                    assert(pkt->getAddr() == tgt_pkt->getAddr());
                    assert(pkt->getSize() >= tgt_pkt->getSize());

                    tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
                }
            }
            tgt_pkt->makeTimingResponse();
            // if this packet is an error copy that to the new packet
            if (is_error)
                tgt_pkt->copyError(pkt);
            if (tgt_pkt->cmd == MemCmd::ReadResp &&
                (is_invalidate || mshr->hasPostInvalidate())) {
                // If an intermediate cache got ReadRespWithInvalidate,
                // propagate that. The response should not have
                // isInvalidate() set otherwise.
                tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
                DPRINTF(Cache, "%s: updated cmd to %s\n", __func__,
                        tgt_pkt->print());
            }
            // Reset the bus additional time as it is now accounted for
            tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
            cpuSidePort->schedTimingResp(tgt_pkt, completion_time, true);
            break;

          case MSHR::Target::FromPrefetcher:
            assert(tgt_pkt->cmd == MemCmd::HardPFReq);
            if (blk)
                blk->status |= BlkHWPrefetched;
            delete tgt_pkt->req;
            delete tgt_pkt;
            break;

          case MSHR::Target::FromSnoop:
            // I don't believe that a snoop can be in an error state
            assert(!is_error);
            // response to snoop request
            DPRINTF(Cache, "processing deferred snoop...\n");
            // If the response is invalidating, a snooping target can
            // be satisfied if it is also invalidating. If the response
            // is not only invalidating, but more specifically an
            // InvalidateResp, the MSHR was created due to an
            // InvalidateReq and a cache above is waiting to satisfy a
            // WriteLineReq. In this case even a non-invalidating snoop
            // is added as a target here since this is the ordering
            // point. When the InvalidateResp reaches this cache, the
            // snooping target will further snoop the cache above with
            // the WriteLineReq.
            assert(!(is_invalidate &&
                     pkt->cmd != MemCmd::InvalidateResp &&
                     !mshr->hasPostInvalidate()));
            handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
            break;

          default:
            panic("Illegal target->source enum %d\n", target.source);
        }
    }

    maintainClusivity(from_cache, blk);

    if (blk && blk->isValid()) {
        // an invalidate response stemming from a write line request
        // should not invalidate the block, so check if the
        // invalidation should be discarded
        if (is_invalidate || mshr->hasPostInvalidate()) {
            invalidateBlock(blk);
        } else if (mshr->hasPostDowngrade()) {
            blk->status &= ~BlkWritable;
        }
    }

    if (mshr->promoteDeferredTargets()) {
        // avoid later read getting stale data while write miss is
        // outstanding..
see comment in timingAccess() 1476 if (blk) { 1477 blk->status &= ~BlkReadable; 1478 } 1479 mshrQueue.markPending(mshr); 1480 schedMemSideSendEvent(clockEdge() + pkt->payloadDelay); 1481 } else { 1482 mshrQueue.deallocate(mshr); 1483 if (wasFull && !mshrQueue.isFull()) { 1484 clearBlocked(Blocked_NoMSHRs); 1485 } 1486 1487 // Request the bus for a prefetch if this deallocation freed enough 1488 // MSHRs for a prefetch to take place 1489 if (prefetcher && mshrQueue.canPrefetch()) { 1490 Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(), 1491 clockEdge()); 1492 if (next_pf_time != MaxTick) 1493 schedMemSideSendEvent(next_pf_time); 1494 } 1495 } 1496 // reset the xbar additional timinig as it is now accounted for 1497 pkt->headerDelay = pkt->payloadDelay = 0; 1498 1499 // copy writebacks to write buffer 1500 doWritebacks(writebacks, forward_time); 1501 1502 // if we used temp block, check to see if its valid and then clear it out 1503 if (blk == tempBlock && tempBlock->isValid()) { 1504 // We use forwardLatency here because we are copying 1505 // Writebacks/CleanEvicts to write buffer. It specifies the latency to 1506 // allocate an internal buffer and to schedule an event to the 1507 // queued port. 1508 if (blk->isDirty() || writebackClean) { 1509 PacketPtr wbPkt = writebackBlk(blk); 1510 allocateWriteBuffer(wbPkt, forward_time); 1511 // Set BLOCK_CACHED flag if cached above. 1512 if (isCachedAbove(wbPkt)) 1513 wbPkt->setBlockCached(); 1514 } else { 1515 PacketPtr wcPkt = cleanEvictBlk(blk); 1516 // Check to see if block is cached above. If not allocate 1517 // write buffer 1518 if (isCachedAbove(wcPkt)) 1519 delete wcPkt; 1520 else 1521 allocateWriteBuffer(wcPkt, forward_time); 1522 } 1523 blk->invalidate(); 1524 } 1525 1526 DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print()); 1527 delete pkt; 1528} 1529 1530PacketPtr 1531Cache::writebackBlk(CacheBlk *blk) 1532{ 1533 chatty_assert(!isReadOnly || writebackClean, 1534 "Writeback from read-only cache"); 1535 assert(blk && blk->isValid() && (blk->isDirty() || writebackClean)); 1536 1537 writebacks[Request::wbMasterId]++; 1538 1539 Request *req = new Request(tags->regenerateBlkAddr(blk->tag, blk->set), 1540 blkSize, 0, Request::wbMasterId); 1541 if (blk->isSecure()) 1542 req->setFlags(Request::SECURE); 1543 1544 req->taskId(blk->task_id); 1545 blk->task_id= ContextSwitchTaskId::Unknown; 1546 blk->tickInserted = curTick(); 1547 1548 PacketPtr pkt = 1549 new Packet(req, blk->isDirty() ? 
1550 MemCmd::WritebackDirty : MemCmd::WritebackClean); 1551 1552 DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n", 1553 pkt->print(), blk->isWritable(), blk->isDirty()); 1554 1555 if (blk->isWritable()) { 1556 // not asserting shared means we pass the block in modified 1557 // state, mark our own block non-writeable 1558 blk->status &= ~BlkWritable; 1559 } else { 1560 // we are in the Owned state, tell the receiver 1561 pkt->setHasSharers(); 1562 } 1563 1564 // make sure the block is not marked dirty 1565 blk->status &= ~BlkDirty; 1566 1567 pkt->allocate(); 1568 std::memcpy(pkt->getPtr<uint8_t>(), blk->data, blkSize); 1569 1570 return pkt; 1571} 1572 1573PacketPtr 1574Cache::cleanEvictBlk(CacheBlk *blk) 1575{ 1576 assert(!writebackClean); 1577 assert(blk && blk->isValid() && !blk->isDirty()); 1578 // Creating a zero sized write, a message to the snoop filter 1579 Request *req = 1580 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0, 1581 Request::wbMasterId); 1582 if (blk->isSecure()) 1583 req->setFlags(Request::SECURE); 1584 1585 req->taskId(blk->task_id); 1586 blk->task_id = ContextSwitchTaskId::Unknown; 1587 blk->tickInserted = curTick(); 1588 1589 PacketPtr pkt = new Packet(req, MemCmd::CleanEvict); 1590 pkt->allocate(); 1591 DPRINTF(Cache, "Create CleanEvict %s\n", pkt->print()); 1592 1593 return pkt; 1594} 1595 1596void 1597Cache::memWriteback() 1598{ 1599 CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor); 1600 tags->forEachBlk(visitor); 1601} 1602 1603void 1604Cache::memInvalidate() 1605{ 1606 CacheBlkVisitorWrapper visitor(*this, &Cache::invalidateVisitor); 1607 tags->forEachBlk(visitor); 1608} 1609 1610bool 1611Cache::isDirty() const 1612{ 1613 CacheBlkIsDirtyVisitor visitor; 1614 tags->forEachBlk(visitor); 1615 1616 return visitor.isDirty(); 1617} 1618 1619bool 1620Cache::writebackVisitor(CacheBlk &blk) 1621{ 1622 if (blk.isDirty()) { 1623 assert(blk.isValid()); 1624 1625 Request request(tags->regenerateBlkAddr(blk.tag, blk.set), 1626 blkSize, 0, Request::funcMasterId); 1627 request.taskId(blk.task_id); 1628 1629 Packet packet(&request, MemCmd::WriteReq); 1630 packet.dataStatic(blk.data); 1631 1632 memSidePort->sendFunctional(&packet); 1633 1634 blk.status &= ~BlkDirty; 1635 } 1636 1637 return true; 1638} 1639 1640bool 1641Cache::invalidateVisitor(CacheBlk &blk) 1642{ 1643 1644 if (blk.isDirty()) 1645 warn_once("Invalidating dirty cache lines. Expect things to break.\n"); 1646 1647 if (blk.isValid()) { 1648 assert(!blk.isDirty()); 1649 tags->invalidate(&blk); 1650 blk.invalidate(); 1651 } 1652 1653 return true; 1654} 1655 1656CacheBlk* 1657Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks) 1658{ 1659 CacheBlk *blk = tags->findVictim(addr); 1660 1661 // It is valid to return nullptr if there is no victim 1662 if (!blk) 1663 return nullptr; 1664 1665 if (blk->isValid()) { 1666 Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set); 1667 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure()); 1668 if (repl_mshr) { 1669 // must be an outstanding upgrade request 1670 // on a block we're about to replace... 1671 assert(!blk->isWritable() || blk->isDirty()); 1672 assert(repl_mshr->needsWritable()); 1673 // too hard to replace block with transient state 1674 // allocation failed, block not inserted 1675 return nullptr; 1676 } else { 1677 DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx " 1678 "(%s): %s\n", repl_addr, blk->isSecure() ? "s" : "ns", 1679 addr, is_secure ? 
"s" : "ns", 1680 blk->isDirty() ? "writeback" : "clean"); 1681 1682 if (blk->wasPrefetched()) { 1683 unusedPrefetches++; 1684 } 1685 // Will send up Writeback/CleanEvict snoops via isCachedAbove 1686 // when pushing this writeback list into the write buffer. 1687 if (blk->isDirty() || writebackClean) { 1688 // Save writeback packet for handling by caller 1689 writebacks.push_back(writebackBlk(blk)); 1690 } else { 1691 writebacks.push_back(cleanEvictBlk(blk)); 1692 } 1693 } 1694 } 1695 1696 return blk; 1697} 1698 1699void 1700Cache::invalidateBlock(CacheBlk *blk) 1701{ 1702 if (blk != tempBlock) 1703 tags->invalidate(blk); 1704 blk->invalidate(); 1705} 1706 1707// Note that the reason we return a list of writebacks rather than 1708// inserting them directly in the write buffer is that this function 1709// is called by both atomic and timing-mode accesses, and in atomic 1710// mode we don't mess with the write buffer (we just perform the 1711// writebacks atomically once the original request is complete). 1712CacheBlk* 1713Cache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks, 1714 bool allocate) 1715{ 1716 assert(pkt->isResponse() || pkt->cmd == MemCmd::WriteLineReq); 1717 Addr addr = pkt->getAddr(); 1718 bool is_secure = pkt->isSecure(); 1719#if TRACING_ON 1720 CacheBlk::State old_state = blk ? blk->status : 0; 1721#endif 1722 1723 // When handling a fill, we should have no writes to this line. 1724 assert(addr == blockAlign(addr)); 1725 assert(!writeBuffer.findMatch(addr, is_secure)); 1726 1727 if (blk == nullptr) { 1728 // better have read new data... 1729 assert(pkt->hasData()); 1730 1731 // only read responses and write-line requests have data; 1732 // note that we don't write the data here for write-line - that 1733 // happens in the subsequent call to satisfyRequest 1734 assert(pkt->isRead() || pkt->cmd == MemCmd::WriteLineReq); 1735 1736 // need to do a replacement if allocating, otherwise we stick 1737 // with the temporary storage 1738 blk = allocate ? allocateBlock(addr, is_secure, writebacks) : nullptr; 1739 1740 if (blk == nullptr) { 1741 // No replaceable block or a mostly exclusive 1742 // cache... just use temporary storage to complete the 1743 // current request and then get rid of it 1744 assert(!tempBlock->isValid()); 1745 blk = tempBlock; 1746 tempBlock->set = tags->extractSet(addr); 1747 tempBlock->tag = tags->extractTag(addr); 1748 // @todo: set security state as well... 1749 DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr, 1750 is_secure ? "s" : "ns"); 1751 } else { 1752 tags->insertBlock(pkt, blk); 1753 } 1754 1755 // we should never be overwriting a valid block 1756 assert(!blk->isValid()); 1757 } else { 1758 // existing block... probably an upgrade 1759 assert(blk->tag == tags->extractTag(addr)); 1760 // either we're getting new data or the block should already be valid 1761 assert(pkt->hasData() || blk->isValid()); 1762 // don't clear block status... 
if block is already dirty we 1763 // don't want to lose that 1764 } 1765 1766 if (is_secure) 1767 blk->status |= BlkSecure; 1768 blk->status |= BlkValid | BlkReadable; 1769 1770 // sanity check for whole-line writes, which should always be 1771 // marked as writable as part of the fill, and then later marked 1772 // dirty as part of satisfyRequest 1773 if (pkt->cmd == MemCmd::WriteLineReq) { 1774 assert(!pkt->hasSharers()); 1775 // at the moment other caches do not respond to the 1776 // invalidation requests corresponding to a whole-line write 1777 assert(!pkt->cacheResponding()); 1778 } 1779 1780 // here we deal with setting the appropriate state of the line, 1781 // and we start by looking at the hasSharers flag, and ignore the 1782 // cacheResponding flag (normally signalling dirty data) if the 1783 // packet has sharers, thus the line is never allocated as Owned 1784 // (dirty but not writable), and always ends up being either 1785 // Shared, Exclusive or Modified, see Packet::setCacheResponding 1786 // for more details 1787 if (!pkt->hasSharers()) { 1788 // we could get a writable line from memory (rather than a 1789 // cache) even in a read-only cache, note that we set this bit 1790 // even for a read-only cache, possibly revisit this decision 1791 blk->status |= BlkWritable; 1792 1793 // check if we got this via cache-to-cache transfer (i.e., from a 1794 // cache that had the block in Modified or Owned state) 1795 if (pkt->cacheResponding()) { 1796 // we got the block in Modified state, and invalidated the 1797 // owners copy 1798 blk->status |= BlkDirty; 1799 1800 chatty_assert(!isReadOnly, "Should never see dirty snoop response " 1801 "in read-only cache %s\n", name()); 1802 } 1803 } 1804 1805 DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n", 1806 addr, is_secure ? "s" : "ns", old_state, blk->print()); 1807 1808 // if we got new data, copy it in (checking for a read response 1809 // and a response that has data is the same in the end) 1810 if (pkt->isRead()) { 1811 // sanity checks 1812 assert(pkt->hasData()); 1813 assert(pkt->getSize() == blkSize); 1814 1815 std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize); 1816 } 1817 // We pay for fillLatency here. 1818 blk->whenReady = clockEdge() + fillLatency * clockPeriod() + 1819 pkt->payloadDelay; 1820 1821 return blk; 1822} 1823 1824 1825///////////////////////////////////////////////////// 1826// 1827// Snoop path: requests coming in from the memory side 1828// 1829///////////////////////////////////////////////////// 1830 1831void 1832Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data, 1833 bool already_copied, bool pending_inval) 1834{ 1835 // sanity check 1836 assert(req_pkt->isRequest()); 1837 assert(req_pkt->needsResponse()); 1838 1839 DPRINTF(Cache, "%s: for %s\n", __func__, req_pkt->print()); 1840 // timing-mode snoop responses require a new packet, unless we 1841 // already made a copy... 
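    // (The snooped request packet keeps propagating to the other snoopers
    // on the crossbar, so it is not turned into a response in place here;
    // deferred snoops already carry the copy made in MSHR::handleSnoop.)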
1842 PacketPtr pkt = req_pkt; 1843 if (!already_copied) 1844 // do not clear flags, and allocate space for data if the 1845 // packet needs it (the only packets that carry data are read 1846 // responses) 1847 pkt = new Packet(req_pkt, false, req_pkt->isRead()); 1848 1849 assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() || 1850 pkt->hasSharers()); 1851 pkt->makeTimingResponse(); 1852 if (pkt->isRead()) { 1853 pkt->setDataFromBlock(blk_data, blkSize); 1854 } 1855 if (pkt->cmd == MemCmd::ReadResp && pending_inval) { 1856 // Assume we defer a response to a read from a far-away cache 1857 // A, then later defer a ReadExcl from a cache B on the same 1858 // bus as us. We'll assert cacheResponding in both cases, but 1859 // in the latter case cacheResponding will keep the 1860 // invalidation from reaching cache A. This special response 1861 // tells cache A that it gets the block to satisfy its read, 1862 // but must immediately invalidate it. 1863 pkt->cmd = MemCmd::ReadRespWithInvalidate; 1864 } 1865 // Here we consider forward_time, paying for just forward latency and 1866 // also charging the delay provided by the xbar. 1867 // forward_time is used as send_time in next allocateWriteBuffer(). 1868 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 1869 // Here we reset the timing of the packet. 1870 pkt->headerDelay = pkt->payloadDelay = 0; 1871 DPRINTF(CacheVerbose, "%s: created response: %s tick: %lu\n", __func__, 1872 pkt->print(), forward_time); 1873 memSidePort->schedTimingSnoopResp(pkt, forward_time, true); 1874} 1875 1876uint32_t 1877Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing, 1878 bool is_deferred, bool pending_inval) 1879{ 1880 DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print()); 1881 // deferred snoops can only happen in timing mode 1882 assert(!(is_deferred && !is_timing)); 1883 // pending_inval only makes sense on deferred snoops 1884 assert(!(pending_inval && !is_deferred)); 1885 assert(pkt->isRequest()); 1886 1887 // the packet may get modified if we or a forwarded snooper 1888 // responds in atomic mode, so remember a few things about the 1889 // original packet up front 1890 bool invalidate = pkt->isInvalidate(); 1891 bool M5_VAR_USED needs_writable = pkt->needsWritable(); 1892 1893 // at the moment we could get an uncacheable write which does not 1894 // have the invalidate flag, and we need a suitable way of dealing 1895 // with this case 1896 panic_if(invalidate && pkt->req->isUncacheable(), 1897 "%s got an invalidating uncacheable snoop request %s", 1898 name(), pkt->print()); 1899 1900 uint32_t snoop_delay = 0; 1901 1902 if (forwardSnoops) { 1903 // first propagate snoop upward to see if anyone above us wants to 1904 // handle it. 
save & restore packet src since it will get 1905 // rewritten to be relative to cpu-side bus (if any) 1906 bool alreadyResponded = pkt->cacheResponding(); 1907 if (is_timing) { 1908 // copy the packet so that we can clear any flags before 1909 // forwarding it upwards, we also allocate data (passing 1910 // the pointer along in case of static data), in case 1911 // there is a snoop hit in upper levels 1912 Packet snoopPkt(pkt, true, true); 1913 snoopPkt.setExpressSnoop(); 1914 // the snoop packet does not need to wait any additional 1915 // time 1916 snoopPkt.headerDelay = snoopPkt.payloadDelay = 0; 1917 cpuSidePort->sendTimingSnoopReq(&snoopPkt); 1918 1919 // add the header delay (including crossbar and snoop 1920 // delays) of the upward snoop to the snoop delay for this 1921 // cache 1922 snoop_delay += snoopPkt.headerDelay; 1923 1924 if (snoopPkt.cacheResponding()) { 1925 // cache-to-cache response from some upper cache 1926 assert(!alreadyResponded); 1927 pkt->setCacheResponding(); 1928 } 1929 // upstream cache has the block, or has an outstanding 1930 // MSHR, pass the flag on 1931 if (snoopPkt.hasSharers()) { 1932 pkt->setHasSharers(); 1933 } 1934 // If this request is a prefetch or clean evict and an upper level 1935 // signals block present, make sure to propagate the block 1936 // presence to the requester. 1937 if (snoopPkt.isBlockCached()) { 1938 pkt->setBlockCached(); 1939 } 1940 } else { 1941 cpuSidePort->sendAtomicSnoop(pkt); 1942 if (!alreadyResponded && pkt->cacheResponding()) { 1943 // cache-to-cache response from some upper cache: 1944 // forward response to original requester 1945 assert(pkt->isResponse()); 1946 } 1947 } 1948 } 1949 1950 if (!blk || !blk->isValid()) { 1951 DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__, 1952 pkt->print()); 1953 if (is_deferred) { 1954 // we no longer have the block, and will not respond, but a 1955 // packet was allocated in MSHR::handleSnoop and we have 1956 // to delete it 1957 assert(pkt->needsResponse()); 1958 1959 // we have passed the block to a cache upstream, that 1960 // cache should be responding 1961 assert(pkt->cacheResponding()); 1962 1963 delete pkt; 1964 } 1965 return snoop_delay; 1966 } else { 1967 DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__, 1968 pkt->print(), blk->print()); 1969 } 1970 1971 chatty_assert(!(isReadOnly && blk->isDirty()), 1972 "Should never have a dirty block in a read-only cache %s\n", 1973 name()); 1974 1975 // We may end up modifying both the block state and the packet (if 1976 // we respond in atomic mode), so just figure out what to do now 1977 // and then do it later. If we find dirty data while snooping for 1978 // an invalidate, we don't need to send a response. The 1979 // invalidation itself is taken care of below. 1980 bool respond = blk->isDirty() && pkt->needsResponse() && 1981 pkt->cmd != MemCmd::InvalidateReq; 1982 bool have_writable = blk->isWritable(); 1983 1984 // Invalidate any prefetches from below that would strip write permissions 1985 // MemCmd::HardPFReq is only observed by upstream caches. After missing 1986 // above and in its own cache, a new MemCmd::ReadReq is created that 1987 // downstream caches observe.
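    // mustCheckAbove() covers hardware prefetches and evictions, i.e.
    // packets whose handling depends on whether the line is cached in an
    // upper level; here it is enough to report that presence and return.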
1988 if (pkt->mustCheckAbove()) { 1989 DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s " 1990 "from lower cache\n", pkt->getAddr(), pkt->print()); 1991 pkt->setBlockCached(); 1992 return snoop_delay; 1993 } 1994 1995 if (pkt->isRead() && !invalidate) { 1996 // reading without requiring the line in a writable state 1997 assert(!needs_writable); 1998 pkt->setHasSharers(); 1999 2000 // if the requesting packet is uncacheable, retain the line in 2001 // the current state, otherwise unset the writable flag, 2002 // which means we go from Modified to Owned (and will respond 2003 // below), remain in Owned (and will respond below), from 2004 // Exclusive to Shared, or remain in Shared 2005 if (!pkt->req->isUncacheable()) 2006 blk->status &= ~BlkWritable; 2007 } 2008 2009 if (respond) { 2010 // prevent anyone else from responding, cache as well as 2011 // memory, and also prevent any memory from even seeing the 2012 // request 2013 pkt->setCacheResponding(); 2014 if (have_writable) { 2015 // inform the cache hierarchy that this cache had the line 2016 // in the Modified state so that we avoid unnecessary 2017 // invalidations (see Packet::setResponderHadWritable) 2018 pkt->setResponderHadWritable(); 2019 2020 // in the case of an uncacheable request there is no point 2021 // in setting the responderHadWritable flag, but since the 2022 // recipient does not care there is no harm in doing so 2023 } else { 2024 // if the packet has needsWritable set we invalidate our 2025 // copy below and all other copies will be invalidated 2026 // through express snoops, and if needsWritable is not set 2027 // we already called setHasSharers above 2028 } 2029 2030 // if we are returning a writable and dirty (Modified) line, 2031 // we should be invalidating the line 2032 panic_if(!invalidate && !pkt->hasSharers(), 2033 "%s is passing a Modified line through %s, " 2034 "but keeping the block", name(), pkt->print()); 2035 2036 if (is_timing) { 2037 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval); 2038 } else { 2039 pkt->makeAtomicResponse(); 2040 // packets such as upgrades do not actually have any data 2041 // payload 2042 if (pkt->hasData()) 2043 pkt->setDataFromBlock(blk->data, blkSize); 2044 } 2045 } 2046 2047 if (!respond && is_deferred) { 2048 assert(pkt->needsResponse()); 2049 2050 // if we copied the deferred packet with the intention to 2051 // respond, but are not responding, then a cache above us must 2052 // be, and we can use this as the indication of whether this 2053 // is a packet where we created a copy of the request or not 2054 if (!pkt->cacheResponding()) { 2055 delete pkt->req; 2056 } 2057 2058 delete pkt; 2059 } 2060 2061 // Do this last in case it deallocates block data or something 2062 // like that 2063 if (invalidate) { 2064 invalidateBlock(blk); 2065 } 2066 2067 DPRINTF(Cache, "new state is %s\n", blk->print()); 2068 2069 return snoop_delay; 2070} 2071 2072 2073void 2074Cache::recvTimingSnoopReq(PacketPtr pkt) 2075{ 2076 DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print()); 2077 2078 // Snoops shouldn't happen when bypassing caches 2079 assert(!system->bypassCaches()); 2080 2081 // no need to snoop requests that are not in range 2082 if (!inRange(pkt->getAddr())) { 2083 return; 2084 } 2085 2086 bool is_secure = pkt->isSecure(); 2087 CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure); 2088 2089 Addr blk_addr = blockAlign(pkt->getAddr()); 2090 MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure); 2091 2092 // Update the latency cost of
the snoop so that the crossbar can 2093 // account for it. Do not overwrite what other neighbouring caches 2094 // have already done, rather take the maximum. The update is 2095 // tentative, for cases where we return before an upward snoop 2096 // happens below. 2097 pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, 2098 lookupLatency * clockPeriod()); 2099 2100 // Inform request(Prefetch, CleanEvict or Writeback) from below of 2101 // MSHR hit, set setBlockCached. 2102 if (mshr && pkt->mustCheckAbove()) { 2103 DPRINTF(Cache, "Setting block cached for %s from lower cache on " 2104 "mshr hit\n", pkt->print()); 2105 pkt->setBlockCached(); 2106 return; 2107 } 2108 2109 // Let the MSHR itself track the snoop and decide whether we want 2110 // to go ahead and do the regular cache snoop 2111 if (mshr && mshr->handleSnoop(pkt, order++)) { 2112 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s)." 2113 "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns", 2114 mshr->print()); 2115 2116 if (mshr->getNumTargets() > numTarget) 2117 warn("allocating bonus target for snoop"); //handle later 2118 return; 2119 } 2120 2121 //We also need to check the writeback buffers and handle those 2122 WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure); 2123 if (wb_entry) { 2124 DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n", 2125 pkt->getAddr(), is_secure ? "s" : "ns"); 2126 // Expect to see only Writebacks and/or CleanEvicts here, both of 2127 // which should not be generated for uncacheable data. 2128 assert(!wb_entry->isUncacheable()); 2129 // There should only be a single request responsible for generating 2130 // Writebacks/CleanEvicts. 2131 assert(wb_entry->getNumTargets() == 1); 2132 PacketPtr wb_pkt = wb_entry->getTarget()->pkt; 2133 assert(wb_pkt->isEviction()); 2134 2135 if (pkt->isEviction()) { 2136 // if the block is found in the write queue, set the BLOCK_CACHED 2137 // flag for Writeback/CleanEvict snoop. On return the snoop will 2138 // propagate the BLOCK_CACHED flag in Writeback packets and prevent 2139 // any CleanEvicts from travelling down the memory hierarchy. 2140 pkt->setBlockCached(); 2141 DPRINTF(Cache, "%s: Squashing %s from lower cache on writequeue " 2142 "hit\n", __func__, pkt->print()); 2143 return; 2144 } 2145 2146 // conceptually writebacks are no different to other blocks in 2147 // this cache, so the behaviour is modelled after handleSnoop, 2148 // the difference being that instead of querying the block 2149 // state to determine if it is dirty and writable, we use the 2150 // command and fields of the writeback packet 2151 bool respond = wb_pkt->cmd == MemCmd::WritebackDirty && 2152 pkt->needsResponse() && pkt->cmd != MemCmd::InvalidateReq; 2153 bool have_writable = !wb_pkt->hasSharers(); 2154 bool invalidate = pkt->isInvalidate(); 2155 2156 if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) { 2157 assert(!pkt->needsWritable()); 2158 pkt->setHasSharers(); 2159 wb_pkt->setHasSharers(); 2160 } 2161 2162 if (respond) { 2163 pkt->setCacheResponding(); 2164 2165 if (have_writable) { 2166 pkt->setResponderHadWritable(); 2167 } 2168 2169 doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(), 2170 false, false); 2171 } 2172 2173 if (invalidate) { 2174 // Invalidation trumps our writeback... discard here 2175 // Note: markInService will remove entry from writeback buffer. 
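    // The data in the queued writeback has either been supplied in the
    // response above or is no longer needed by the invalidating requester
    // (e.g. a whole-line write), so the packet can be dropped here.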
2176 markInService(wb_entry); 2177 delete wb_pkt; 2178 } 2179 } 2180 2181 // If this was a shared writeback, there may still be 2182 // other shared copies above that require invalidation. 2183 // We could be more selective and return here if the 2184 // request is non-exclusive or if the writeback is 2185 // exclusive. 2186 uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false); 2187 2188 // Override what we did when we first saw the snoop, as we now 2189 // also have the cost of the upwards snoops to account for 2190 pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay + 2191 lookupLatency * clockPeriod()); 2192} 2193 2194bool 2195Cache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt) 2196{ 2197 // Express snoop responses from master to slave, e.g., from L1 to L2 2198 cache->recvTimingSnoopResp(pkt); 2199 return true; 2200} 2201 2202Tick 2203Cache::recvAtomicSnoop(PacketPtr pkt) 2204{ 2205 // Snoops shouldn't happen when bypassing caches 2206 assert(!system->bypassCaches()); 2207 2208 // no need to snoop requests that are not in range. 2209 if (!inRange(pkt->getAddr())) { 2210 return 0; 2211 } 2212 2213 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure()); 2214 uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false); 2215 return snoop_delay + lookupLatency * clockPeriod(); 2216} 2217 2218 2219QueueEntry* 2220Cache::getNextQueueEntry() 2221{ 2222 // Check both MSHR queue and write buffer for potential requests, 2223 // note that null does not mean there is no request, it could 2224 // simply be that it is not ready 2225 MSHR *miss_mshr = mshrQueue.getNext(); 2226 WriteQueueEntry *wq_entry = writeBuffer.getNext(); 2227 2228 // If we got a write buffer request ready, first priority is a 2229 // full write buffer, otherwise we favour the miss requests 2230 if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) { 2231 // need to search MSHR queue for conflicting earlier miss. 2232 MSHR *conflict_mshr = 2233 mshrQueue.findPending(wq_entry->blkAddr, 2234 wq_entry->isSecure); 2235 2236 if (conflict_mshr && conflict_mshr->order < wq_entry->order) { 2237 // Service misses in order until conflict is cleared. 2238 return conflict_mshr; 2239 2240 // @todo Note that we ignore the ready time of the conflict here 2241 } 2242 2243 // No conflicts; issue write 2244 return wq_entry; 2245 } else if (miss_mshr) { 2246 // need to check for conflicting earlier writeback 2247 WriteQueueEntry *conflict_mshr = 2248 writeBuffer.findPending(miss_mshr->blkAddr, 2249 miss_mshr->isSecure); 2250 if (conflict_mshr) { 2251 // not sure why we don't check order here... it was in the 2252 // original code but commented out. 2253 2254 // The only way this happens is if we are 2255 // doing a write and we didn't have permissions 2256 // then subsequently saw a writeback (owned got evicted) 2257 // We need to make sure to perform the writeback first 2258 // To preserve the dirty data, then we can issue the write 2259 2260 // should we return wq_entry here instead? I.e. do we 2261 // have to flush writes in order? I don't think so... not 2262 // for Alpha anyway. Maybe for x86? 2263 return conflict_mshr; 2264 2265 // @todo Note that we ignore the ready time of the conflict here 2266 } 2267 2268 // No conflicts; issue read 2269 return miss_mshr; 2270 } 2271 2272 // fall through... no pending requests. Try a prefetch. 
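    // A prefetch is only issued if the prefetcher has a packet ready and
    // the target line is not already present in the tags, the MSHR queue,
    // or the write buffer (all checked below).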
2273 assert(!miss_mshr && !wq_entry); 2274 if (prefetcher && mshrQueue.canPrefetch()) { 2275 // If we have a miss queue slot, we can try a prefetch 2276 PacketPtr pkt = prefetcher->getPacket(); 2277 if (pkt) { 2278 Addr pf_addr = blockAlign(pkt->getAddr()); 2279 if (!tags->findBlock(pf_addr, pkt->isSecure()) && 2280 !mshrQueue.findMatch(pf_addr, pkt->isSecure()) && 2281 !writeBuffer.findMatch(pf_addr, pkt->isSecure())) { 2282 // Update statistic on number of prefetches issued 2283 // (hwpf_mshr_misses) 2284 assert(pkt->req->masterId() < system->maxMasters()); 2285 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++; 2286 2287 // allocate an MSHR and return it, note 2288 // that we send the packet straight away, so do not 2289 // schedule the send 2290 return allocateMissBuffer(pkt, curTick(), false); 2291 } else { 2292 // free the request and packet 2293 delete pkt->req; 2294 delete pkt; 2295 } 2296 } 2297 } 2298 2299 return nullptr; 2300} 2301 2302bool 2303Cache::isCachedAbove(PacketPtr pkt, bool is_timing) const 2304{ 2305 if (!forwardSnoops) 2306 return false; 2307 // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and 2308 // Writeback snoops into upper level caches to check for copies of the 2309 // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict 2310 // packet, the cache can inform the crossbar below of presence or absence 2311 // of the block. 2312 if (is_timing) { 2313 Packet snoop_pkt(pkt, true, false); 2314 snoop_pkt.setExpressSnoop(); 2315 // Assert that packet is either Writeback or CleanEvict and not a 2316 // prefetch request because prefetch requests need an MSHR and may 2317 // generate a snoop response. 2318 assert(pkt->isEviction()); 2319 snoop_pkt.senderState = nullptr; 2320 cpuSidePort->sendTimingSnoopReq(&snoop_pkt); 2321 // Writeback/CleanEvict snoops do not generate a snoop response. 2322 assert(!(snoop_pkt.cacheResponding())); 2323 return snoop_pkt.isBlockCached(); 2324 } else { 2325 cpuSidePort->sendAtomicSnoop(pkt); 2326 return pkt->isBlockCached(); 2327 } 2328} 2329 2330Tick 2331Cache::nextQueueReadyTime() const 2332{ 2333 Tick nextReady = std::min(mshrQueue.nextReadyTime(), 2334 writeBuffer.nextReadyTime()); 2335 2336 // Don't signal prefetch ready time if no MSHRs available 2337 // Will signal once enough MSHRs are deallocated 2338 if (prefetcher && mshrQueue.canPrefetch()) { 2339 nextReady = std::min(nextReady, 2340 prefetcher->nextPrefetchReadyTime()); 2341 } 2342 2343 return nextReady; 2344} 2345 2346bool 2347Cache::sendMSHRQueuePacket(MSHR* mshr) 2348{ 2349 assert(mshr); 2350 2351 // use request from 1st target 2352 PacketPtr tgt_pkt = mshr->getTarget()->pkt; 2353 2354 DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print()); 2355 2356 CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure); 2357 2358 if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) { 2359 // we should never have hardware prefetches to allocated 2360 // blocks 2361 assert(blk == nullptr); 2362 2363 // We need to check the caches above us to verify that 2364 // they don't have a copy of this block in the dirty state 2365 // at the moment. Without this check we could get a stale 2366 // copy from memory that might get used in place of the 2367 // dirty one.
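    // The check is done with an express snoop sent upwards before the
    // prefetch is issued downstream: depending on the outcome the prefetch
    // is either serviced by the responding cache, squashed because the
    // block is cached above, or sent on as a normal miss.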
2368 Packet snoop_pkt(tgt_pkt, true, false); 2369 snoop_pkt.setExpressSnoop(); 2370 // We are sending this packet upwards, but if it hits we will 2371 // get a snoop response that we end up treating just like a 2372 // normal response, hence it needs the MSHR as its sender 2373 // state 2374 snoop_pkt.senderState = mshr; 2375 cpuSidePort->sendTimingSnoopReq(&snoop_pkt); 2376 2377 // Check to see if the prefetch was squashed by an upper cache 2378 // (to prevent us from grabbing the line), or if a writeback 2379 // arrived between the time the prefetch was placed in the MSHRs 2380 // and when it was selected to be sent; either way the prefetch 2381 // should not be issued downstream. 2382 2383 // It is important to check cacheResponding before 2384 // prefetchSquashed. If another cache has committed to 2385 // responding, it will be sending a dirty response which will 2386 // arrive at the MSHR allocated for this request. Checking the 2387 // prefetchSquash first may result in the MSHR being 2388 // prematurely deallocated. 2389 if (snoop_pkt.cacheResponding()) { 2390 auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req); 2391 assert(r.second); 2392 2393 // if we are getting a snoop response with no sharers it 2394 // will be allocated as Modified 2395 bool pending_modified_resp = !snoop_pkt.hasSharers(); 2396 markInService(mshr, pending_modified_resp); 2397 2398 DPRINTF(Cache, "Upward snoop of prefetch for addr" 2399 " %#x (%s) hit\n", 2400 tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns"); 2401 return false; 2402 } 2403 2404 if (snoop_pkt.isBlockCached()) { 2405 DPRINTF(Cache, "Block present, prefetch squashed by cache. " 2406 "Deallocating mshr target %#x.\n", 2407 mshr->blkAddr); 2408 2409 // Deallocate the mshr target 2410 if (mshrQueue.forceDeallocateTarget(mshr)) { 2411 // Clear the blocked state if this deallocation freed an 2412 // MSHR when all had previously been utilized 2413 clearBlocked(Blocked_NoMSHRs); 2414 } 2415 return false; 2416 } 2417 } 2418 2419 // either a prefetch that is not present upstream, or a normal 2420 // MSHR request, proceed to get the packet to send downstream 2421 PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable()); 2422 2423 mshr->isForward = (pkt == nullptr); 2424 2425 if (mshr->isForward) { 2426 // not a cache block request, but a response is expected 2427 // make copy of current packet to forward, keep current 2428 // copy for response handling 2429 pkt = new Packet(tgt_pkt, false, true); 2430 assert(!pkt->isWrite()); 2431 } 2432 2433 // play it safe and append (rather than set) the sender state, 2434 // as forwarded packets may already have existing state 2435 pkt->pushSenderState(mshr); 2436 2437 if (!memSidePort->sendTimingReq(pkt)) { 2438 // we are awaiting a retry, but we 2439 // delete the packet and will be creating a new packet 2440 // when we get the opportunity 2441 delete pkt; 2442 2443 // note that we have now masked any requestBus and 2444 // schedSendEvent (we will wait for a retry before 2445 // doing anything), and this is so even if we do not 2446 // care about this packet and might override it before 2447 // it gets retried 2448 return true; 2449 } else { 2450 // As part of the call to sendTimingReq the packet is 2451 // forwarded to all neighbouring caches (and any caches 2452 // above them) as a snoop.
Thus at this point we know if 2453 // any of the neighbouring caches are responding, and if 2454 // so, we know it is dirty, and we can determine if it is 2455 // being passed as Modified, making our MSHR the ordering 2456 // point 2457 bool pending_modified_resp = !pkt->hasSharers() && 2458 pkt->cacheResponding(); 2459 markInService(mshr, pending_modified_resp); 2460 return false; 2461 } 2462} 2463 2464bool 2465Cache::sendWriteQueuePacket(WriteQueueEntry* wq_entry) 2466{ 2467 assert(wq_entry); 2468 2469 // always a single target for write queue entries 2470 PacketPtr tgt_pkt = wq_entry->getTarget()->pkt; 2471 2472 DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print()); 2473 2474 // forward as is, both for evictions and uncacheable writes 2475 if (!memSidePort->sendTimingReq(tgt_pkt)) { 2476 // note that we have now masked any requestBus and 2477 // schedSendEvent (we will wait for a retry before 2478 // doing anything), and this is so even if we do not 2479 // care about this packet and might override it before 2480 // it gets retried 2481 return true; 2482 } else { 2483 markInService(wq_entry); 2484 return false; 2485 } 2486} 2487 2488void 2489Cache::serialize(CheckpointOut &cp) const 2490{ 2491 bool dirty(isDirty()); 2492 2493 if (dirty) { 2494 warn("*** The cache still contains dirty data. ***\n"); 2495 warn(" Make sure to drain the system using the correct flags.\n"); 2496 warn(" This checkpoint will not restore correctly and dirty data " 2497 " in the cache will be lost!\n"); 2498 } 2499 2500 // Since we don't checkpoint the data in the cache, any dirty data 2501 // will be lost when restoring from a checkpoint of a system that 2502 // wasn't drained properly. Flag the checkpoint as invalid if the 2503 // cache contains dirty data. 2504 bool bad_checkpoint(dirty); 2505 SERIALIZE_SCALAR(bad_checkpoint); 2506} 2507 2508void 2509Cache::unserialize(CheckpointIn &cp) 2510{ 2511 bool bad_checkpoint; 2512 UNSERIALIZE_SCALAR(bad_checkpoint); 2513 if (bad_checkpoint) { 2514 fatal("Restoring from checkpoints with dirty caches is not supported " 2515 "in the classic memory system. 
Please remove any caches or " 2516 "drain them properly before taking checkpoints.\n"); 2517 } 2518} 2519 2520/////////////// 2521// 2522// CpuSidePort 2523// 2524/////////////// 2525 2526AddrRangeList 2527Cache::CpuSidePort::getAddrRanges() const 2528{ 2529 return cache->getAddrRanges(); 2530} 2531 2532bool 2533Cache::CpuSidePort::recvTimingReq(PacketPtr pkt) 2534{ 2535 assert(!cache->system->bypassCaches()); 2536 2537 bool success = false; 2538 2539 // always let express snoop packets through even if blocked 2540 if (pkt->isExpressSnoop()) { 2541 // do not change the current retry state 2542 bool M5_VAR_USED bypass_success = cache->recvTimingReq(pkt); 2543 assert(bypass_success); 2544 return true; 2545 } else if (blocked || mustSendRetry) { 2546 // either already committed to send a retry, or blocked 2547 success = false; 2548 } else { 2549 // pass it on to the cache, and let the cache decide if we 2550 // have to retry or not 2551 success = cache->recvTimingReq(pkt); 2552 } 2553 2554 // remember if we have to retry 2555 mustSendRetry = !success; 2556 return success; 2557} 2558 2559Tick 2560Cache::CpuSidePort::recvAtomic(PacketPtr pkt) 2561{ 2562 return cache->recvAtomic(pkt); 2563} 2564 2565void 2566Cache::CpuSidePort::recvFunctional(PacketPtr pkt) 2567{ 2568 // functional request 2569 cache->functionalAccess(pkt, true); 2570} 2571 2572Cache:: 2573CpuSidePort::CpuSidePort(const std::string &_name, Cache *_cache, 2574 const std::string &_label) 2575 : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache) 2576{ 2577} 2578 2579Cache* 2580CacheParams::create() 2581{ 2582 assert(tags); 2583 2584 return new Cache(this); 2585} 2586/////////////// 2587// 2588// MemSidePort 2589// 2590/////////////// 2591 2592bool 2593Cache::MemSidePort::recvTimingResp(PacketPtr pkt) 2594{ 2595 cache->recvTimingResp(pkt); 2596 return true; 2597} 2598 2599// Express snooping requests to memside port 2600void 2601Cache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt) 2602{ 2603 // handle snooping requests 2604 cache->recvTimingSnoopReq(pkt); 2605} 2606 2607Tick 2608Cache::MemSidePort::recvAtomicSnoop(PacketPtr pkt) 2609{ 2610 return cache->recvAtomicSnoop(pkt); 2611} 2612 2613void 2614Cache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt) 2615{ 2616 // functional snoop (note that in contrast to atomic we don't have 2617 // a specific functionalSnoop method, as they have the same 2618 // behaviour regardless) 2619 cache->functionalAccess(pkt, false); 2620} 2621 2622void 2623Cache::CacheReqPacketQueue::sendDeferredPacket() 2624{ 2625 // sanity check 2626 assert(!waitingOnRetry); 2627 2628 // there should never be any deferred request packets in the 2629 // queue, instead we rely on the cache to provide the packets 2630 // from the MSHR queue or write queue 2631 assert(deferredPacketReadyTime() == MaxTick); 2632 2633 // check for request packets (requests & writebacks) 2634 QueueEntry* entry = cache.getNextQueueEntry(); 2635 2636 if (!entry) { 2637 // can happen if e.g. we attempt a writeback and fail, but 2638 // before the retry, the writeback is eliminated because 2639 // we snoop another cache's ReadEx.
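        // In that case there is nothing to send right now; a new send
        // event is scheduled below based on the next ready time of the
        // queues.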
2640 } else { 2641 // let our snoop responses go first if there are responses to 2642 // the same addresses 2643 if (checkConflictingSnoop(entry->blkAddr)) { 2644 return; 2645 } 2646 waitingOnRetry = entry->sendPacket(cache); 2647 } 2648 2649 // if we succeeded and are not waiting for a retry, schedule the 2650 // next send considering when the next queue is ready, note that 2651 // snoop responses have their own packet queue and thus schedule 2652 // their own events 2653 if (!waitingOnRetry) { 2654 schedSendEvent(cache.nextQueueReadyTime()); 2655 } 2656} 2657 2658Cache:: 2659MemSidePort::MemSidePort(const std::string &_name, Cache *_cache, 2660 const std::string &_label) 2661 : BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue), 2662 _reqQueue(*_cache, *this, _snoopRespQueue, _label), 2663 _snoopRespQueue(*_cache, *this, _label), cache(_cache) 2664{ 2665} 2666