/*
 * Copyright (c) 2010-2016 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Sandberg
 */

/**
 * @file
 * Cache definitions.
 */

#include "mem/cache/cache.hh"

#include "base/logging.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheTags.hh"
#include "debug/CacheVerbose.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "sim/sim_exit.hh"

Cache::Cache(const CacheParams *p)
    : BaseCache(p, p->system->cacheLineSize()),
      tags(p->tags),
      prefetcher(p->prefetcher),
      doFastWrites(true),
      prefetchOnAccess(p->prefetch_on_access),
      clusivity(p->clusivity),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
                                    name(), false,
                                    EventBase::Delayed_Writeback_Pri)
{
    tempBlock = new CacheBlk();
    tempBlock->data = new uint8_t[blkSize];

    cpuSidePort = new CpuSidePort(p->name + ".cpu_side", this,
                                  "CpuSidePort");
    memSidePort = new MemSidePort(p->name + ".mem_side", this,
                                  "MemSidePort");

    tags->setCache(this);
    if (prefetcher)
        prefetcher->setCache(this);
}

Cache::~Cache()
{
    delete [] tempBlock->data;
    delete tempBlock;

    delete cpuSidePort;
    delete memSidePort;
}

void
Cache::regStats()
{
    BaseCache::regStats();
}

void
Cache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = tags->extractBlkOffset(pkt->getAddr());
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}


void
Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                      bool deferred_response, bool pending_downgrade)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsWritable() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        cmpAndSwap(blk, pkt);
    } else if (pkt->isWrite()) {
        // we have the block in a writable state and can go ahead,
        // note that the line may also be considered writable in
        // downstream caches along the path to memory, but always
        // Exclusive, and never Modified
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in writable state
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty (and thus transition to the
        // Modified state) even if we are a failed StoreCond so we
        // supply data to any snoops that have appended themselves to
        // this cache before knowing the store will fail.
        blk->status |= BlkDirty;
        DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }

        // all read responses have a data payload
        assert(pkt->hasRespData());
        pkt->setDataFromBlock(blk->data, blkSize);

        // determine if this read is from a (coherent) cache or not
        if (pkt->fromCache()) {
            assert(pkt->getSize() == blkSize);
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsWritable()) {
                // sanity check
                assert(pkt->cmd == MemCmd::ReadExReq ||
                       pkt->cmd == MemCmd::SCUpgradeFailReq);
                assert(!pkt->hasSharers());

                // if we have a dirty copy, make sure the recipient
                // keeps it marked dirty (in the modified state)
                if (blk->isDirty()) {
                    pkt->setCacheResponding();
                    blk->status &= ~BlkDirty;
                }
            } else if (blk->isWritable() && !pending_downgrade &&
                       !pkt->hasSharers() &&
                       pkt->cmd != MemCmd::ReadCleanReq) {
                // we can give the requester a writable copy on a read
                // request if:
                //  - we have a writable copy at this level (& below)
                //  - we don't have a pending snoop from below
                //    signaling another read request
                //  - no other cache above has a copy (otherwise it
                //    would have set hasSharers flag when
                //    snooping the packet)
                //  - the read has not explicitly asked for a clean
                //    copy of the line
                if (blk->isDirty()) {
                    // special considerations if we're owner:
                    if (!deferred_response) {
                        // respond with the line in Modified state
                        // (cacheResponding set, hasSharers not set)
                        pkt->setCacheResponding();

                        // if this cache is mostly inclusive, we
                        // keep the block in the Exclusive state,
                        // and pass it upwards as Modified
                        // (writable and dirty), hence we have
                        // multiple caches, all on the same path
                        // towards memory, all considering the
                        // same block writable, but only one
                        // considering it Modified

                        // we get away with multiple caches (on
                        // the same path to memory) considering
                        // the block writable as we always enter
                        // the cache hierarchy through a cache,
                        // and first snoop upwards in all other
                        // branches
                        blk->status &= ~BlkDirty;
                    } else {
                        // if we're responding after our own miss,
                        // there's a window where the recipient didn't
                        // know it was getting ownership and may not
                        // have responded to snoops correctly, so we
                        // have to respond with a shared line
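                        // (within satisfyRequest the two flags
                        // effectively encode the outgoing state: the
                        // requester gets a writable copy iff
                        // hasSharers stays clear, and a dirty copy
                        // iff cacheResponding is set)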
                        pkt->setHasSharers();
                    }
                }
            } else {
                // otherwise only respond with a shared copy
                pkt->setHasSharers();
            }
        }
    } else if (pkt->isUpgrade()) {
        // sanity check
        assert(!pkt->hasSharers());

        if (blk->isDirty()) {
            // we were in the Owned state, and a cache above us that
            // has the line in Shared state needs to be made aware
            // that the data it already has is in fact dirty
            pkt->setCacheResponding();
            blk->status &= ~BlkDirty;
        }
    } else {
        assert(pkt->isInvalidate());
        invalidateBlock(blk);
        DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
                pkt->print());
    }
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

bool
Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
              PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());

    DPRINTF(CacheVerbose, "%s for %s\n", __func__, pkt->print());

    if (pkt->req->isUncacheable()) {
        DPRINTF(Cache, "uncacheable: %s\n", pkt->print());

        // flush and invalidate any existing block
        CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
        if (old_blk && old_blk->isValid()) {
            if (old_blk->isDirty() || writebackClean)
                writebacks.push_back(writebackBlk(old_blk));
            else
                writebacks.push_back(cleanEvictBlk(old_blk));
            invalidateBlock(old_blk);
        }

        blk = nullptr;
        // lookupLatency is the latency in case the request is uncacheable.
        lat = lookupLatency;
        return false;
    }

    // lat is passed by reference here; accessBlock() can modify its
    // value.
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat);

    DPRINTF(Cache, "%s %s\n", pkt->print(),
            blk ? "hit " + blk->print() : "miss");


    if (pkt->isEviction()) {
        // We check for presence of block in above caches before issuing
        // Writeback or CleanEvict to write buffer. Therefore the only
        // possible case is a CleanEvict packet coming from above
        // encountering a Writeback generated in this cache's peer cache
        // and waiting in the write buffer. Cases of upper level peer
        // caches generating CleanEvict and Writeback or simply CleanEvict
        // and CleanEvict almost simultaneously will be caught by snoops
        // sent out by the crossbar.
        WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
                                                          pkt->isSecure());
        if (wb_entry) {
            assert(wb_entry->getNumTargets() == 1);
            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
            assert(wbPkt->isWriteback());

            if (pkt->isCleanEviction()) {
                // A CleanEvict or WritebackClean snoops into other
                // peer caches of the same level while traversing the
                // crossbar. If a copy of the block is found, the
                // packet is deleted in the crossbar. Hence, none of
                // the other upper level caches connected to this
                // cache have the block, so we can clear the
                // BLOCK_CACHED flag in the Writeback if set and
                // discard the CleanEvict by returning true.
                wbPkt->clearBlockCached();
                return true;
            } else {
                assert(pkt->cmd == MemCmd::WritebackDirty);
                // Dirty writeback from above trumps our clean
                // writeback... discard here
                // Note: markInService will remove entry from
                // writeback buffer.
                markInService(wb_entry);
                delete wbPkt;
            }
        }
    }

    // Writeback handling is a special case. We can write the block into
    // the cache without having a writable copy (or any copy at all).
    if (pkt->isWriteback()) {
        assert(blkSize == pkt->getSize());

        // we could get a clean writeback while we have outstanding
        // accesses to a block, do the simple thing for now and drop
        // the clean writeback so that we do not upset any
        // ordering/decisions about ownership already taken
        if (pkt->cmd == MemCmd::WritebackClean &&
            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
                    "dropping\n", pkt->getAddr());
            return true;
        }

        if (blk == nullptr) {
            // need to do a replacement
            blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks);
            if (blk == nullptr) {
                // no replaceable block available: give up, fwd to next level.
                incMissCount(pkt);
                return false;
            }
            tags->insertBlock(pkt, blk);

            blk->status = (BlkValid | BlkReadable);
            if (pkt->isSecure()) {
                blk->status |= BlkSecure;
            }
        }
        // only mark the block dirty if we got a writeback command,
        // and leave it as is for a clean writeback
        if (pkt->cmd == MemCmd::WritebackDirty) {
            blk->status |= BlkDirty;
        }
        // if the packet does not have sharers, it is passing
        // writable, and we got the writeback in Modified or Exclusive
        // state, if not we are in the Owned or Shared state
        if (!pkt->hasSharers()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
        incHitCount(pkt);
        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        if (blk != nullptr) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and delete
            // it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the
        // CleanEvict like a Writeback which could not find a replaceable
        // block so has to go to next level.
        return false;
    } else if (pkt->cmd == MemCmd::WriteClean) {
        // WriteClean handling is a special case. We can allocate a
        // block directly if it doesn't exist and we can update the
        // block immediately. The WriteClean transfers the ownership
        // of the block as well.
        assert(blkSize == pkt->getSize());

        if (!blk) {
            if (pkt->writeThrough()) {
                // if this is a write through packet, we don't try to
                // allocate if the block is not present
                return false;
            } else {
                // a writeback that misses needs to allocate a new block
                blk = allocateBlock(pkt->getAddr(), pkt->isSecure(),
                                    writebacks);
                if (!blk) {
                    // no replaceable block available: give up, fwd to
                    // next level.
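                    // (returning false makes the caller forward the
                    // WriteClean downstream, in timing mode via the
                    // write buffer)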
                    incMissCount(pkt);
                    return false;
                }
                tags->insertBlock(pkt, blk);

                blk->status = (BlkValid | BlkReadable);
                if (pkt->isSecure()) {
                    blk->status |= BlkSecure;
                }
            }
        }

        // at this point this is either a writeback or a write-through
        // WriteClean operation and the block is already in this
        // cache; we need to update the data and the block flags
        assert(blk);
        if (!pkt->writeThrough()) {
            blk->status |= BlkDirty;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());

        incHitCount(pkt);
        // populate the time when the block will be ready to access.
        blk->whenReady = clockEdge(fillLatency) + pkt->headerDelay +
            pkt->payloadDelay;
        // if this is a write-through packet it will be sent to the
        // cache below
        return !pkt->writeThrough();
    } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
                       blk->isReadable())) {
        // OK to satisfy access
        incHitCount(pkt);
        satisfyRequest(pkt, blk);
        maintainClusivity(pkt->fromCache(), blk);

        return true;
    }

    // Can't satisfy access normally... either no block (blk == nullptr)
    // or have block but need writable

    incMissCount(pkt);

    if (blk == nullptr && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}

void
Cache::maintainClusivity(bool from_cache, CacheBlk *blk)
{
    if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
        clusivity == Enums::mostly_excl) {
        // if we have responded to a cache, and our block is still
        // valid, but not dirty, and this cache is mostly exclusive
        // with respect to the cache above, drop the block
        invalidateBlock(blk);
    }
}

void
Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // We use forwardLatency here because we are copying writebacks to
        // write buffer.

        // Call isCachedAbove for Writebacks, CleanEvicts and
        // WriteCleans to discover if the block is cached above.
        if (isCachedAbove(wbPkt)) {
            if (wbPkt->cmd == MemCmd::CleanEvict) {
                // Delete CleanEvict because cached copies exist above. The
                // packet destructor will delete the request object because
                // this is a non-snoop request packet which does not require a
                // response.
                delete wbPkt;
            } else if (wbPkt->cmd == MemCmd::WritebackClean) {
                // clean writeback, do not send since the block is
                // still cached above
                assert(writebackClean);
                delete wbPkt;
            } else {
                assert(wbPkt->cmd == MemCmd::WritebackDirty ||
                       wbPkt->cmd == MemCmd::WriteClean);
                // Set BLOCK_CACHED flag in Writeback and send below, so that
                // the Writeback does not reset the bit corresponding to this
                // address in the snoop filter below.
                wbPkt->setBlockCached();
                allocateWriteBuffer(wbPkt, forward_time);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            allocateWriteBuffer(wbPkt, forward_time);
        }
        writebacks.pop_front();
    }
}

void
Cache::doWritebacksAtomic(PacketList& writebacks)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // Call isCachedAbove for both Writebacks and CleanEvicts. If
        // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
        // and discard CleanEvicts.
        if (isCachedAbove(wbPkt, false)) {
            if (wbPkt->cmd == MemCmd::WritebackDirty ||
                wbPkt->cmd == MemCmd::WriteClean) {
                // Set BLOCK_CACHED flag in Writeback and send below,
                // so that the Writeback does not reset the bit
                // corresponding to this address in the snoop filter
                // below. We can discard CleanEvicts because cached
                // copies exist above. In atomic mode isCachedAbove
                // modifies the packet to set the BLOCK_CACHED flag.
                memSidePort->sendAtomic(wbPkt);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            memSidePort->sendAtomic(wbPkt);
        }
        writebacks.pop_front();
        // In case of CleanEvicts, the packet destructor will delete the
        // request object because this is a non-snoop request packet which
        // does not require a response.
        delete wbPkt;
    }
}


void
Cache::recvTimingSnoopResp(PacketPtr pkt)
{
    DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

    assert(pkt->isResponse());
    assert(!system->bypassCaches());

    // determine if the response is from a snoop request we created
    // (in which case it should be in the outstandingSnoop), or if we
    // merely forwarded someone else's snoop request
    const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
        outstandingSnoop.end();

    if (!forwardAsSnoop) {
        // the packet came from this cache, so sink it here and do not
        // forward it
        assert(pkt->cmd == MemCmd::HardPFResp);

        outstandingSnoop.erase(pkt->req);

        DPRINTF(Cache, "Got prefetch response from above for addr "
                "%#llx (%s)\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
        recvTimingResp(pkt);
        return;
    }

    // forwardLatency is set here because there is a response from an
    // upper level cache.
    // To pay the delay that occurs if the packet comes from the bus,
    // we also charge headerDelay.
    Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    memSidePort->schedTimingSnoopResp(pkt, snoop_resp_time);
}

void
Cache::promoteWholeLineWrites(PacketPtr pkt)
{
    // Cache line clearing instructions
    if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) {
        pkt->cmd = MemCmd::WriteLineReq;
        DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
    }
}

bool
Cache::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(CacheTags, "%s tags:\n%s\n", __func__, tags->print());

    assert(pkt->isRequest());

    // Just forward the packet if caches are disabled.
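    // (bypassCaches() is true when the system runs without modelling
    // caches, e.g. in atomic_noncaching mode)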
    if (system->bypassCaches()) {
        // @todo This should really enqueue the packet rather than
        // sending it immediately
        bool M5_VAR_USED success = memSidePort->sendTimingReq(pkt);
        assert(success);
        return true;
    }

    promoteWholeLineWrites(pkt);

    if (pkt->cacheResponding()) {
        // a cache above us (but not where the packet came from) is
        // responding to the request, in other words it has the line
        // in Modified or Owned state
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if the packet needs the block to be writable, and the cache
        // that has promised to respond (setting the cache responding
        // flag) is not providing writable (it is in Owned rather than
        // the Modified state), we know that there may be other Shared
        // copies in the system; go out and invalidate them all
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        // an upstream cache that had the line in Owned state
        // (dirty, but not writable), is responding and thus
        // transferring the dirty line from one branch of the
        // cache hierarchy to another

        // send out an express snoop and invalidate all other
        // copies (snooping a packet that needs writable is the
        // same as an invalidation), thus turning the Owned line
        // into a Modified line, note that we don't invalidate the
        // block in the current cache or any other cache on the
        // path to memory

        // create a downstream express snoop with cleared packet
        // flags, there is no need to allocate any data as the
        // packet is merely used to co-ordinate state transitions
        Packet *snoop_pkt = new Packet(pkt, true, false);

        // also reset the bus time that the original packet has
        // not yet paid for
        snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;

        // make this an instantaneous express snoop, and let the
        // other caches in the system know that another cache is
        // responding, because we have found the authoritative
        // copy (Modified or Owned) that will supply the right
        // data
        snoop_pkt->setExpressSnoop();
        snoop_pkt->setCacheResponding();

        // this express snoop travels towards the memory, and at
        // every crossbar it is snooped upwards thus reaching
        // every cache in the system
        bool M5_VAR_USED success = memSidePort->sendTimingReq(snoop_pkt);
        // express snoops always succeed
        assert(success);

        // main memory will delete the snoop packet

        // queue for deletion, as opposed to immediate deletion, as
        // the sending cache is still relying on the packet
        pendingDelete.reset(pkt);

        // no need to take any further action in this particular cache
        // as an upstream cache has already committed to responding,
        // and we have already sent out any express snoops in the
        // section above to ensure all other copies in the system are
        // invalidated
        return true;
    }

    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    // We use lookupLatency here because it is used to specify the latency
    // to access.
    Cycles lat = lookupLatency;
    CacheBlk *blk = nullptr;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() calls accessBlock() which can modify its value.
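        // access() returns true if the request hits and is satisfied
        // at this level; on a miss it returns false and the request
        // is handled below through the MSHRs or the write buffer. Any
        // evictions triggered by the lookup are collected in
        // writebacks.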
        satisfied = access(pkt, blk, lat, writebacks);

        // copy writebacks to write buffer here to ensure they logically
        // precede anything happening below
        doWritebacks(writebacks, forward_time);
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is just lat, that is, lookupLatency possibly
    // modified by the access() function.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;

    // track time of availability of next prefetch, if any
    Tick next_pf_time = MaxTick;

    bool needsResponse = pkt->needsResponse();

    if (satisfied) {
        // should never be satisfying an uncacheable access as we
        // flush and invalidate any existing block as part of the
        // lookup
        assert(!pkt->req->isUncacheable());

        // hit (for all other request types)

        if (prefetcher && (prefetchOnAccess ||
                           (blk && blk->wasPrefetched()))) {
            if (blk)
                blk->status &= ~BlkHWPrefetched;

            // Don't notify on SWPrefetch
            if (!pkt->cmd.isSWPrefetch())
                next_pf_time = prefetcher->notify(pkt);
        }

        if (needsResponse) {
            pkt->makeTimingResponse();
            // @todo: Make someone pay for this
            pkt->headerDelay = pkt->payloadDelay = 0;

            // In this case we are considering request_time that takes
            // into account the delay of the xbar, if any, and just
            // lat, neglecting responseLatency, modelling hit latency
            // just as lookupLatency or the value of lat overridden
            // by access(), which calls accessBlock().
            cpuSidePort->schedTimingResp(pkt, request_time, true);
        } else {
            DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
                    pkt->print());

            // queue the packet for deletion, as the sending cache is
            // still relying on it; if the block is found in access(),
            // CleanEvict and Writeback messages will be deleted
            // here as well
            pendingDelete.reset(pkt);
        }
    } else {
        // miss

        Addr blk_addr = pkt->getBlockAddr(blkSize);

        // ignore any existing MSHR if we are dealing with an
        // uncacheable request
        MSHR *mshr = pkt->req->isUncacheable() ? nullptr :
            mshrQueue.findMatch(blk_addr, pkt->isSecure());

        // Software prefetch handling:
        // To keep the core from waiting on data it won't look at
        // anyway, send back a response with dummy data. Miss handling
        // will continue asynchronously. Unfortunately, the core will
        // insist upon freeing original Packet/Request, so we have to
        // create a new pair with a different lifecycle. Note that this
        // processing happens before any MSHR munging on the behalf of
        // this request because this new Request will be the one stored
        // into the MSHRs, not the original.
        if (pkt->cmd.isSWPrefetch()) {
            assert(needsResponse);
            assert(pkt->req->hasPaddr());
            assert(!pkt->req->isUncacheable());

            // There's no reason to add a prefetch as an additional target
            // to an existing MSHR. If an outstanding request is already
            // in progress, there is nothing for the prefetch to do.
            // If this is the case, we don't even create a request at all.
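            // pf stays null if a matching MSHR exists; the dummy
            // response below is still sent, but no new request is
            // issued downstream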
            PacketPtr pf = nullptr;

            if (!mshr) {
                // copy the request and create a new SoftPFReq packet
                RequestPtr req = new Request(pkt->req->getPaddr(),
                                             pkt->req->getSize(),
                                             pkt->req->getFlags(),
                                             pkt->req->masterId());
                pf = new Packet(req, pkt->cmd);
                pf->allocate();
                assert(pf->getAddr() == pkt->getAddr());
                assert(pf->getSize() == pkt->getSize());
            }

            pkt->makeTimingResponse();

            // request_time is used here, taking into account lat and the
            // delay charged if the packet comes from the xbar.
            cpuSidePort->schedTimingResp(pkt, request_time, true);

            // If an outstanding request is in progress (we found an
            // MSHR) this is set to null
            pkt = pf;
        }

        if (mshr) {
            /// MSHR hit
            /// @note writebacks will be checked in getNextMSHR()
            /// for any conflicting requests to the same block

            //@todo remove hw_pf here

            // Coalesce unless it was a software prefetch (see above).
            if (pkt) {
                assert(!pkt->isWriteback());
                // CleanEvicts corresponding to blocks which have
                // outstanding requests in MSHRs are simply sunk here
                if (pkt->cmd == MemCmd::CleanEvict) {
                    pendingDelete.reset(pkt);
                } else {
                    DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
                            pkt->print());

                    assert(pkt->req->masterId() < system->maxMasters());
                    mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
                    // We use forward_time here because it is the same
                    // considering new targets. We have multiple
                    // requests for the same address here. It
                    // specifies the latency to allocate an internal
                    // buffer and to schedule an event to the queued
                    // port and also takes into account the additional
                    // delay of the xbar.
                    mshr->allocateTarget(pkt, forward_time, order++,
                                         allocOnFill(pkt->cmd));
                    if (mshr->getNumTargets() == numTarget) {
                        noTargetMSHR = mshr;
                        setBlocked(Blocked_NoTargets);
                        // need to be careful with this... if this mshr isn't
                        // ready yet (i.e. time > curTick()), we don't want to
                        // move it ahead of mshrs that are ready
                        // mshrQueue.moveToFront(mshr);
                    }
                }
                // We should call the prefetcher regardless of whether
                // the request is satisfied, and regardless of whether
                // it is in the MSHR or not. The request could be a
                // ReadReq hit, but still not satisfied (potentially
                // because of a prior write to the same cache line), in
                // which case there is an MSHR already allocated for
                // it; we need to let the prefetcher know about the
                // request.
                if (prefetcher) {
                    // Don't notify on SWPrefetch
                    if (!pkt->cmd.isSWPrefetch())
                        next_pf_time = prefetcher->notify(pkt);
                }
            }
        } else {
            // no MSHR
            assert(pkt->req->masterId() < system->maxMasters());
            if (pkt->req->isUncacheable()) {
                mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;
            } else {
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
            }

            if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
                (pkt->req->isUncacheable() && pkt->isWrite())) {
                // We use forward_time here because there is an
                // uncached memory write, forwarded to WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                if (blk && blk->isValid()) {
                    // should have flushed and have no valid block
                    assert(!pkt->req->isUncacheable());

                    // If we have a write miss to a valid block, we
                    // need to mark the block non-readable. Otherwise
                    // if we allow reads while there's an outstanding
                    // write miss, the read could return stale data
                    // out of the cache block... a more aggressive
                    // system could detect the overlap (if any) and
                    // forward data out of the MSHRs, but we don't do
                    // that yet. Note that we do need to leave the
                    // block valid so that it stays in the cache, in
                    // case we get an upgrade response (and hence no
                    // new data) when the write miss completes.
                    // As long as CPUs do proper store/load forwarding
                    // internally, and have a sufficiently weak memory
                    // model, this is probably unnecessary, but at some
                    // point it must have seemed like we needed it...
                    assert(pkt->needsWritable());
                    assert(!blk->isWritable());
                    blk->status &= ~BlkReadable;
                }
                // Here we are using forward_time, modelling the latency of
                // a miss (outbound) just as forwardLatency, neglecting the
                // lookupLatency component.
                allocateMissBuffer(pkt, forward_time);
            }

            if (prefetcher) {
                // Don't notify on SWPrefetch
                if (!pkt->cmd.isSWPrefetch())
                    next_pf_time = prefetcher->notify(pkt);
            }
        }
    }

    if (next_pf_time != MaxTick)
        schedMemSideSendEvent(next_pf_time);

    return true;
}

PacketPtr
Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                        bool needsWritable) const
{
    // should never see evictions here
    assert(!cpu_pkt->isEviction());

    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable() ||
        (!blkValid && cpu_pkt->isUpgrade()) ||
        cpu_pkt->cmd == MemCmd::InvalidateReq) {
        // uncacheable requests and upgrades from upper-level caches
        // that missed completely just go through as is
        return nullptr;
    }

    assert(cpu_pkt->needsResponse());

    MemCmd cmd;
    // @TODO make useUpgrades a parameter.
    // Note that ownership protocols require upgrade, otherwise a
    // write miss on a shared owned block will generate a ReadExcl,
    // which will clobber the owned copy.
    const bool useUpgrades = true;
    if (cpu_pkt->cmd == MemCmd::WriteLineReq) {
        assert(!blkValid || !blk->isWritable());
        // forward as invalidate to all other caches, this gives us
        // the line in Exclusive state, and invalidates all other
        // copies
        cmd = MemCmd::InvalidateReq;
    } else if (blkValid && useUpgrades) {
        // only reason to be here is that blk is read only and we need
        // it to be writable
        assert(needsWritable);
        assert(!blk->isWritable());
        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
               cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
        // Even though this SC will fail, we still need to send out the
        // request and get the data to supply it to other snoopers in the
        // case where the determination that the StoreCond fails is
        // delayed due to all caches not being on the same local bus.
        cmd = MemCmd::SCUpgradeFailReq;
    } else {
        // block is invalid
        cmd = needsWritable ? MemCmd::ReadExReq :
            (isReadOnly ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
    }
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);

    // if there are upstream caches that have already marked the
    // packet as having sharers (not passing writable), pass that info
    // downstream
    if (cpu_pkt->hasSharers() && !needsWritable) {
        // note that cpu_pkt may have spent a considerable time in the
        // MSHR queue and that the information could possibly be out
        // of date, however, there is no harm in conservatively
        // assuming the block has sharers
        pkt->setHasSharers();
        DPRINTF(Cache, "%s: passing hasSharers from %s to %s\n",
                __func__, cpu_pkt->print(), pkt->print());
    }

    // the packet should be block aligned
    assert(pkt->getAddr() == pkt->getBlockAddr(blkSize));

    pkt->allocate();
    DPRINTF(Cache, "%s: created %s from %s\n", __func__, pkt->print(),
            cpu_pkt->print());
    return pkt;
}


Tick
Cache::recvAtomic(PacketPtr pkt)
{
    // We are in atomic mode so we pay just for lookupLatency here.
    Cycles lat = lookupLatency;

    // Forward the request if the system is in cache bypass mode.
    if (system->bypassCaches())
        return ticksToCycles(memSidePort->sendAtomic(pkt));

    promoteWholeLineWrites(pkt);

    // follow the same flow as in recvTimingReq, and check if a cache
    // above us is responding
    if (pkt->cacheResponding()) {
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if a cache is responding, and it had the line in Owned
        // rather than Modified state, we need to invalidate any
        // copies that are not on the same path to memory
        assert(pkt->needsWritable() && !pkt->responderHadWritable());
        lat += ticksToCycles(memSidePort->sendAtomic(pkt));

        return lat * clockPeriod();
    }

    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    CacheBlk *blk = nullptr;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);

    if (!satisfied) {
        // MISS

        // deal with the packets that go through the write path of
        // the cache, i.e. any evictions and writes
        if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
            (pkt->req->isUncacheable() && pkt->isWrite())) {
            lat += ticksToCycles(memSidePort->sendAtomic(pkt));
            return lat * clockPeriod();
        }
        // only misses left

        PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable());

        bool is_forward = (bus_pkt == nullptr);

        if (is_forward) {
            // just forwarding the same request to the next level
            // no local cache operation involved
            bus_pkt = pkt;
        }

        DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__,
                bus_pkt->print());

#if TRACING_ON
        CacheBlk::State old_state = blk ? blk->status : 0;
#endif

        lat += ticksToCycles(memSidePort->sendAtomic(bus_pkt));

        bool is_invalidate = bus_pkt->isInvalidate();

        // We are now dealing with the response handling
        DPRINTF(Cache, "%s: Receive response: %s in state %i\n", __func__,
                bus_pkt->print(), old_state);

        // If packet was a forward, the response (if any) is already
        // in place in the bus_pkt == pkt structure, so we don't need
        // to do anything. Otherwise, use the separate bus_pkt to
        // generate response to pkt and then delete it.
        if (!is_forward) {
            if (pkt->needsResponse()) {
                assert(bus_pkt->isResponse());
                if (bus_pkt->isError()) {
                    pkt->makeAtomicResponse();
                    pkt->copyError(bus_pkt);
                } else if (pkt->cmd == MemCmd::WriteLineReq) {
                    // note the use of pkt, not bus_pkt here.

                    // write-line request to the cache that promoted
                    // the write to a whole line
                    blk = handleFill(pkt, blk, writebacks,
                                     allocOnFill(pkt->cmd));
                    assert(blk != nullptr);
                    is_invalidate = false;
                    satisfyRequest(pkt, blk);
                } else if (bus_pkt->isRead() ||
                           bus_pkt->cmd == MemCmd::UpgradeResp) {
                    // we're updating cache state to allow us to
                    // satisfy the upstream request from the cache
                    blk = handleFill(bus_pkt, blk, writebacks,
                                     allocOnFill(pkt->cmd));
                    satisfyRequest(pkt, blk);
                    maintainClusivity(pkt->fromCache(), blk);
                } else {
                    // we're satisfying the upstream request without
                    // modifying cache state, e.g., a write-through
                    pkt->makeAtomicResponse();
                }
            }
            delete bus_pkt;
        }

        if (is_invalidate && blk && blk->isValid()) {
            invalidateBlock(blk);
        }
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention. If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used temp block, check to see if it's valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter), first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = (blk->isDirty() || writebackClean) ?
            writebackBlk(blk) : cleanEvictBlk(blk);
        invalidateBlock(blk);
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat * clockPeriod();
}


void
Cache::functionalAccess(PacketPtr pkt, bool fromCpuSide)
{
    if (system->bypassCaches()) {
        // Packets from the memory side are snoop requests and
        // shouldn't happen in bypass mode.
        assert(fromCpuSide);

        // The cache should be flushed if we are in cache bypass mode,
        // so we don't need to check if we need to update anything.
        memSidePort->sendFunctional(pkt);
        return;
    }

    Addr blk_addr = pkt->getBlockAddr(blkSize);
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found. As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.

    // see if we have data at all (owned or otherwise)
    bool have_data = blk && blk->isValid()
        && pkt->checkFunctional(&cbpw, blk_addr, is_secure, blkSize,
                                blk->data);

    // data we have is dirty if marked as such or if we have an
    // in-service MSHR that is pending a modified line
    bool have_dirty =
        have_data && (blk->isDirty() ||
                      (mshr && mshr->inService && mshr->isPendingModified()));

    bool done = have_dirty
        || cpuSidePort->checkFunctional(pkt)
        || mshrQueue.checkFunctional(pkt, blk_addr)
        || writeBuffer.checkFunctional(pkt, blk_addr)
        || memSidePort->checkFunctional(pkt);

    DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
            (blk && blk->isValid()) ? "valid " : "",
            have_data ? "data " : "", done ? "done " : "");

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (done) {
        pkt->makeResponse();
    } else {
        // if it came as a request from the CPU side then make sure it
        // continues towards the memory side
        if (fromCpuSide) {
            memSidePort->sendFunctional(pkt);
        } else if (cpuSidePort->isSnooping()) {
            // if it came from the memory side, it must be a snoop request
            // and we should only forward it if we are forwarding snoops
            cpuSidePort->sendFunctionalSnoop(pkt);
        }
    }
}


/////////////////////////////////////////////////////
//
// Response handling: responses from the memory side
//
/////////////////////////////////////////////////////


void
Cache::handleUncacheableWriteResp(PacketPtr pkt)
{
    Tick completion_time = clockEdge(responseLatency) +
        pkt->headerDelay + pkt->payloadDelay;

    // Reset the bus additional time as it is now accounted for
    pkt->headerDelay = pkt->payloadDelay = 0;

    cpuSidePort->schedTimingResp(pkt, completion_time, true);
}

void
Cache::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    // all header delay should be paid for by the crossbar, unless
    // this is a prefetch response from above
    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
             "%s saw a non-zero packet delay\n", name());

    bool is_error = pkt->isError();

    if (is_error) {
        DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
                pkt->print());
    }

    DPRINTF(Cache, "%s: Handling response %s\n", __func__,
            pkt->print());

    // if this is a write, we should be looking at an uncacheable
    // write
    if (pkt->isWrite()) {
        assert(pkt->req->isUncacheable());
        handleUncacheableWriteResp(pkt);
        return;
    }

    // we have dealt with any (uncacheable) writes above, from here on
    // we know we are dealing with an MSHR due to a miss or a prefetch
    MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
    assert(mshr);

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = nullptr;
    }

    // Initial target is used just for stats
    MSHR::Target *initial_tgt = mshr->getTarget();
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick() - initial_tgt->recvTime;

    if (pkt->req->isUncacheable()) {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    } else {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    }

    bool wasFull = mshrQueue.isFull();

    PacketList writebacks;

    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    // upgrade deferred targets if the response has no sharers, and is
    // thus passing writable
    if (!pkt->hasSharers()) {
        mshr->promoteWritable();
    }

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                pkt->getAddr());

        blk = handleFill(pkt, blk, writebacks, mshr->allocOnFill());
        assert(blk != nullptr);
    }

    // allow invalidation responses originating from write-line
    // requests to be discarded
    bool is_invalidate = pkt->isInvalidate();

    // First offset for critical word first calculations
    int initial_offset = initial_tgt->pkt->getOffset(blkSize);

    bool from_cache = false;
    MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
    for (auto &target: targets) {
        Packet *tgt_pkt = target.pkt;
        switch (target.source) {
          case MSHR::Target::FromCPU:
            Tick completion_time;
            // Here we charge on completion_time the delay of the xbar if the
            // packet comes from it, charged on headerDelay.
            completion_time = pkt->headerDelay;

            // Software prefetch handling for cache closest to core
            if (tgt_pkt->cmd.isSWPrefetch()) {
                // a software prefetch would have already been ack'd
                // immediately with dummy data so the core would be able to
                // retire it. This request completes right here, so we
                // deallocate it.
                delete tgt_pkt->req;
                delete tgt_pkt;
                break; // skip response
            }

            // keep track of whether we have responded to another
            // cache
            from_cache = from_cache || tgt_pkt->fromCache();

            // unlike the other packet flows, where data is found in other
            // caches or memory and brought back, write-line requests always
            // have the data right away, so the above check for "is fill?"
            // cannot actually be determined until examining the stored MSHR
            // state. We "catch up" with that logic here, which is duplicated
            // from above.
            if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
                assert(!is_error);
                // we got the block in a writable state, so promote
                // any deferred targets if possible
                mshr->promoteWritable();
                // NB: we use the original packet here and not the response!
                blk = handleFill(tgt_pkt, blk, writebacks,
                                 targets.allocOnFill);
                assert(blk != nullptr);

                // treat as a fill, and discard the invalidation
                // response
                is_fill = true;
                is_invalidate = false;
            }

            if (is_fill) {
                satisfyRequest(tgt_pkt, blk, true, mshr->hasPostDowngrade());

                // How many bytes past the first request is this one
                int transfer_offset =
                    tgt_pkt->getOffset(blkSize) - initial_offset;
                if (transfer_offset < 0) {
                    transfer_offset += blkSize;
                }

                // If this is not the critical word, also charge
                // payloadDelay. responseLatency is the latency of the
                // return path from lower level caches/memory to an
                // upper level cache or the core.
                completion_time += clockEdge(responseLatency) +
                    (transfer_offset ? pkt->payloadDelay : 0);

                assert(!tgt_pkt->req->isUncacheable());

                assert(tgt_pkt->req->masterId() < system->maxMasters());
                missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] +=
                    completion_time - target.recvTime;
            } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
                // failed StoreCond upgrade
                assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
                       tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
                       tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache
                // or the core.
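                // the failure itself is signalled to the core through
                // the extra data being cleared (see setExtraData(0)
                // below), while the target still pays the full
                // response latency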
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                tgt_pkt->req->setExtraData(0);
            } else {
                // We are about to send a response to a cache above
                // that asked for an invalidation; we need to
                // invalidate our copy immediately as the most
                // up-to-date copy of the block will now be in the
                // cache above. It will also prevent this cache from
                // responding (if the block was previously dirty) to
                // snoops as they should snoop the caches above where
                // they will get the response from.
                if (is_invalidate && blk && blk->isValid()) {
                    invalidateBlock(blk);
                }
                // not a cache fill, just forwarding response
                // responseLatency is the latency of the return path
                // from lower level caches/memory to the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                if (pkt->isRead() && !is_error) {
                    // sanity check
                    assert(pkt->getAddr() == tgt_pkt->getAddr());
                    assert(pkt->getSize() >= tgt_pkt->getSize());

                    tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
                }
            }
            tgt_pkt->makeTimingResponse();
            // if this packet is an error copy that to the new packet
            if (is_error)
                tgt_pkt->copyError(pkt);
            if (tgt_pkt->cmd == MemCmd::ReadResp &&
                (is_invalidate || mshr->hasPostInvalidate())) {
                // If intermediate cache got ReadRespWithInvalidate,
                // propagate that. Response should not have
                // isInvalidate() set otherwise.
                tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
                DPRINTF(Cache, "%s: updated cmd to %s\n", __func__,
                        tgt_pkt->print());
            }
            // Reset the bus additional time as it is now accounted for
            tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
            cpuSidePort->schedTimingResp(tgt_pkt, completion_time, true);
            break;

          case MSHR::Target::FromPrefetcher:
            assert(tgt_pkt->cmd == MemCmd::HardPFReq);
            if (blk)
                blk->status |= BlkHWPrefetched;
            delete tgt_pkt->req;
            delete tgt_pkt;
            break;

          case MSHR::Target::FromSnoop:
            // I don't believe that a snoop can be in an error state
            assert(!is_error);
            // response to snoop request
            DPRINTF(Cache, "processing deferred snoop...\n");
            // If the response is invalidating, a snooping target can
            // be satisfied if it is also invalidating. If the response
            // is not only invalidating, but more specifically an
            // InvalidateResp, the MSHR was created due to an
            // InvalidateReq and a cache above is waiting to satisfy a
            // WriteLineReq. In this case even a non-invalidating snoop
            // is added as a target here since this is the ordering
            // point. When the InvalidateResp reaches this cache, the
            // snooping target will further snoop the cache above with
            // the WriteLineReq.
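            // put differently: an invalidating response may only
            // reach a deferred snoop target if it is an
            // InvalidateResp or the MSHR has a pending post-invalidate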
            assert(!(is_invalidate &&
                     pkt->cmd != MemCmd::InvalidateResp &&
                     !mshr->hasPostInvalidate()));
            handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
            break;

          default:
            panic("Illegal target->source enum %d\n", target.source);
        }
    }

    maintainClusivity(from_cache, blk);

    if (blk && blk->isValid()) {
        // an invalidate response stemming from a write line request
        // should not invalidate the block, so check if the
        // invalidation should be discarded
        if (is_invalidate || mshr->hasPostInvalidate()) {
            invalidateBlock(blk);
        } else if (mshr->hasPostDowngrade()) {
            blk->status &= ~BlkWritable;
        }
    }

    if (mshr->promoteDeferredTargets()) {
        // avoid later read getting stale data while write miss is
        // outstanding.. see comment in timingAccess()
        if (blk) {
            blk->status &= ~BlkReadable;
        }
        mshrQueue.markPending(mshr);
        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
    } else {
        mshrQueue.deallocate(mshr);
        if (wasFull && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }

        // Request the bus for a prefetch if this deallocation freed enough
        // MSHRs for a prefetch to take place
        if (prefetcher && mshrQueue.canPrefetch()) {
            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
                                         clockEdge());
            if (next_pf_time != MaxTick)
                schedMemSideSendEvent(next_pf_time);
        }
    }
    // reset the xbar additional timing as it is now accounted for
    pkt->headerDelay = pkt->payloadDelay = 0;

    // copy writebacks to write buffer
    doWritebacks(writebacks, forward_time);

    // if we used temp block, check to see if it's valid and then clear it out
    if (blk == tempBlock && tempBlock->isValid()) {
        // We use forwardLatency here because we are copying
        // Writebacks/CleanEvicts to write buffer. It specifies the latency to
        // allocate an internal buffer and to schedule an event to the
        // queued port.
        if (blk->isDirty() || writebackClean) {
            PacketPtr wbPkt = writebackBlk(blk);
            allocateWriteBuffer(wbPkt, forward_time);
            // Set BLOCK_CACHED flag if cached above.
            if (isCachedAbove(wbPkt))
                wbPkt->setBlockCached();
        } else {
            PacketPtr wcPkt = cleanEvictBlk(blk);
            // Check to see if block is cached above. If not allocate
            // write buffer
            if (isCachedAbove(wcPkt))
                delete wcPkt;
            else
                allocateWriteBuffer(wcPkt, forward_time);
        }
        invalidateBlock(blk);
    }

    DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
    delete pkt;
}

PacketPtr
Cache::writebackBlk(CacheBlk *blk)
{
    chatty_assert(!isReadOnly || writebackClean,
                  "Writeback from read-only cache");
    assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));

    writebacks[Request::wbMasterId]++;

    Request *req = new Request(tags->regenerateBlkAddr(blk->tag, blk->set),
                               blkSize, 0, Request::wbMasterId);
    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);
    blk->task_id = ContextSwitchTaskId::Unknown;
    blk->tickInserted = curTick();

    PacketPtr pkt =
        new Packet(req, blk->isDirty() ?
                   MemCmd::WritebackDirty : MemCmd::WritebackClean);
1612 MemCmd::WritebackDirty : MemCmd::WritebackClean); 1613 1614 DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n", 1615 pkt->print(), blk->isWritable(), blk->isDirty()); 1616 1617 if (blk->isWritable()) { 1618 // not asserting shared means we pass the block in modified 1619 // state, mark our own block non-writeable 1620 blk->status &= ~BlkWritable; 1621 } else { 1622 // we are in the Owned state, tell the receiver 1623 pkt->setHasSharers(); 1624 } 1625 1626 // make sure the block is not marked dirty 1627 blk->status &= ~BlkDirty; 1628 1629 pkt->allocate(); 1630 std::memcpy(pkt->getPtr<uint8_t>(), blk->data, blkSize); 1631 1632 return pkt; 1633} 1634 1635PacketPtr 1636Cache::writecleanBlk(CacheBlk *blk, Request::Flags dest) 1637{ 1638 Request *req = new Request(tags->regenerateBlkAddr(blk->tag, blk->set), 1639 blkSize, 0, Request::wbMasterId); 1640 if (blk->isSecure()) { 1641 req->setFlags(Request::SECURE); 1642 } 1643 req->taskId(blk->task_id); 1644 blk->task_id = ContextSwitchTaskId::Unknown; 1645 PacketPtr pkt = new Packet(req, MemCmd::WriteClean); 1646 DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(), 1647 blk->isWritable(), blk->isDirty()); 1648 // make sure the block is not marked dirty 1649 blk->status &= ~BlkDirty; 1650 pkt->allocate(); 1651 // We inform the cache below that the block has sharers in the 1652 // system as we retain our copy. 1653 pkt->setHasSharers(); 1654 if (dest) { 1655 req->setFlags(dest); 1656 pkt->setWriteThrough(); 1657 } 1658 std::memcpy(pkt->getPtr<uint8_t>(), blk->data, blkSize); 1659 return pkt; 1660} 1661 1662 1663PacketPtr 1664Cache::cleanEvictBlk(CacheBlk *blk) 1665{ 1666 assert(!writebackClean); 1667 assert(blk && blk->isValid() && !blk->isDirty()); 1668 // Creating a zero sized write, a message to the snoop filter 1669 Request *req = 1670 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0, 1671 Request::wbMasterId); 1672 if (blk->isSecure()) 1673 req->setFlags(Request::SECURE); 1674 1675 req->taskId(blk->task_id); 1676 blk->task_id = ContextSwitchTaskId::Unknown; 1677 blk->tickInserted = curTick(); 1678 1679 PacketPtr pkt = new Packet(req, MemCmd::CleanEvict); 1680 pkt->allocate(); 1681 DPRINTF(Cache, "Create CleanEvict %s\n", pkt->print()); 1682 1683 return pkt; 1684} 1685 1686void 1687Cache::memWriteback() 1688{ 1689 CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor); 1690 tags->forEachBlk(visitor); 1691} 1692 1693void 1694Cache::memInvalidate() 1695{ 1696 CacheBlkVisitorWrapper visitor(*this, &Cache::invalidateVisitor); 1697 tags->forEachBlk(visitor); 1698} 1699 1700bool 1701Cache::isDirty() const 1702{ 1703 CacheBlkIsDirtyVisitor visitor; 1704 tags->forEachBlk(visitor); 1705 1706 return visitor.isDirty(); 1707} 1708 1709bool 1710Cache::writebackVisitor(CacheBlk &blk) 1711{ 1712 if (blk.isDirty()) { 1713 assert(blk.isValid()); 1714 1715 Request request(tags->regenerateBlkAddr(blk.tag, blk.set), 1716 blkSize, 0, Request::funcMasterId); 1717 request.taskId(blk.task_id); 1718 if (blk.isSecure()) { 1719 request.setFlags(Request::SECURE); 1720 } 1721 1722 Packet packet(&request, MemCmd::WriteReq); 1723 packet.dataStatic(blk.data); 1724 1725 memSidePort->sendFunctional(&packet); 1726 1727 blk.status &= ~BlkDirty; 1728 } 1729 1730 return true; 1731} 1732 1733bool 1734Cache::invalidateVisitor(CacheBlk &blk) 1735{ 1736 1737 if (blk.isDirty()) 1738 warn_once("Invalidating dirty cache lines. 
Expect things to break.\n"); 1739 1740 if (blk.isValid()) { 1741 assert(!blk.isDirty()); 1742 invalidateBlock(&blk); 1743 } 1744 1745 return true; 1746} 1747 1748CacheBlk* 1749Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks) 1750{ 1751 CacheBlk *blk = tags->findVictim(addr); 1752 1753 // It is valid to return nullptr if there is no victim 1754 if (!blk) 1755 return nullptr; 1756 1757 if (blk->isValid()) { 1758 Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set); 1759 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure()); 1760 if (repl_mshr) { 1761 // must be an outstanding upgrade request 1762 // on a block we're about to replace... 1763 assert(!blk->isWritable() || blk->isDirty()); 1764 assert(repl_mshr->needsWritable()); 1765 // too hard to replace block with transient state 1766 // allocation failed, block not inserted 1767 return nullptr; 1768 } else { 1769 DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx " 1770 "(%s): %s\n", repl_addr, blk->isSecure() ? "s" : "ns", 1771 addr, is_secure ? "s" : "ns", 1772 blk->isDirty() ? "writeback" : "clean"); 1773 1774 if (blk->wasPrefetched()) { 1775 unusedPrefetches++; 1776 } 1777 // Will send up Writeback/CleanEvict snoops via isCachedAbove 1778 // when pushing this writeback list into the write buffer. 1779 if (blk->isDirty() || writebackClean) { 1780 // Save writeback packet for handling by caller 1781 writebacks.push_back(writebackBlk(blk)); 1782 } else { 1783 writebacks.push_back(cleanEvictBlk(blk)); 1784 } 1785 } 1786 } 1787 1788 return blk; 1789} 1790 1791void 1792Cache::invalidateBlock(CacheBlk *blk) 1793{ 1794 if (blk != tempBlock) 1795 tags->invalidate(blk); 1796 blk->invalidate(); 1797} 1798 1799// Note that the reason we return a list of writebacks rather than 1800// inserting them directly in the write buffer is that this function 1801// is called by both atomic and timing-mode accesses, and in atomic 1802// mode we don't mess with the write buffer (we just perform the 1803// writebacks atomically once the original request is complete). 1804CacheBlk* 1805Cache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks, 1806 bool allocate) 1807{ 1808 assert(pkt->isResponse() || pkt->cmd == MemCmd::WriteLineReq); 1809 Addr addr = pkt->getAddr(); 1810 bool is_secure = pkt->isSecure(); 1811#if TRACING_ON 1812 CacheBlk::State old_state = blk ? blk->status : 0; 1813#endif 1814 1815 // When handling a fill, we should have no writes to this line. 1816 assert(addr == pkt->getBlockAddr(blkSize)); 1817 assert(!writeBuffer.findMatch(addr, is_secure)); 1818 1819 if (blk == nullptr) { 1820 // better have read new data... 1821 assert(pkt->hasData()); 1822 1823 // only read responses and write-line requests have data; 1824 // note that we don't write the data here for write-line - that 1825 // happens in the subsequent call to satisfyRequest 1826 assert(pkt->isRead() || pkt->cmd == MemCmd::WriteLineReq); 1827 1828 // need to do a replacement if allocating, otherwise we stick 1829 // with the temporary storage 1830 blk = allocate ? allocateBlock(addr, is_secure, writebacks) : nullptr; 1831 1832 if (blk == nullptr) { 1833 // No replaceable block or a mostly exclusive 1834 // cache... just use temporary storage to complete the 1835 // current request and then get rid of it 1836 assert(!tempBlock->isValid()); 1837 blk = tempBlock; 1838 tempBlock->set = tags->extractSet(addr); 1839 tempBlock->tag = tags->extractTag(addr); 1840 // @todo: set security state as well... 
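// tempBlock is a single block-sized buffer owned by this cache: it
// lets us service the current response without a real tag entry when
// allocation is not possible (or not desired for a mostly-exclusive
// cache), and it is written back and invalidated again once the
// response has been handled (see recvTimingResp above).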
1841 DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
1842 is_secure ? "s" : "ns");
1843 } else {
1844 tags->insertBlock(pkt, blk);
1845 }
1846
1847 // we should never be overwriting a valid block
1848 assert(!blk->isValid());
1849 } else {
1850 // existing block... probably an upgrade
1851 assert(blk->tag == tags->extractTag(addr));
1852 // either we're getting new data or the block should already be valid
1853 assert(pkt->hasData() || blk->isValid());
1854 // don't clear block status... if block is already dirty we
1855 // don't want to lose that
1856 }
1857
1858 if (is_secure)
1859 blk->status |= BlkSecure;
1860 blk->status |= BlkValid | BlkReadable;
1861
1862 // sanity check for whole-line writes, which should always be
1863 // marked as writable as part of the fill, and then later marked
1864 // dirty as part of satisfyRequest
1865 if (pkt->cmd == MemCmd::WriteLineReq) {
1866 assert(!pkt->hasSharers());
1867 }
1868
1869 // here we deal with setting the appropriate state of the line,
1870 // and we start by looking at the hasSharers flag, and ignore the
1871 // cacheResponding flag (normally signalling dirty data) if the
1872 // packet has sharers, thus the line is never allocated as Owned
1873 // (dirty but not writable), and always ends up being either
1874 // Shared, Exclusive or Modified, see Packet::setCacheResponding
1875 // for more details
1876 if (!pkt->hasSharers()) {
1877 // we could get a writable line from memory (rather than a
1878 // cache); note that we set this bit even for a read-only
1879 // cache, possibly revisit this decision
1880 blk->status |= BlkWritable;
1881
1882 // check if we got this via cache-to-cache transfer (i.e., from a
1883 // cache that had the block in Modified or Owned state)
1884 if (pkt->cacheResponding()) {
1885 // we got the block in Modified state, and invalidated the
1886 // owner's copy
1887 blk->status |= BlkDirty;
1888
1889 chatty_assert(!isReadOnly, "Should never see dirty snoop response "
1890 "in read-only cache %s\n", name());
1891 }
1892 }
1893
1894 DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
1895 addr, is_secure ? "s" : "ns", old_state, blk->print());
1896
1897 // if we got new data, copy it in (checking for a read response
1898 // and a response that has data is the same in the end)
1899 if (pkt->isRead()) {
1900 // sanity checks
1901 assert(pkt->hasData());
1902 assert(pkt->getSize() == blkSize);
1903
1904 std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
1905 }
1906 // We pay for fillLatency here.
1907 blk->whenReady = clockEdge() + fillLatency * clockPeriod() +
1908 pkt->payloadDelay;
1909
1910 return blk;
1911}
1912
1913
1914/////////////////////////////////////////////////////
1915//
1916// Snoop path: requests coming in from the memory side
1917//
1918/////////////////////////////////////////////////////
1919
1920void
1921Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
1922 bool already_copied, bool pending_inval)
1923{
1924 // sanity check
1925 assert(req_pkt->isRequest());
1926 assert(req_pkt->needsResponse());
1927
1928 DPRINTF(Cache, "%s: for %s\n", __func__, req_pkt->print());
1929 // timing-mode snoop responses require a new packet, unless we
1930 // already made a copy...
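// (for deferred snoops the copy was already made by
// MSHR::handleSnoop, in which case already_copied is true and we
// respond using the packet we were handed)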
1931 PacketPtr pkt = req_pkt; 1932 if (!already_copied) 1933 // do not clear flags, and allocate space for data if the 1934 // packet needs it (the only packets that carry data are read 1935 // responses) 1936 pkt = new Packet(req_pkt, false, req_pkt->isRead()); 1937 1938 assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() || 1939 pkt->hasSharers()); 1940 pkt->makeTimingResponse(); 1941 if (pkt->isRead()) { 1942 pkt->setDataFromBlock(blk_data, blkSize); 1943 } 1944 if (pkt->cmd == MemCmd::ReadResp && pending_inval) { 1945 // Assume we defer a response to a read from a far-away cache 1946 // A, then later defer a ReadExcl from a cache B on the same 1947 // bus as us. We'll assert cacheResponding in both cases, but 1948 // in the latter case cacheResponding will keep the 1949 // invalidation from reaching cache A. This special response 1950 // tells cache A that it gets the block to satisfy its read, 1951 // but must immediately invalidate it. 1952 pkt->cmd = MemCmd::ReadRespWithInvalidate; 1953 } 1954 // Here we consider forward_time, paying for just forward latency and 1955 // also charging the delay provided by the xbar. 1956 // forward_time is used as send_time in next allocateWriteBuffer(). 1957 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 1958 // Here we reset the timing of the packet. 1959 pkt->headerDelay = pkt->payloadDelay = 0; 1960 DPRINTF(CacheVerbose, "%s: created response: %s tick: %lu\n", __func__, 1961 pkt->print(), forward_time); 1962 memSidePort->schedTimingSnoopResp(pkt, forward_time, true); 1963} 1964 1965uint32_t 1966Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing, 1967 bool is_deferred, bool pending_inval) 1968{ 1969 DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print()); 1970 // deferred snoops can only happen in timing mode 1971 assert(!(is_deferred && !is_timing)); 1972 // pending_inval only makes sense on deferred snoops 1973 assert(!(pending_inval && !is_deferred)); 1974 assert(pkt->isRequest()); 1975 1976 // the packet may get modified if we or a forwarded snooper 1977 // responds in atomic mode, so remember a few things about the 1978 // original packet up front 1979 bool invalidate = pkt->isInvalidate(); 1980 bool M5_VAR_USED needs_writable = pkt->needsWritable(); 1981 1982 // at the moment we could get an uncacheable write which does not 1983 // have the invalidate flag, and we need a suitable way of dealing 1984 // with this case 1985 panic_if(invalidate && pkt->req->isUncacheable(), 1986 "%s got an invalidating uncacheable snoop request %s", 1987 name(), pkt->print()); 1988 1989 uint32_t snoop_delay = 0; 1990 1991 if (forwardSnoops) { 1992 // first propagate snoop upward to see if anyone above us wants to 1993 // handle it. 
save & restore packet src since it will get
1994 // rewritten to be relative to cpu-side bus (if any)
1995 bool alreadyResponded = pkt->cacheResponding();
1996 if (is_timing) {
1997 // copy the packet so that we can clear any flags before
1998 // forwarding it upwards, we also allocate data (passing
1999 // the pointer along in case of static data), in case
2000 // there is a snoop hit in upper levels
2001 Packet snoopPkt(pkt, true, true);
2002 snoopPkt.setExpressSnoop();
2003 // the snoop packet does not need to wait any additional
2004 // time
2005 snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
2006 cpuSidePort->sendTimingSnoopReq(&snoopPkt);
2007
2008 // add the header delay (including crossbar and snoop
2009 // delays) of the upward snoop to the snoop delay for this
2010 // cache
2011 snoop_delay += snoopPkt.headerDelay;
2012
2013 if (snoopPkt.cacheResponding()) {
2014 // cache-to-cache response from some upper cache
2015 assert(!alreadyResponded);
2016 pkt->setCacheResponding();
2017 }
2018 // upstream cache has the block, or has an outstanding
2019 // MSHR, pass the flag on
2020 if (snoopPkt.hasSharers()) {
2021 pkt->setHasSharers();
2022 }
2023 // If this request is a prefetch or clean evict and an upper level
2024 // signals block present, make sure to propagate the block
2025 // presence to the requester.
2026 if (snoopPkt.isBlockCached()) {
2027 pkt->setBlockCached();
2028 }
2029 } else {
2030 cpuSidePort->sendAtomicSnoop(pkt);
2031 if (!alreadyResponded && pkt->cacheResponding()) {
2032 // cache-to-cache response from some upper cache:
2033 // forward response to original requester
2034 assert(pkt->isResponse());
2035 }
2036 }
2037 }
2038
2039 if (!blk || !blk->isValid()) {
2040 DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__,
2041 pkt->print());
2042 if (is_deferred) {
2043 // we no longer have the block, and will not respond, but a
2044 // packet was allocated in MSHR::handleSnoop and we have
2045 // to delete it
2046 assert(pkt->needsResponse());
2047
2048 // we have passed the block to a cache upstream, that
2049 // cache should be responding
2050 assert(pkt->cacheResponding());
2051
2052 delete pkt;
2053 }
2054 return snoop_delay;
2055 } else {
2056 DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__,
2057 pkt->print(), blk->print());
2058 }
2059
2060 chatty_assert(!(isReadOnly && blk->isDirty()),
2061 "Should never have a dirty block in a read-only cache %s\n",
2062 name());
2063
2064 // We may end up modifying both the block state and the packet (if
2065 // we respond in atomic mode), so just figure out what to do now
2066 // and then do it later. We respond to all snoops that need
2067 // responses provided we have the block in dirty state. The
2068 // invalidation itself is taken care of below.
2069 bool respond = blk->isDirty() && pkt->needsResponse();
2070 bool have_writable = blk->isWritable();
2071
2072 // Invalidate any prefetches from below that would strip write permissions
2073 // MemCmd::HardPFReq is only observed by upstream caches. After missing
2074 // above and in its own cache, a new MemCmd::ReadReq is created that
2075 // downstream caches observe.
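// mustCheckAbove() holds for packets that merely probe for a copy
// above (hardware prefetches and Writeback/CleanEvict evictions);
// they are answered by setting BLOCK_CACHED rather than with data.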
2076 if (pkt->mustCheckAbove()) {
2077 DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
2078 "from lower cache\n", pkt->getAddr(), pkt->print());
2079 pkt->setBlockCached();
2080 return snoop_delay;
2081 }
2082
2083 if (pkt->isRead() && !invalidate) {
2084 // reading without requiring the line in a writable state
2085 assert(!needs_writable);
2086 pkt->setHasSharers();
2087
2088 // if the requesting packet is uncacheable, retain the line in
2089 // the current state, otherwise unset the writable flag,
2090 // which means we go from Modified to Owned (and will respond
2091 // below), remain in Owned (and will respond below), from
2092 // Exclusive to Shared, or remain in Shared
2093 if (!pkt->req->isUncacheable())
2094 blk->status &= ~BlkWritable;
2095 }
2096
2097 if (respond) {
2098 // prevent anyone else from responding, cache as well as
2099 // memory, and also prevent any memory from even seeing the
2100 // request
2101 pkt->setCacheResponding();
2102 if (have_writable) {
2103 // inform the cache hierarchy that this cache had the line
2104 // in the Modified state so that we avoid unnecessary
2105 // invalidations (see Packet::setResponderHadWritable)
2106 pkt->setResponderHadWritable();
2107
2108 // in the case of an uncacheable request there is no point
2109 // in setting the responderHadWritable flag, but since the
2110 // recipient does not care there is no harm in doing so
2111 } else {
2112 // if the packet has needsWritable set we invalidate our
2113 // copy below and all other copies will be invalidated
2114 // through express snoops, and if needsWritable is not set
2115 // we already called setHasSharers above
2116 }
2117
2118 // if we are returning a writable and dirty (Modified) line,
2119 // we should be invalidating the line
2120 panic_if(!invalidate && !pkt->hasSharers(),
2121 "%s is passing a Modified line through %s, "
2122 "but keeping the block", name(), pkt->print());
2123
2124 if (is_timing) {
2125 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
2126 } else {
2127 pkt->makeAtomicResponse();
2128 // packets such as upgrades do not actually have any data
2129 // payload
2130 if (pkt->hasData())
2131 pkt->setDataFromBlock(blk->data, blkSize);
2132 }
2133 }
2134
2135 if (!respond && is_deferred) {
2136 assert(pkt->needsResponse());
2137
2138 // if we copied the deferred packet with the intention to
2139 // respond, but are not responding, then a cache above us must
2140 // be, and we can use this as the indication of whether this
2141 // is a packet where we created a copy of the request or not
2142 if (!pkt->cacheResponding()) {
2143 delete pkt->req;
2144 }
2145
2146 delete pkt;
2147 }
2148
2149 // Do this last in case it deallocates block data or something
2150 // like that
2151 if (invalidate) {
2152 invalidateBlock(blk);
2153 }
2154
2155 DPRINTF(Cache, "new state is %s\n", blk->print());
2156
2157 return snoop_delay;
2158}
2159
2160
2161void
2162Cache::recvTimingSnoopReq(PacketPtr pkt)
2163{
2164 DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());
2165
2166 // Snoops shouldn't happen when bypassing caches
2167 assert(!system->bypassCaches());
2168
2169 // no need to snoop requests that are not in range
2170 if (!inRange(pkt->getAddr())) {
2171 return;
2172 }
2173
2174 bool is_secure = pkt->isSecure();
2175 CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
2176
2177 Addr blk_addr = pkt->getBlockAddr(blkSize);
2178 MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
2179
2180 // Update the latency cost of
the snoop so that the crossbar can
2181 // account for it. Do not overwrite what other neighbouring caches
2182 // have already done, rather take the maximum. The update is
2183 // tentative, for cases where we return before an upward snoop
2184 // happens below.
2185 pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
2186 lookupLatency * clockPeriod());
2187
2188 // Inform requests from below (Prefetch, CleanEvict or Writeback)
2189 // of an MSHR hit by setting BLOCK_CACHED.
2190 if (mshr && pkt->mustCheckAbove()) {
2191 DPRINTF(Cache, "Setting block cached for %s from lower cache on "
2192 "mshr hit\n", pkt->print());
2193 pkt->setBlockCached();
2194 return;
2195 }
2196
2197 // Let the MSHR itself track the snoop and decide whether we want
2198 // to go ahead and do the regular cache snoop
2199 if (mshr && mshr->handleSnoop(pkt, order++)) {
2200 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx "
2201 "(%s). mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
2202 mshr->print());
2203
2204 if (mshr->getNumTargets() > numTarget)
2205 warn("allocating bonus target for snoop"); // handle later
2206 return;
2207 }
2208
2209 // We also need to check the writeback buffers and handle those
2210 WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure);
2211 if (wb_entry) {
2212 DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
2213 pkt->getAddr(), is_secure ? "s" : "ns");
2214 // Expect to see only Writebacks and/or CleanEvicts here, both of
2215 // which should not be generated for uncacheable data.
2216 assert(!wb_entry->isUncacheable());
2217 // There should only be a single request responsible for generating
2218 // Writebacks/CleanEvicts.
2219 assert(wb_entry->getNumTargets() == 1);
2220 PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
2221 assert(wb_pkt->isEviction() || wb_pkt->cmd == MemCmd::WriteClean);
2222
2223 if (pkt->isEviction()) {
2224 // if the block is found in the write queue, set the BLOCK_CACHED
2225 // flag for Writeback/CleanEvict snoop. On return the snoop will
2226 // propagate the BLOCK_CACHED flag in Writeback packets and prevent
2227 // any CleanEvicts from travelling down the memory hierarchy.
2228 pkt->setBlockCached();
2229 DPRINTF(Cache, "%s: Squashing %s from lower cache on writequeue "
2230 "hit\n", __func__, pkt->print());
2231 return;
2232 }
2233
2234 // conceptually writebacks are no different to other blocks in
2235 // this cache, so the behaviour is modelled after handleSnoop,
2236 // the difference being that instead of querying the block
2237 // state to determine if it is dirty and writable, we use the
2238 // command and fields of the writeback packet
2239 bool respond = wb_pkt->cmd == MemCmd::WritebackDirty &&
2240 pkt->needsResponse();
2241 bool have_writable = !wb_pkt->hasSharers();
2242 bool invalidate = pkt->isInvalidate();
2243
2244 if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
2245 assert(!pkt->needsWritable());
2246 pkt->setHasSharers();
2247 wb_pkt->setHasSharers();
2248 }
2249
2250 if (respond) {
2251 pkt->setCacheResponding();
2252
2253 if (have_writable) {
2254 pkt->setResponderHadWritable();
2255 }
2256
2257 doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
2258 false, false);
2259 }
2260
2261 if (invalidate) {
2262 // Invalidation trumps our writeback... discard here
2263 // Note: markInService will remove the entry from the writeback buffer.
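// The writeback packet itself is deleted below, as it will never
// be sent downstream.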
2264 markInService(wb_entry);
2265 delete wb_pkt;
2266 }
2267 }
2268
2269 // If this was a shared writeback, there may still be
2270 // other shared copies above that require invalidation.
2271 // We could be more selective and return here if the
2272 // request is non-exclusive or if the writeback is
2273 // exclusive.
2274 uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false);
2275
2276 // Override what we did when we first saw the snoop, as we now
2277 // also have the cost of the upwards snoops to account for
2278 pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay +
2279 lookupLatency * clockPeriod());
2280}
2281
2282bool
2283Cache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
2284{
2285 // Express snoop responses from master to slave, e.g., from L1 to L2
2286 cache->recvTimingSnoopResp(pkt);
2287 return true;
2288}
2289
2290Tick
2291Cache::recvAtomicSnoop(PacketPtr pkt)
2292{
2293 // Snoops shouldn't happen when bypassing caches
2294 assert(!system->bypassCaches());
2295
2296 // no need to snoop requests that are not in range.
2297 if (!inRange(pkt->getAddr())) {
2298 return 0;
2299 }
2300
2301 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
2302 uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
2303 return snoop_delay + lookupLatency * clockPeriod();
2304}
2305
2306
2307QueueEntry*
2308Cache::getNextQueueEntry()
2309{
2310 // Check both MSHR queue and write buffer for potential requests,
2311 // note that null does not mean there is no request, it could
2312 // simply be that it is not ready
2313 MSHR *miss_mshr = mshrQueue.getNext();
2314 WriteQueueEntry *wq_entry = writeBuffer.getNext();
2315
2316 // If we got a write buffer request ready, first priority is a
2317 // full write buffer, otherwise we favour the miss requests
2318 if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
2319 // need to search MSHR queue for conflicting earlier miss.
2320 MSHR *conflict_mshr =
2321 mshrQueue.findPending(wq_entry->blkAddr,
2322 wq_entry->isSecure);
2323
2324 if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
2325 // Service misses in order until conflict is cleared.
2326 return conflict_mshr;
2327
2328 // @todo Note that we ignore the ready time of the conflict here
2329 }
2330
2331 // No conflicts; issue write
2332 return wq_entry;
2333 } else if (miss_mshr) {
2334 // need to check for conflicting earlier writeback
2335 WriteQueueEntry *conflict_mshr =
2336 writeBuffer.findPending(miss_mshr->blkAddr,
2337 miss_mshr->isSecure);
2338 if (conflict_mshr) {
2339 // not sure why we don't check order here... it was in the
2340 // original code but commented out.
2341
2342 // The only way this happens is if we are doing a write and we
2343 // didn't have permissions, then subsequently saw a writeback
2344 // (the owned block got evicted). We need to make sure to
2345 // perform the writeback first to preserve the dirty data,
2346 // then we can issue the write.
2347
2348 // should we return wq_entry here instead? I.e. do we
2349 // have to flush writes in order? I don't think so... not
2350 // for Alpha anyway. Maybe for x86?
2351 return conflict_mshr;
2352
2353 // @todo Note that we ignore the ready time of the conflict here
2354 }
2355
2356 // No conflicts; issue read
2357 return miss_mshr;
2358 }
2359
2360 // fall through... no pending requests. Try a prefetch.
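// A prefetch is attempted only if the prefetcher has a packet ready
// and the MSHR queue has spare capacity for it (canPrefetch()), so
// that prefetches do not starve demand misses.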
2361 assert(!miss_mshr && !wq_entry);
2362 if (prefetcher && mshrQueue.canPrefetch()) {
2363 // If we have a miss queue slot, we can try a prefetch
2364 PacketPtr pkt = prefetcher->getPacket();
2365 if (pkt) {
2366 Addr pf_addr = pkt->getBlockAddr(blkSize);
2367 if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
2368 !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
2369 !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
2370 // Update statistic on number of prefetches issued
2371 // (hwpf_mshr_misses)
2372 assert(pkt->req->masterId() < system->maxMasters());
2373 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
2374
2375 // allocate an MSHR and return it, note
2376 // that we send the packet straight away, so do not
2377 // schedule the send
2378 return allocateMissBuffer(pkt, curTick(), false);
2379 } else {
2380 // free the request and packet
2381 delete pkt->req;
2382 delete pkt;
2383 }
2384 }
2385 }
2386
2387 return nullptr;
2388}
2389
2390bool
2391Cache::isCachedAbove(PacketPtr pkt, bool is_timing) const
2392{
2393 if (!forwardSnoops)
2394 return false;
2395 // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
2396 // Writeback snoops into upper level caches to check for copies of the
2397 // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict
2398 // packet, the cache can inform the crossbar below of presence or absence
2399 // of the block.
2400 if (is_timing) {
2401 Packet snoop_pkt(pkt, true, false);
2402 snoop_pkt.setExpressSnoop();
2403 // Assert that packet is either Writeback or CleanEvict and not a
2404 // prefetch request because prefetch requests need an MSHR and may
2405 // generate a snoop response.
2406 assert(pkt->isEviction() || pkt->cmd == MemCmd::WriteClean);
2407 snoop_pkt.senderState = nullptr;
2408 cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
2409 // Writeback/CleanEvict snoops do not generate a snoop response.
2410 assert(!(snoop_pkt.cacheResponding()));
2411 return snoop_pkt.isBlockCached();
2412 } else {
2413 cpuSidePort->sendAtomicSnoop(pkt);
2414 return pkt->isBlockCached();
2415 }
2416}
2417
2418Tick
2419Cache::nextQueueReadyTime() const
2420{
2421 Tick nextReady = std::min(mshrQueue.nextReadyTime(),
2422 writeBuffer.nextReadyTime());
2423
2424 // Don't signal prefetch ready time if no MSHRs available
2425 // Will signal once enough MSHRs are deallocated
2426 if (prefetcher && mshrQueue.canPrefetch()) {
2427 nextReady = std::min(nextReady,
2428 prefetcher->nextPrefetchReadyTime());
2429 }
2430
2431 return nextReady;
2432}
2433
2434bool
2435Cache::sendMSHRQueuePacket(MSHR* mshr)
2436{
2437 assert(mshr);
2438
2439 // use request from 1st target
2440 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
2441
2442 DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());
2443
2444 CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
2445
2446 if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
2447 // we should never have hardware prefetches to allocated
2448 // blocks
2449 assert(blk == nullptr);
2450
2451 // We need to check the caches above us to verify that
2452 // they don't have a copy of this block in the dirty state
2453 // at the moment. Without this check we could get a stale
2454 // copy from memory that might get used in place of the
2455 // dirty one.
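// To that end we send an express snoop upwards first; depending on
// the outcome the prefetch is either satisfied by a dirty response
// from an upper cache, squashed because the block is already cached
// above, or sent downstream as a regular miss.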
2456 Packet snoop_pkt(tgt_pkt, true, false);
2457 snoop_pkt.setExpressSnoop();
2458 // We are sending this packet upwards, but if it hits we will
2459 // get a snoop response that we end up treating just like a
2460 // normal response, hence it needs the MSHR as its sender
2461 // state
2462 snoop_pkt.senderState = mshr;
2463 cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
2464
2465 // Check to see if the prefetch was squashed by an upper
2466 // cache (to prevent us from grabbing the line), or if a
2467 // writeback arrived between the time the prefetch was
2468 // placed in the MSHRs and when it was selected to be
2469 // sent.
2470
2471 // It is important to check cacheResponding before
2472 // prefetchSquashed. If another cache has committed to
2473 // responding, it will be sending a dirty response which will
2474 // arrive at the MSHR allocated for this request. Checking the
2475 // prefetchSquash first may result in the MSHR being
2476 // prematurely deallocated.
2477 if (snoop_pkt.cacheResponding()) {
2478 auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
2479 assert(r.second);
2480
2481 // if we are getting a snoop response with no sharers it
2482 // will be allocated as Modified
2483 bool pending_modified_resp = !snoop_pkt.hasSharers();
2484 markInService(mshr, pending_modified_resp);
2485
2486 DPRINTF(Cache, "Upward snoop of prefetch for addr"
2487 " %#x (%s) hit\n",
2488 tgt_pkt->getAddr(), tgt_pkt->isSecure() ? "s" : "ns");
2489 return false;
2490 }
2491
2492 if (snoop_pkt.isBlockCached()) {
2493 DPRINTF(Cache, "Block present, prefetch squashed by cache. "
2494 "Deallocating mshr target %#x.\n",
2495 mshr->blkAddr);
2496
2497 // Deallocate the mshr target
2498 if (mshrQueue.forceDeallocateTarget(mshr)) {
2499 // Clear the blocked flag if this deallocation freed an
2500 // MSHR when all had previously been utilized
2501 clearBlocked(Blocked_NoMSHRs);
2502 }
2503
2504 // given that no response is expected, delete Request and Packet
2505 delete tgt_pkt->req;
2506 delete tgt_pkt;
2507
2508 return false;
2509 }
2510 }
2511
2512 // either a prefetch that is not present upstream, or a normal
2513 // MSHR request, proceed to get the packet to send downstream
2514 PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable());
2515
2516 mshr->isForward = (pkt == nullptr);
2517
2518 if (mshr->isForward) {
2519 // not a cache block request, but a response is expected
2520 // make copy of current packet to forward, keep current
2521 // copy for response handling
2522 pkt = new Packet(tgt_pkt, false, true);
2523 assert(!pkt->isWrite());
2524 }
2525
2526 // play it safe and append (rather than set) the sender state,
2527 // as forwarded packets may already have existing state
2528 pkt->pushSenderState(mshr);
2529
2530 if (!memSidePort->sendTimingReq(pkt)) {
2531 // we are awaiting a retry, but we
2532 // delete the packet and will be creating a new packet
2533 // when we get the opportunity
2534 delete pkt;
2535
2536 // note that we have now masked any requestBus and
2537 // schedSendEvent (we will wait for a retry before
2538 // doing anything), and this is so even if we do not
2539 // care about this packet and might override it before
2540 // it gets retried
2541 return true;
2542 } else {
2543 // As part of the call to sendTimingReq the packet is
2544 // forwarded to all neighbouring caches (and any caches
2545 // above them) as a snoop.
Thus at this point we know if
2546 // any of the neighbouring caches are responding, and if
2547 // so, we know it is dirty, and we can determine if it is
2548 // being passed as Modified, making our MSHR the ordering
2549 // point
2550 bool pending_modified_resp = !pkt->hasSharers() &&
2551 pkt->cacheResponding();
2552 markInService(mshr, pending_modified_resp);
2553 return false;
2554 }
2555}
2556
2557bool
2558Cache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
2559{
2560 assert(wq_entry);
2561
2562 // always a single target for write queue entries
2563 PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;
2564
2565 DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());
2566
2567 // forward as is, both for evictions and uncacheable writes
2568 if (!memSidePort->sendTimingReq(tgt_pkt)) {
2569 // note that we have now masked any requestBus and
2570 // schedSendEvent (we will wait for a retry before
2571 // doing anything), and this is so even if we do not
2572 // care about this packet and might override it before
2573 // it gets retried
2574 return true;
2575 } else {
2576 markInService(wq_entry);
2577 return false;
2578 }
2579}
2580
2581void
2582Cache::serialize(CheckpointOut &cp) const
2583{
2584 bool dirty(isDirty());
2585
2586 if (dirty) {
2587 warn("*** The cache still contains dirty data. ***\n");
2588 warn(" Make sure to drain the system using the correct flags.\n");
2589 warn(" This checkpoint will not restore correctly and dirty data "
2590 "in the cache will be lost!\n");
2591 }
2592
2593 // Since we don't checkpoint the data in the cache, any dirty data
2594 // will be lost when restoring from a checkpoint of a system that
2595 // wasn't drained properly. Flag the checkpoint as invalid if the
2596 // cache contains dirty data.
2597 bool bad_checkpoint(dirty);
2598 SERIALIZE_SCALAR(bad_checkpoint);
2599}
2600
2601void
2602Cache::unserialize(CheckpointIn &cp)
2603{
2604 bool bad_checkpoint;
2605 UNSERIALIZE_SCALAR(bad_checkpoint);
2606 if (bad_checkpoint) {
2607 fatal("Restoring from checkpoints with dirty caches is not supported "
2608 "in the classic memory system.
Please remove any caches or "
2609 "drain them properly before taking checkpoints.\n");
2610 }
2611}
2612
2613///////////////
2614//
2615// CpuSidePort
2616//
2617///////////////
2618
2619AddrRangeList
2620Cache::CpuSidePort::getAddrRanges() const
2621{
2622 return cache->getAddrRanges();
2623}
2624
2625bool
2626Cache::CpuSidePort::tryTiming(PacketPtr pkt)
2627{
2628 assert(!cache->system->bypassCaches());
2629
2630 // always let express snoop packets through even if blocked
2631 if (pkt->isExpressSnoop()) {
2632 return true;
2633 } else if (isBlocked() || mustSendRetry) {
2634 // either already committed to send a retry, or blocked
2635 mustSendRetry = true;
2636 return false;
2637 }
2638 mustSendRetry = false;
2639 return true;
2640}
2641
2642bool
2643Cache::CpuSidePort::recvTimingReq(PacketPtr pkt)
2644{
2645 assert(!cache->system->bypassCaches());
2646
2647 // always let express snoop packets through even if blocked
2648 if (pkt->isExpressSnoop()) {
2649 bool M5_VAR_USED bypass_success = cache->recvTimingReq(pkt);
2650 assert(bypass_success);
2651 return true;
2652 }
2653
2654 return tryTiming(pkt) && cache->recvTimingReq(pkt);
2655}
2656
2657Tick
2658Cache::CpuSidePort::recvAtomic(PacketPtr pkt)
2659{
2660 return cache->recvAtomic(pkt);
2661}
2662
2663void
2664Cache::CpuSidePort::recvFunctional(PacketPtr pkt)
2665{
2666 // functional request
2667 cache->functionalAccess(pkt, true);
2668}
2669
2670Cache::
2671CpuSidePort::CpuSidePort(const std::string &_name, Cache *_cache,
2672 const std::string &_label)
2673 : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache)
2674{
2675}
2676
2677Cache*
2678CacheParams::create()
2679{
2680 assert(tags);
2681
2682 return new Cache(this);
2683}
2684///////////////
2685//
2686// MemSidePort
2687//
2688///////////////
2689
2690bool
2691Cache::MemSidePort::recvTimingResp(PacketPtr pkt)
2692{
2693 cache->recvTimingResp(pkt);
2694 return true;
2695}
2696
2697// Express snooping requests to memside port
2698void
2699Cache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
2700{
2701 // handle snooping requests
2702 cache->recvTimingSnoopReq(pkt);
2703}
2704
2705Tick
2706Cache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
2707{
2708 return cache->recvAtomicSnoop(pkt);
2709}
2710
2711void
2712Cache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
2713{
2714 // functional snoop (note that in contrast to atomic we don't have
2715 // a specific functionalSnoop method, as they have the same
2716 // behaviour regardless)
2717 cache->functionalAccess(pkt, false);
2718}
2719
2720void
2721Cache::CacheReqPacketQueue::sendDeferredPacket()
2722{
2723 // sanity check
2724 assert(!waitingOnRetry);
2725
2726 // there should never be any deferred request packets in the
2727 // queue, instead we rely on the cache to provide the packets
2728 // from the MSHR queue or write queue
2729 assert(deferredPacketReadyTime() == MaxTick);
2730
2731 // check for request packets (requests & writebacks)
2732 QueueEntry* entry = cache.getNextQueueEntry();
2733
2734 if (!entry) {
2735 // can happen if e.g. we attempt a writeback and fail, but
2736 // before the retry, the writeback is eliminated because
2737 // we snoop another cache's ReadEx.
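// in that case there is nothing to send right now; the
// schedSendEvent call below will reschedule us based on
// nextQueueReadyTime()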
2738 } else { 2739 // let our snoop responses go first if there are responses to 2740 // the same addresses 2741 if (checkConflictingSnoop(entry->blkAddr)) { 2742 return; 2743 } 2744 waitingOnRetry = entry->sendPacket(cache); 2745 } 2746 2747 // if we succeeded and are not waiting for a retry, schedule the 2748 // next send considering when the next queue is ready, note that 2749 // snoop responses have their own packet queue and thus schedule 2750 // their own events 2751 if (!waitingOnRetry) { 2752 schedSendEvent(cache.nextQueueReadyTime()); 2753 } 2754} 2755 2756Cache:: 2757MemSidePort::MemSidePort(const std::string &_name, Cache *_cache, 2758 const std::string &_label) 2759 : BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue), 2760 _reqQueue(*_cache, *this, _snoopRespQueue, _label), 2761 _snoopRespQueue(*_cache, *this, _label), cache(_cache) 2762{ 2763} 2764