cache.cc revision 12691
/*
 * Copyright (c) 2010-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Sandberg
 *          Nikos Nikoleris
 */

/**
 * @file
 * Cache definitions.
 */

#include "mem/cache/cache.hh"

#include "base/logging.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheTags.hh"
#include "debug/CacheVerbose.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "sim/sim_exit.hh"

Cache::Cache(const CacheParams *p)
    : BaseCache(p, p->system->cacheLineSize()),
      tags(p->tags),
      prefetcher(p->prefetcher),
      doFastWrites(true),
      prefetchOnAccess(p->prefetch_on_access),
      clusivity(p->clusivity),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
                                    name(), false,
                                    EventBase::Delayed_Writeback_Pri)
{
    tempBlock = new CacheBlk();
    tempBlock->data = new uint8_t[blkSize];

    cpuSidePort = new CpuSidePort(p->name + ".cpu_side", this,
                                  "CpuSidePort");
    memSidePort = new MemSidePort(p->name + ".mem_side", this,
                                  "MemSidePort");

    tags->setCache(this);
    if (prefetcher)
        prefetcher->setCache(this);
}

Cache::~Cache()
{
    delete [] tempBlock->data;
    delete tempBlock;

    delete cpuSidePort;
    delete memSidePort;
}

void
Cache::regStats()
{
    BaseCache::regStats();
}

void
Cache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = tags->extractBlkOffset(pkt->getAddr());
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}
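
// For intuition, the conditional-swap path above behaves like a plain
// compare-and-swap on the block's backing bytes. A minimal sketch of
// the same idea on a raw buffer (cas_sketch and its parameters are
// illustrative names, not part of the cache interface):
//
//     bool
//     cas_sketch(uint8_t *blk_data, uint64_t expected, uint64_t desired)
//     {
//         // overwrite only if the current contents equal 'expected',
//         // mirroring the isCondSwap() comparison above
//         if (std::memcmp(&expected, blk_data, sizeof(expected)) != 0)
//             return false;
//         std::memcpy(blk_data, &desired, sizeof(desired));
//         return true;
//     }
//
// The real implementation additionally returns the old memory contents
// to the requester via pkt->setData() and marks the block dirty when it
// writes.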

void
Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                      bool deferred_response, bool pending_downgrade)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsWritable() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        cmpAndSwap(blk, pkt);
    } else if (pkt->isWrite()) {
        // we have the block in a writable state and can go ahead,
        // note that the line may also be considered writable in
        // downstream caches along the path to memory, but always
        // Exclusive, and never Modified
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in writable state
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty (and thus transition to the
        // Modified state) even if we are a failed StoreCond so we
        // supply data to any snoops that have appended themselves to
        // this cache before knowing the store will fail.
        blk->status |= BlkDirty;
        DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }

        // all read responses have a data payload
        assert(pkt->hasRespData());
        pkt->setDataFromBlock(blk->data, blkSize);

        // determine if this read is from a (coherent) cache or not
        if (pkt->fromCache()) {
            assert(pkt->getSize() == blkSize);
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsWritable()) {
                // sanity check
                assert(pkt->cmd == MemCmd::ReadExReq ||
                       pkt->cmd == MemCmd::SCUpgradeFailReq);
                assert(!pkt->hasSharers());

                // if we have a dirty copy, make sure the recipient
                // keeps it marked dirty (in the modified state)
                if (blk->isDirty()) {
                    pkt->setCacheResponding();
                    blk->status &= ~BlkDirty;
                }
            } else if (blk->isWritable() && !pending_downgrade &&
                       !pkt->hasSharers() &&
                       pkt->cmd != MemCmd::ReadCleanReq) {
                // we can give the requester a writable copy on a read
                // request if:
                // - we have a writable copy at this level (& below)
                // - we don't have a pending snoop from below
                //   signaling another read request
                // - no other cache above has a copy (otherwise it
                //   would have set hasSharers flag when
                //   snooping the packet)
                // - the read has not explicitly asked for a clean
                //   copy of the line
                if (blk->isDirty()) {
                    // special considerations if we're owner:
                    if (!deferred_response) {
                        // respond with the line in Modified state
                        // (cacheResponding set, hasSharers not set)
                        pkt->setCacheResponding();

                        // if this cache is mostly inclusive, we
                        // keep the block in the Exclusive state,
                        // and pass it upwards as Modified
                        // (writable and dirty), hence we have
                        // multiple caches, all on the same path
                        // towards memory, all considering the
                        // same block writable, but only one
                        // considering it Modified

                        // we get away with multiple caches (on
                        // the same path to memory) considering
                        // the block writable as we always enter
                        // the cache hierarchy through a cache,
                        // and first snoop upwards in all other
                        // branches
                        blk->status &= ~BlkDirty;
                    } else {
                        // if we're responding after our own miss,
                        // there's a window where the recipient didn't
                        // know it was getting ownership and may not
                        // have responded to snoops correctly, so we
                        // have to respond with a shared line
                        pkt->setHasSharers();
                    }
                }
            } else {
                // otherwise only respond with a shared copy
                pkt->setHasSharers();
            }
        }
    } else if (pkt->isUpgrade()) {
        // sanity check
        assert(!pkt->hasSharers());

        if (blk->isDirty()) {
            // we were in the Owned state, and a cache above us that
            // has the line in Shared state needs to be made aware
            // that the data it already has is in fact dirty
            pkt->setCacheResponding();
            blk->status &= ~BlkDirty;
        }
    } else {
        assert(pkt->isInvalidate());
        invalidateBlock(blk);
        DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
                pkt->print());
    }
}
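
// As a reading aid for the transitions above: gem5 encodes MOESI-like
// states in the CacheBlk status flags, roughly as follows (see the
// comments in satisfyRequest() and mem/cache/blk.hh):
//
//     State      BlkValid  BlkWritable  BlkDirty
//     Modified      1          1            1
//     Owned         1          0            1
//     Exclusive     1          1            0
//     Shared        1          0            0
//     Invalid       0          -            -
//
// For example, clearing BlkDirty while setting cacheResponding passes
// Modified upstream and leaves this copy in Exclusive.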

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

bool
Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
              PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());

    DPRINTF(CacheVerbose, "%s for %s\n", __func__, pkt->print());

    if (pkt->req->isUncacheable()) {
        DPRINTF(Cache, "uncacheable: %s\n", pkt->print());

        // flush and invalidate any existing block
        CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
        if (old_blk && old_blk->isValid()) {
            if (old_blk->isDirty() || writebackClean)
                writebacks.push_back(writebackBlk(old_blk));
            else
                writebacks.push_back(cleanEvictBlk(old_blk));
            invalidateBlock(old_blk);
        }

        blk = nullptr;
        // lookupLatency is the latency in case the request is uncacheable.
        lat = lookupLatency;
        return false;
    }

    // Note that lat is passed by reference here: accessBlock() may
    // update its value.
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat);

    DPRINTF(Cache, "%s %s\n", pkt->print(),
            blk ? "hit " + blk->print() : "miss");

    if (pkt->req->isCacheMaintenance()) {
        // A cache maintenance operation is always forwarded to the
        // memory below even if the block is found in dirty state.

        // We defer any changes to the state of the block until we
        // create and mark as in service the mshr for the downstream
        // packet.
        return false;
    }

    if (pkt->isEviction()) {
        // We check for presence of the block in caches above before
        // issuing a Writeback or CleanEvict to the write buffer.
        // Therefore the only possible case here is a CleanEvict packet
        // coming from above and encountering a Writeback generated in
        // this cache that is waiting in the write buffer. Cases of upper
        // level peer caches generating CleanEvict and Writeback, or
        // simply CleanEvict and CleanEvict, almost simultaneously will
        // be caught by snoops sent out by the crossbar.
        WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
                                                          pkt->isSecure());
        if (wb_entry) {
            assert(wb_entry->getNumTargets() == 1);
            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
            assert(wbPkt->isWriteback());

            if (pkt->isCleanEviction()) {
                // CleanEvict and WritebackClean packets snoop into
                // other peer caches of the same level while traversing
                // the crossbar. If a copy of the block is found, the
                // packet is deleted in the crossbar. Hence, none of
                // the other upper level caches connected to this
                // cache have the block, so we can clear the
                // BLOCK_CACHED flag in the Writeback if set and
                // discard the CleanEvict by returning true.
                wbPkt->clearBlockCached();
                return true;
            } else {
                assert(pkt->cmd == MemCmd::WritebackDirty);
                // Dirty writeback from above trumps our clean
                // writeback... discard here
                // Note: markInService will remove entry from writeback buffer.
                markInService(wb_entry);
                delete wbPkt;
            }
        }
    }

    // Writeback handling is a special case. We can write the block into
    // the cache without having a writable copy (or any copy at all).
    if (pkt->isWriteback()) {
        assert(blkSize == pkt->getSize());

        // we could get a clean writeback while we are having
        // outstanding accesses to a block, do the simple thing for
        // now and drop the clean writeback so that we do not upset
        // any ordering/decisions about ownership already taken
        if (pkt->cmd == MemCmd::WritebackClean &&
            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
                    "dropping\n", pkt->getAddr());
            return true;
        }

        if (blk == nullptr) {
            // need to do a replacement
            blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks);
            if (blk == nullptr) {
                // no replaceable block available: give up, fwd to next level.
                incMissCount(pkt);
                return false;
            }
            tags->insertBlock(pkt, blk);

            blk->status |= (BlkValid | BlkReadable);
        }
        // only mark the block dirty if we got a writeback command,
        // and leave it as is for a clean writeback
        if (pkt->cmd == MemCmd::WritebackDirty) {
            assert(!blk->isDirty());
            blk->status |= BlkDirty;
        }
        // if the packet does not have sharers, it is passing
        // writable, and we got the writeback in Modified or Exclusive
        // state, if not we are in the Owned or Shared state
        if (!pkt->hasSharers()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
        incHitCount(pkt);
        // populate the time when the block will be ready to access.
        blk->whenReady = clockEdge(fillLatency) + pkt->headerDelay +
            pkt->payloadDelay;
        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        if (blk != nullptr) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and delete
            // it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the CleanEvict
        // like a Writeback which could not find a replaceable block so has to
        // go to next level.
        return false;
    } else if (pkt->cmd == MemCmd::WriteClean) {
        // WriteClean handling is a special case. We can allocate a
        // block directly if it doesn't exist and we can update the
        // block immediately. The WriteClean transfers the ownership
        // of the block as well.
        assert(blkSize == pkt->getSize());

        if (!blk) {
            if (pkt->writeThrough()) {
                // if this is a write through packet, we don't try to
                // allocate if the block is not present
                return false;
            } else {
                // a writeback that misses needs to allocate a new block
                blk = allocateBlock(pkt->getAddr(), pkt->isSecure(),
                                    writebacks);
                if (!blk) {
                    // no replaceable block available: give up, fwd to
                    // next level.
                    incMissCount(pkt);
                    return false;
                }
                tags->insertBlock(pkt, blk);

                blk->status |= (BlkValid | BlkReadable);
            }
        }

        // at this point either this is a writeback or a write-through
        // write clean operation and the block is already in this
        // cache, we need to update the data and the block flags
        assert(blk);
        assert(!blk->isDirty());
        if (!pkt->writeThrough()) {
            blk->status |= BlkDirty;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());

        incHitCount(pkt);
        // populate the time when the block will be ready to access.
        blk->whenReady = clockEdge(fillLatency) + pkt->headerDelay +
            pkt->payloadDelay;
        // if this is a write-through packet it will be sent to cache
        // below
        return !pkt->writeThrough();
    } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
                       blk->isReadable())) {
        // OK to satisfy access
        incHitCount(pkt);
        satisfyRequest(pkt, blk);
        maintainClusivity(pkt->fromCache(), blk);

        return true;
    }

    // Can't satisfy access normally... either no block (blk == nullptr)
    // or have block but need writable

    incMissCount(pkt);

    if (blk == nullptr && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}

void
Cache::maintainClusivity(bool from_cache, CacheBlk *blk)
{
    if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
        clusivity == Enums::mostly_excl) {
        // if we have responded to a cache, and our block is still
        // valid, but not dirty, and this cache is mostly exclusive
        // with respect to the cache above, drop the block
        invalidateBlock(blk);
    }
}
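
// As an example of the policy above: with clusivity == mostly_excl, a
// cache that has just supplied a clean block to the cache above drops
// its own copy, so a line (mostly) lives in only one level at a time;
// with mostly_incl it keeps the copy. Dirty blocks are never dropped
// here, as that would discard the only up-to-date data without a
// writeback.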

void
Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // We use forwardLatency here because we are copying writebacks to
        // write buffer.

        // Call isCachedAbove for Writebacks, CleanEvicts and
        // WriteCleans to discover if the block is cached above.
        if (isCachedAbove(wbPkt)) {
            if (wbPkt->cmd == MemCmd::CleanEvict) {
                // Delete CleanEvict because cached copies exist above. The
                // packet destructor will delete the request object because
                // this is a non-snoop request packet which does not require a
                // response.
                delete wbPkt;
            } else if (wbPkt->cmd == MemCmd::WritebackClean) {
                // clean writeback, do not send since the block is
                // still cached above
                assert(writebackClean);
                delete wbPkt;
            } else {
                assert(wbPkt->cmd == MemCmd::WritebackDirty ||
                       wbPkt->cmd == MemCmd::WriteClean);
                // Set BLOCK_CACHED flag in Writeback and send below, so that
                // the Writeback does not reset the bit corresponding to this
                // address in the snoop filter below.
                wbPkt->setBlockCached();
                allocateWriteBuffer(wbPkt, forward_time);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            allocateWriteBuffer(wbPkt, forward_time);
        }
        writebacks.pop_front();
    }
}

void
Cache::doWritebacksAtomic(PacketList& writebacks)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // Call isCachedAbove for both Writebacks and CleanEvicts. If
        // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
        // and discard CleanEvicts.
        if (isCachedAbove(wbPkt, false)) {
            if (wbPkt->cmd == MemCmd::WritebackDirty ||
                wbPkt->cmd == MemCmd::WriteClean) {
                // Set BLOCK_CACHED flag in Writeback and send below,
                // so that the Writeback does not reset the bit
                // corresponding to this address in the snoop filter
                // below. We can discard CleanEvicts because cached
                // copies exist above. Atomic mode isCachedAbove
                // modifies packet to set BLOCK_CACHED flag
                memSidePort->sendAtomic(wbPkt);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            memSidePort->sendAtomic(wbPkt);
        }
        writebacks.pop_front();
        // In case of CleanEvicts, the packet destructor will delete the
        // request object because this is a non-snoop request packet which
        // does not require a response.
        delete wbPkt;
    }
}


void
Cache::recvTimingSnoopResp(PacketPtr pkt)
{
    DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

    assert(pkt->isResponse());
    assert(!system->bypassCaches());

    // determine if the response is from a snoop request we created
    // (in which case it should be in the outstandingSnoop), or if we
    // merely forwarded someone else's snoop request
    const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
        outstandingSnoop.end();

    if (!forwardAsSnoop) {
        // the packet came from this cache, so sink it here and do not
        // forward it
        assert(pkt->cmd == MemCmd::HardPFResp);

        outstandingSnoop.erase(pkt->req);

        DPRINTF(Cache, "Got prefetch response from above for addr "
                "%#llx (%s)\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
        recvTimingResp(pkt);
        return;
    }

    // forwardLatency is set here because there is a response from an
    // upper level cache.
    // To pay for the delay that occurs if the packet comes from the bus,
    // we also charge headerDelay.
    Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    memSidePort->schedTimingSnoopResp(pkt, snoop_resp_time);
}

void
Cache::promoteWholeLineWrites(PacketPtr pkt)
{
    // Cache line clearing instructions
    if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) {
        pkt->cmd = MemCmd::WriteLineReq;
        DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
    }
}
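
// In other words, a WriteReq is promoted only when it covers exactly
// one whole, aligned cache line; a sketch of the predicate (the helper
// name is illustrative):
//
//     bool
//     is_whole_line_write(PacketPtr pkt, unsigned blk_size)
//     {
//         return pkt->cmd == MemCmd::WriteReq &&
//             pkt->getSize() == blk_size &&
//             pkt->getOffset(blk_size) == 0;
//     }
//
// Such a write overwrites every byte of the line, so the cache can
// obtain ownership without fetching the old data.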

void
Cache::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(CacheTags, "%s tags:\n%s\n", __func__, tags->print());

    assert(pkt->isRequest());

    // Just forward the packet if caches are disabled.
    if (system->bypassCaches()) {
        // @todo This should really enqueue the packet rather
        bool M5_VAR_USED success = memSidePort->sendTimingReq(pkt);
        assert(success);
        return;
    }

    promoteWholeLineWrites(pkt);

    // Cache maintenance operations have to visit all the caches down
    // to the specified xbar (PoC, PoU, etc.). Even if a cache above
    // is responding we forward the packet to the memory below rather
    // than creating an express snoop.
    if (pkt->cacheResponding()) {
        // a cache above us (but not where the packet came from) is
        // responding to the request, in other words it has the line
        // in Modified or Owned state
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if the packet needs the block to be writable, and the cache
        // that has promised to respond (setting the cache responding
        // flag) is not providing writable (it is in Owned rather than
        // the Modified state), we know that there may be other Shared
        // copies in the system; go out and invalidate them all
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        // an upstream cache that had the line in Owned state
        // (dirty, but not writable), is responding and thus
        // transferring the dirty line from one branch of the
        // cache hierarchy to another

        // send out an express snoop and invalidate all other
        // copies (snooping a packet that needs writable is the
        // same as an invalidation), thus turning the Owned line
        // into a Modified line, note that we don't invalidate the
        // block in the current cache or any other cache on the
        // path to memory

        // create a downstream express snoop with cleared packet
        // flags, there is no need to allocate any data as the
        // packet is merely used to co-ordinate state transitions
        Packet *snoop_pkt = new Packet(pkt, true, false);

        // also reset the bus time that the original packet has
        // not yet paid for
        snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;

        // make this an instantaneous express snoop, and let the
        // other caches in the system know that another cache
        // is responding, because we have found the authoritative
        // copy (Modified or Owned) that will supply the right
        // data
        snoop_pkt->setExpressSnoop();
        snoop_pkt->setCacheResponding();

        // this express snoop travels towards the memory, and at
        // every crossbar it is snooped upwards thus reaching
        // every cache in the system
        bool M5_VAR_USED success = memSidePort->sendTimingReq(snoop_pkt);
        // express snoops always succeed
        assert(success);

        // main memory will delete the snoop packet

        // queue for deletion, as opposed to immediate deletion, as
        // the sending cache is still relying on the packet
        pendingDelete.reset(pkt);

        // no need to take any further action in this particular cache
        // as an upstream cache has already committed to responding,
        // and we have already sent out any express snoops in the
        // section above to ensure all other copies in the system are
        // invalidated
        return;
    }

    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    // We use lookupLatency here because it is used to specify the latency
    // to access.
    Cycles lat = lookupLatency;
    CacheBlk *blk = nullptr;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() calls accessBlock() which can modify lat value.
        satisfied = access(pkt, blk, lat, writebacks);

        // copy writebacks to write buffer here to ensure they logically
        // precede anything happening below
        doWritebacks(writebacks, forward_time);
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is just lat, i.e. lookupLatency as possibly
    // modified by access().
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;

    // track time of availability of next prefetch, if any
    Tick next_pf_time = MaxTick;

    bool needsResponse = pkt->needsResponse();

    if (satisfied) {
        // should never be satisfying an uncacheable access as we
        // flush and invalidate any existing block as part of the
        // lookup
        assert(!pkt->req->isUncacheable());

        // hit (for all other request types)

        if (prefetcher && (prefetchOnAccess ||
                           (blk && blk->wasPrefetched()))) {
            if (blk)
                blk->status &= ~BlkHWPrefetched;

            // Don't notify on SWPrefetch
            if (!pkt->cmd.isSWPrefetch()) {
                assert(!pkt->req->isCacheMaintenance());
                next_pf_time = prefetcher->notify(pkt);
            }
        }

        if (needsResponse) {
            pkt->makeTimingResponse();
            // @todo: Make someone pay for this
            pkt->headerDelay = pkt->payloadDelay = 0;

            // In this case we are considering request_time that takes
            // into account the delay of the xbar, if any, and just
            // lat, neglecting responseLatency, modelling hit latency
            // just as lookupLatency or the value of lat overridden
            // by access(), which calls the accessBlock() function.
            cpuSidePort->schedTimingResp(pkt, request_time, true);
        } else {
            DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
                    pkt->print());

            // queue the packet for deletion, as the sending cache is
            // still relying on it; if the block is found in access(),
            // CleanEvict and Writeback messages will be deleted
            // here as well
            pendingDelete.reset(pkt);
        }
    } else {
        // miss

        Addr blk_addr = pkt->getBlockAddr(blkSize);

        // ignore any existing MSHR if we are dealing with an
        // uncacheable request
        MSHR *mshr = pkt->req->isUncacheable() ? nullptr :
            mshrQueue.findMatch(blk_addr, pkt->isSecure());

        // Software prefetch handling:
        // To keep the core from waiting on data it won't look at
        // anyway, send back a response with dummy data. Miss handling
        // will continue asynchronously. Unfortunately, the core will
        // insist upon freeing the original Packet/Request, so we have to
        // create a new pair with a different lifecycle. Note that this
        // processing happens before any MSHR munging on the behalf of
        // this request because this new Request will be the one stored
        // into the MSHRs, not the original.
        if (pkt->cmd.isSWPrefetch()) {
            assert(needsResponse);
            assert(pkt->req->hasPaddr());
            assert(!pkt->req->isUncacheable());

            // There's no reason to add a prefetch as an additional target
            // to an existing MSHR. If an outstanding request is already
            // in progress, there is nothing for the prefetch to do.
            // If this is the case, we don't even create a request at all.
            PacketPtr pf = nullptr;

            if (!mshr) {
                // copy the request and create a new SoftPFReq packet
                RequestPtr req = new Request(pkt->req->getPaddr(),
                                             pkt->req->getSize(),
                                             pkt->req->getFlags(),
                                             pkt->req->masterId());
                pf = new Packet(req, pkt->cmd);
                pf->allocate();
                assert(pf->getAddr() == pkt->getAddr());
                assert(pf->getSize() == pkt->getSize());
            }

            pkt->makeTimingResponse();

            // request_time is used here, taking into account lat and the delay
            // charged if the packet comes from the xbar.
            cpuSidePort->schedTimingResp(pkt, request_time, true);

            // If an outstanding request is in progress (we found an
            // MSHR) this is set to null
            pkt = pf;
        }

        if (mshr) {
            /// MSHR hit
            /// @note writebacks will be checked in getNextMSHR()
            /// for any conflicting requests to the same block

            //@todo remove hw_pf here

            // Coalesce unless it was a software prefetch (see above).
            if (pkt) {
                assert(!pkt->isWriteback());
                // CleanEvicts corresponding to blocks which have
                // outstanding requests in MSHRs are simply sunk here
                if (pkt->cmd == MemCmd::CleanEvict) {
                    pendingDelete.reset(pkt);
                } else if (pkt->cmd == MemCmd::WriteClean) {
                    // A WriteClean should never coalesce with any
                    // outstanding cache maintenance requests.

                    // We use forward_time here because there is an
                    // uncached memory write, forwarded to WriteBuffer.
                    allocateWriteBuffer(pkt, forward_time);
                } else {
                    DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
                            pkt->print());

                    assert(pkt->req->masterId() < system->maxMasters());
                    mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
                    // We use forward_time here because it is the same
                    // considering new targets. We have multiple
                    // requests for the same address here. It
                    // specifies the latency to allocate an internal
                    // buffer and to schedule an event to the queued
                    // port and also takes into account the additional
                    // delay of the xbar.
                    mshr->allocateTarget(pkt, forward_time, order++,
                                         allocOnFill(pkt->cmd));
                    if (mshr->getNumTargets() == numTarget) {
                        noTargetMSHR = mshr;
                        setBlocked(Blocked_NoTargets);
                        // need to be careful with this... if this mshr isn't
                        // ready yet (i.e. time > curTick()), we don't want to
                        // move it ahead of mshrs that are ready
                        // mshrQueue.moveToFront(mshr);
                    }
                }
                // We should call the prefetcher regardless of whether the
                // request is satisfied or not, and regardless of whether
                // the request is in the MSHR or not. The request could be
                // a ReadReq hit, but still not satisfied (potentially
                // because of a prior write to the same cache line). So,
                // even when not satisfied, there is an MSHR already
                // allocated for this request, and we need to let the
                // prefetcher know about it.
                if (prefetcher) {
                    // Don't notify on SWPrefetch
                    if (!pkt->cmd.isSWPrefetch() &&
                        !pkt->req->isCacheMaintenance())
                        next_pf_time = prefetcher->notify(pkt);
                }
            }
        } else {
            // no MSHR
            assert(pkt->req->masterId() < system->maxMasters());
            if (pkt->req->isUncacheable()) {
                mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;
            } else {
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
            }

            if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
                (pkt->req->isUncacheable() && pkt->isWrite())) {
                // We use forward_time here because there is an
                // uncached memory write, forwarded to WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                if (blk && blk->isValid()) {
                    // should have flushed and have no valid block
                    assert(!pkt->req->isUncacheable());

                    // If we have a write miss to a valid block, we
                    // need to mark the block non-readable. Otherwise
                    // if we allow reads while there's an outstanding
                    // write miss, the read could return stale data
                    // out of the cache block... a more aggressive
                    // system could detect the overlap (if any) and
                    // forward data out of the MSHRs, but we don't do
                    // that yet. Note that we do need to leave the
                    // block valid so that it stays in the cache, in
                    // case we get an upgrade response (and hence no
                    // new data) when the write miss completes.
                    // As long as CPUs do proper store/load forwarding
                    // internally, and have a sufficiently weak memory
                    // model, this is probably unnecessary, but at some
                    // point it must have seemed like we needed it...
                    assert((pkt->needsWritable() && !blk->isWritable()) ||
                           pkt->req->isCacheMaintenance());
                    blk->status &= ~BlkReadable;
                }
                // Here we are using forward_time, modelling the latency of
                // a miss (outbound) just as forwardLatency, neglecting the
                // lookupLatency component.
                allocateMissBuffer(pkt, forward_time);
            }

            if (prefetcher) {
                // Don't notify on SWPrefetch
                if (!pkt->cmd.isSWPrefetch() &&
                    !pkt->req->isCacheMaintenance())
                    next_pf_time = prefetcher->notify(pkt);
            }
        }
    }

    if (next_pf_time != MaxTick)
        schedMemSideSendEvent(next_pf_time);
}
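
// To summarise the timing charges made in recvTimingReq() above, with
// lat being lookupLatency as possibly updated by access():
//
//     forward_time = clockEdge(forwardLatency) + pkt->headerDelay
//     request_time = clockEdge(lat)            + pkt->headerDelay
//
// Hits respond at request_time, while anything forwarded below or
// queued into the MSHRs/write buffer is scheduled at forward_time.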

PacketPtr
Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                        bool needsWritable) const
{
    // should never see evictions here
    assert(!cpu_pkt->isEviction());

    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable() ||
        (!blkValid && cpu_pkt->isUpgrade()) ||
        cpu_pkt->cmd == MemCmd::InvalidateReq || cpu_pkt->isClean()) {
        // uncacheable requests and upgrades from upper-level caches
        // that missed completely just go through as is
        return nullptr;
    }

    assert(cpu_pkt->needsResponse());

    MemCmd cmd;
    // @TODO make useUpgrades a parameter.
    // Note that ownership protocols require upgrade, otherwise a
    // write miss on a shared owned block will generate a ReadExcl,
    // which will clobber the owned copy.
    const bool useUpgrades = true;
    if (cpu_pkt->cmd == MemCmd::WriteLineReq) {
        assert(!blkValid || !blk->isWritable());
        // forward as invalidate to all other caches, this gives us
        // the line in Exclusive state, and invalidates all other
        // copies
        cmd = MemCmd::InvalidateReq;
    } else if (blkValid && useUpgrades) {
        // only reason to be here is that blk is read only and we need
        // it to be writable
        assert(needsWritable);
        assert(!blk->isWritable());
        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
               cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
        // Even though this SC will fail, we still need to send out the
        // request and get the data to supply it to other snoopers in the case
        // where the determination that the StoreCond fails is delayed due to
        // all caches not being on the same local bus.
        cmd = MemCmd::SCUpgradeFailReq;
    } else {
        // block is invalid

        // If the request does not need a writable copy there are two
        // cases where we need to ensure the response will not fetch
        // the block in dirty state:
        // * this cache is read only and it does not perform
        //   writebacks,
        // * this cache is mostly exclusive and will not fill (since
        //   it does not fill it would have to write back the dirty data
        //   immediately, which generates unnecessary writebacks).
        bool force_clean_rsp = isReadOnly || clusivity == Enums::mostly_excl;
        cmd = needsWritable ? MemCmd::ReadExReq :
            (force_clean_rsp ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
    }
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);

    // if there are upstream caches that have already marked the
    // packet as having sharers (not passing writable), pass that info
    // downstream
    if (cpu_pkt->hasSharers() && !needsWritable) {
        // note that cpu_pkt may have spent a considerable time in the
        // MSHR queue and that the information could possibly be out
        // of date, however, there is no harm in conservatively
        // assuming the block has sharers
        pkt->setHasSharers();
        DPRINTF(Cache, "%s: passing hasSharers from %s to %s\n",
                __func__, cpu_pkt->print(), pkt->print());
    }

    // the packet should be block aligned
    assert(pkt->getAddr() == pkt->getBlockAddr(blkSize));

    pkt->allocate();
    DPRINTF(Cache, "%s: created %s from %s\n", __func__, pkt->print(),
            cpu_pkt->print());
    return pkt;
}
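
// The command selection above can be summarised as cpu_pkt -> bus_pkt:
//
//     uncacheable, upgrade-miss, InvalidateReq, clean  -> forwarded as-is
//     WriteLineReq                                     -> InvalidateReq
//     valid block, needs writable                      -> (SC)UpgradeReq
//     SCUpgradeFailReq / StoreCondFailReq              -> SCUpgradeFailReq
//     invalid block, needs writable                    -> ReadExReq
//     invalid block, clean response forced             -> ReadCleanReq
//     invalid block, otherwise                         -> ReadSharedReq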

Tick
Cache::recvAtomic(PacketPtr pkt)
{
    // We are in atomic mode so we pay just for lookupLatency here.
    Cycles lat = lookupLatency;

    // Forward the request if the system is in cache bypass mode.
    if (system->bypassCaches())
        return ticksToCycles(memSidePort->sendAtomic(pkt));

    promoteWholeLineWrites(pkt);

    // follow the same flow as in recvTimingReq, and check if a cache
    // above us is responding
    if (pkt->cacheResponding() && !pkt->isClean()) {
        assert(!pkt->req->isCacheInvalidate());
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if a cache is responding, and it had the line in Owned
        // rather than Modified state, we need to invalidate any
        // copies that are not on the same path to memory
        assert(pkt->needsWritable() && !pkt->responderHadWritable());
        lat += ticksToCycles(memSidePort->sendAtomic(pkt));

        return lat * clockPeriod();
    }

    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    CacheBlk *blk = nullptr;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty
        // block. If a dirty block is encountered a WriteClean
        // will update any copies on the path to memory
        // up to the point of reference.
        DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                __func__, pkt->print(), blk->print());
        PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
        writebacks.push_back(wb_pkt);
        pkt->setSatisfied();
    }

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);

    if (!satisfied) {
        // MISS

        // deal with the packets that go through the write path of
        // the cache, i.e. any evictions and writes
        if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
            (pkt->req->isUncacheable() && pkt->isWrite())) {
            lat += ticksToCycles(memSidePort->sendAtomic(pkt));
            return lat * clockPeriod();
        }
        // only misses left

        PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable());

        bool is_forward = (bus_pkt == nullptr);

        if (is_forward) {
            // just forwarding the same request to the next level
            // no local cache operation involved
            bus_pkt = pkt;
        }

        DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__,
                bus_pkt->print());

#if TRACING_ON
        CacheBlk::State old_state = blk ? blk->status : 0;
#endif

        lat += ticksToCycles(memSidePort->sendAtomic(bus_pkt));

        bool is_invalidate = bus_pkt->isInvalidate();

        // We are now dealing with the response handling
        DPRINTF(Cache, "%s: Receive response: %s in state %i\n", __func__,
                bus_pkt->print(), old_state);

        // If packet was a forward, the response (if any) is already
        // in place in the bus_pkt == pkt structure, so we don't need
        // to do anything. Otherwise, use the separate bus_pkt to
        // generate response to pkt and then delete it.
        if (!is_forward) {
            if (pkt->needsResponse()) {
                assert(bus_pkt->isResponse());
                if (bus_pkt->isError()) {
                    pkt->makeAtomicResponse();
                    pkt->copyError(bus_pkt);
                } else if (pkt->cmd == MemCmd::WriteLineReq) {
                    // note the use of pkt, not bus_pkt here.

                    // write-line request to the cache that promoted
                    // the write to a whole line
                    blk = handleFill(pkt, blk, writebacks,
                                     allocOnFill(pkt->cmd));
                    assert(blk != nullptr);
                    is_invalidate = false;
                    satisfyRequest(pkt, blk);
                } else if (bus_pkt->isRead() ||
                           bus_pkt->cmd == MemCmd::UpgradeResp) {
                    // we're updating cache state to allow us to
                    // satisfy the upstream request from the cache
                    blk = handleFill(bus_pkt, blk, writebacks,
                                     allocOnFill(pkt->cmd));
                    satisfyRequest(pkt, blk);
                    maintainClusivity(pkt->fromCache(), blk);
                } else {
                    // we're satisfying the upstream request without
                    // modifying cache state, e.g., a write-through
                    pkt->makeAtomicResponse();
                }
            }
            delete bus_pkt;
        }

        if (is_invalidate && blk && blk->isValid()) {
            invalidateBlock(blk);
        }
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention. If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used the temp block, check to see if it's valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter) first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = (blk->isDirty() || writebackClean) ?
            writebackBlk(blk) : cleanEvictBlk(blk);
        invalidateBlock(blk);
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat * clockPeriod();
}

void
Cache::functionalAccess(PacketPtr pkt, bool fromCpuSide)
{
    if (system->bypassCaches()) {
        // Packets from the memory side are snoop requests and
        // shouldn't happen in bypass mode.
        assert(fromCpuSide);

        // The cache should be flushed if we are in cache bypass mode,
        // so we don't need to check if we need to update anything.
        memSidePort->sendFunctional(pkt);
        return;
    }

    Addr blk_addr = pkt->getBlockAddr(blkSize);
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found. As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.

    // see if we have data at all (owned or otherwise)
    bool have_data = blk && blk->isValid()
        && pkt->checkFunctional(&cbpw, blk_addr, is_secure, blkSize,
                                blk->data);

    // data we have is dirty if marked as such or if we have an
    // in-service MSHR that is pending a modified line
    bool have_dirty =
        have_data && (blk->isDirty() ||
                      (mshr && mshr->inService && mshr->isPendingModified()));

    bool done = have_dirty
        || cpuSidePort->checkFunctional(pkt)
        || mshrQueue.checkFunctional(pkt, blk_addr)
        || writeBuffer.checkFunctional(pkt, blk_addr)
        || memSidePort->checkFunctional(pkt);

    DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
            (blk && blk->isValid()) ? "valid " : "",
            have_data ? "data " : "", done ? "done " : "");

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (done) {
        pkt->makeResponse();
    } else {
        // if it came as a request from the CPU side then make sure it
        // continues towards the memory side
        if (fromCpuSide) {
            memSidePort->sendFunctional(pkt);
        } else if (cpuSidePort->isSnooping()) {
            // if it came from the memory side, it must be a snoop request
            // and we should only forward it if we are forwarding snoops
            cpuSidePort->sendFunctionalSnoop(pkt);
        }
    }
}

/////////////////////////////////////////////////////
//
// Response handling: responses from the memory side
//
/////////////////////////////////////////////////////


void
Cache::handleUncacheableWriteResp(PacketPtr pkt)
{
    Tick completion_time = clockEdge(responseLatency) +
        pkt->headerDelay + pkt->payloadDelay;

    // Reset the bus additional time as it is now accounted for
    pkt->headerDelay = pkt->payloadDelay = 0;

    cpuSidePort->schedTimingResp(pkt, completion_time, true);
}

void
Cache::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    // all header delay should be paid for by the crossbar, unless
    // this is a prefetch response from above
    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
             "%s saw a non-zero packet delay\n", name());

    bool is_error = pkt->isError();

    if (is_error) {
        DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
                pkt->print());
    }

    DPRINTF(Cache, "%s: Handling response %s\n", __func__,
            pkt->print());

    // if this is a write, we should be looking at an uncacheable
    // write
    if (pkt->isWrite()) {
        assert(pkt->req->isUncacheable());
        handleUncacheableWriteResp(pkt);
        return;
    }

    // we have dealt with any (uncacheable) writes above, from here on
    // we know we are dealing with an MSHR due to a miss or a prefetch
    MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
    assert(mshr);

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = nullptr;
    }

    // Initial target is used just for stats
    MSHR::Target *initial_tgt = mshr->getTarget();
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick() - initial_tgt->recvTime;

    if (pkt->req->isUncacheable()) {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    } else {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    }

    bool wasFull = mshrQueue.isFull();

    PacketList writebacks;

    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
    const bool valid_blk = blk && blk->isValid();
    // If the response indicates that there are no sharers and we
    // either had the block already or the response is filling we can
    // promote our copy to writable
    if (!pkt->hasSharers() &&
        (is_fill || (valid_blk && !pkt->req->isCacheInvalidate()))) {
        mshr->promoteWritable();
    }

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                pkt->getAddr());

        blk = handleFill(pkt, blk, writebacks, mshr->allocOnFill());
        assert(blk != nullptr);
    }

    // allow invalidation responses originating from write-line
    // requests to be discarded
    bool is_invalidate = pkt->isInvalidate();

    // The block was marked as not readable while there was a pending
    // cache maintenance operation, restore its flag.
    if (pkt->isClean() && !is_invalidate && valid_blk) {
        blk->status |= BlkReadable;
    }

    // First offset for critical word first calculations
    int initial_offset = initial_tgt->pkt->getOffset(blkSize);

    bool from_cache = false;
    MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
    for (auto &target: targets) {
        Packet *tgt_pkt = target.pkt;
        switch (target.source) {
          case MSHR::Target::FromCPU:
            Tick completion_time;
            // Here we charge on completion_time the delay of the xbar if the
            // packet comes from it, charged on headerDelay.
            completion_time = pkt->headerDelay;

            // Software prefetch handling for cache closest to core
            if (tgt_pkt->cmd.isSWPrefetch()) {
                // a software prefetch would have already been ack'd
                // immediately with dummy data so the core would be able to
                // retire it. This request completes right here, so we
                // deallocate it.
                delete tgt_pkt->req;
                delete tgt_pkt;
                break; // skip response
            }

            // keep track of whether we have responded to another
            // cache
            from_cache = from_cache || tgt_pkt->fromCache();

            // unlike the other packet flows, where data is found in other
            // caches or memory and brought back, write-line requests always
            // have the data right away, so the above check for "is fill?"
            // cannot actually be determined until examining the stored MSHR
            // state. We "catch up" with that logic here, which is duplicated
            // from above.
            if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
                assert(!is_error);
                // we got the block in a writable state, so promote
                // any deferred targets if possible
                mshr->promoteWritable();
                // NB: we use the original packet here and not the response!
                blk = handleFill(tgt_pkt, blk, writebacks,
                                 targets.allocOnFill);
                assert(blk != nullptr);

                // treat as a fill, and discard the invalidation
                // response
                is_fill = true;
                is_invalidate = false;
            }

            if (is_fill) {
                satisfyRequest(tgt_pkt, blk, true, mshr->hasPostDowngrade());

                // How many bytes past the first request is this one
                int transfer_offset =
                    tgt_pkt->getOffset(blkSize) - initial_offset;
                if (transfer_offset < 0) {
                    transfer_offset += blkSize;
                }

                // If not critical word (offset) return payloadDelay.
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache or
                // the core.
                completion_time += clockEdge(responseLatency) +
                    (transfer_offset ? pkt->payloadDelay : 0);

                assert(!tgt_pkt->req->isUncacheable());

                assert(tgt_pkt->req->masterId() < system->maxMasters());
                missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] +=
                    completion_time - target.recvTime;
            } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
                // failed StoreCond upgrade
                assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
                       tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
                       tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache or
                // the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                tgt_pkt->req->setExtraData(0);
            } else {
                // We are about to send a response to a cache above
                // that asked for an invalidation; we need to
                // invalidate our copy immediately as the most
                // up-to-date copy of the block will now be in the
                // cache above. It will also prevent this cache from
                // responding (if the block was previously dirty) to
                // snoops as they should snoop the caches above where
                // they will get the response from.
                if (is_invalidate && blk && blk->isValid()) {
                    invalidateBlock(blk);
                }
                // not a cache fill, just forwarding response
                // responseLatency is the latency of the return path
                // from lower level caches/memory to the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                if (pkt->isRead() && !is_error) {
                    // sanity check
                    assert(pkt->getAddr() == tgt_pkt->getAddr());
                    assert(pkt->getSize() >= tgt_pkt->getSize());

                    tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
                }
            }
            tgt_pkt->makeTimingResponse();
            // if this packet is an error copy that to the new packet
            if (is_error)
                tgt_pkt->copyError(pkt);
            if (tgt_pkt->cmd == MemCmd::ReadResp &&
                (is_invalidate || mshr->hasPostInvalidate())) {
                // If intermediate cache got ReadRespWithInvalidate,
                // propagate that. Response should not have
                // isInvalidate() set otherwise.
                tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
                DPRINTF(Cache, "%s: updated cmd to %s\n", __func__,
                        tgt_pkt->print());
            }
            // Reset the bus additional time as it is now accounted for
            tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
            cpuSidePort->schedTimingResp(tgt_pkt, completion_time, true);
            break;

          case MSHR::Target::FromPrefetcher:
            assert(tgt_pkt->cmd == MemCmd::HardPFReq);
            if (blk)
                blk->status |= BlkHWPrefetched;
            delete tgt_pkt->req;
            delete tgt_pkt;
            break;

          case MSHR::Target::FromSnoop:
            // I don't believe that a snoop can be in an error state
            assert(!is_error);
            // response to snoop request
            DPRINTF(Cache, "processing deferred snoop...\n");
            // If the response is invalidating, a snooping target can
            // be satisfied if it is also invalidating. If the response
            // is not only invalidating, but more specifically an
            // InvalidateResp, and the MSHR was created due to an
            // InvalidateReq, then a cache above is waiting to satisfy a
            // WriteLineReq. In this case even a non-invalidating snoop
            // is added as a target here since this is the ordering
            // point. When the InvalidateResp reaches this cache, the
            // snooping target will further snoop the cache above with
            // the WriteLineReq.
            assert(!is_invalidate || pkt->cmd == MemCmd::InvalidateResp ||
                   pkt->req->isCacheMaintenance() ||
                   mshr->hasPostInvalidate());
            handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
            break;

          default:
            panic("Illegal target->source enum %d\n", target.source);
        }
    }

    maintainClusivity(from_cache, blk);

    if (blk && blk->isValid()) {
        // an invalidate response stemming from a write line request
        // should not invalidate the block, so check if the
        // invalidation should be discarded
        if (is_invalidate || mshr->hasPostInvalidate()) {
            invalidateBlock(blk);
        } else if (mshr->hasPostDowngrade()) {
            blk->status &= ~BlkWritable;
        }
    }

    if (mshr->promoteDeferredTargets()) {
        // avoid later read getting stale data while write miss is
        // outstanding.. see comment in timingAccess()
        if (blk) {
            blk->status &= ~BlkReadable;
        }
        mshrQueue.markPending(mshr);
        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
    } else {
        mshrQueue.deallocate(mshr);
        if (wasFull && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }

        // Request the bus for a prefetch if this deallocation freed enough
        // MSHRs for a prefetch to take place
        if (prefetcher && mshrQueue.canPrefetch()) {
            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
                                         clockEdge());
            if (next_pf_time != MaxTick)
                schedMemSideSendEvent(next_pf_time);
        }
    }
    // reset the xbar additional timing as it is now accounted for
    pkt->headerDelay = pkt->payloadDelay = 0;

    // copy writebacks to write buffer
    doWritebacks(writebacks, forward_time);

    // if we used the temp block, check to see if it's valid and then
    // clear it out
    if (blk == tempBlock && tempBlock->isValid()) {
        // We use forwardLatency here because we are copying
        // Writebacks/CleanEvicts to write buffer. It specifies the latency to
        // allocate an internal buffer and to schedule an event to the
        // queued port.
        if (blk->isDirty() || writebackClean) {
            PacketPtr wbPkt = writebackBlk(blk);
            allocateWriteBuffer(wbPkt, forward_time);
            // Set BLOCK_CACHED flag if cached above.
            if (isCachedAbove(wbPkt))
                wbPkt->setBlockCached();
        } else {
            PacketPtr wcPkt = cleanEvictBlk(blk);
            // Check to see if block is cached above. If not allocate
            // write buffer
            if (isCachedAbove(wcPkt))
                delete wcPkt;
            else
                allocateWriteBuffer(wcPkt, forward_time);
        }
        invalidateBlock(blk);
    }

    DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
    delete pkt;
}
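
// The critical-word-first adjustment above wraps the target offset
// within the line. For example, with blkSize = 64, an initial
// (critical) offset of 48 and a later target at offset 16:
//
//     transfer_offset = 16 - 48 = -32  ->  -32 + 64 = 32
//
// so that target is additionally charged pkt->payloadDelay, while the
// target at offset 48 (transfer_offset == 0) pays only responseLatency.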
1623 if (blk->isDirty() || writebackClean) { 1624 PacketPtr wbPkt = writebackBlk(blk); 1625 allocateWriteBuffer(wbPkt, forward_time); 1626 // Set BLOCK_CACHED flag if cached above. 1627 if (isCachedAbove(wbPkt)) 1628 wbPkt->setBlockCached(); 1629 } else { 1630 PacketPtr wcPkt = cleanEvictBlk(blk); 1631 // Check to see if block is cached above. If not allocate 1632 // write buffer 1633 if (isCachedAbove(wcPkt)) 1634 delete wcPkt; 1635 else 1636 allocateWriteBuffer(wcPkt, forward_time); 1637 } 1638 invalidateBlock(blk); 1639 } 1640 1641 DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print()); 1642 delete pkt; 1643} 1644 1645PacketPtr 1646Cache::writebackBlk(CacheBlk *blk) 1647{ 1648 chatty_assert(!isReadOnly || writebackClean, 1649 "Writeback from read-only cache"); 1650 assert(blk && blk->isValid() && (blk->isDirty() || writebackClean)); 1651 1652 writebacks[Request::wbMasterId]++; 1653 1654 Request *req = new Request(tags->regenerateBlkAddr(blk), blkSize, 0, 1655 Request::wbMasterId); 1656 if (blk->isSecure()) 1657 req->setFlags(Request::SECURE); 1658 1659 req->taskId(blk->task_id); 1660 1661 PacketPtr pkt = 1662 new Packet(req, blk->isDirty() ? 1663 MemCmd::WritebackDirty : MemCmd::WritebackClean); 1664 1665 DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n", 1666 pkt->print(), blk->isWritable(), blk->isDirty()); 1667 1668 if (blk->isWritable()) { 1669 // not asserting shared means we pass the block in modified 1670 // state, mark our own block non-writeable 1671 blk->status &= ~BlkWritable; 1672 } else { 1673 // we are in the Owned state, tell the receiver 1674 pkt->setHasSharers(); 1675 } 1676 1677 // make sure the block is not marked dirty 1678 blk->status &= ~BlkDirty; 1679 1680 pkt->allocate(); 1681 pkt->setDataFromBlock(blk->data, blkSize); 1682 1683 return pkt; 1684} 1685 1686PacketPtr 1687Cache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id) 1688{ 1689 Request *req = new Request(tags->regenerateBlkAddr(blk), blkSize, 0, 1690 Request::wbMasterId); 1691 if (blk->isSecure()) { 1692 req->setFlags(Request::SECURE); 1693 } 1694 req->taskId(blk->task_id); 1695 1696 PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id); 1697 1698 if (dest) { 1699 req->setFlags(dest); 1700 pkt->setWriteThrough(); 1701 } 1702 1703 DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(), 1704 blk->isWritable(), blk->isDirty()); 1705 1706 if (blk->isWritable()) { 1707 // not asserting shared means we pass the block in modified 1708 // state, mark our own block non-writeable 1709 blk->status &= ~BlkWritable; 1710 } else { 1711 // we are in the Owned state, tell the receiver 1712 pkt->setHasSharers(); 1713 } 1714 1715 // make sure the block is not marked dirty 1716 blk->status &= ~BlkDirty; 1717 1718 pkt->allocate(); 1719 pkt->setDataFromBlock(blk->data, blkSize); 1720 1721 return pkt; 1722} 1723 1724 1725PacketPtr 1726Cache::cleanEvictBlk(CacheBlk *blk) 1727{ 1728 assert(!writebackClean); 1729 assert(blk && blk->isValid() && !blk->isDirty()); 1730 // Creating a zero sized write, a message to the snoop filter 1731 Request *req = 1732 new Request(tags->regenerateBlkAddr(blk), blkSize, 0, 1733 Request::wbMasterId); 1734 if (blk->isSecure()) 1735 req->setFlags(Request::SECURE); 1736 1737 req->taskId(blk->task_id); 1738 1739 PacketPtr pkt = new Packet(req, MemCmd::CleanEvict); 1740 pkt->allocate(); 1741 DPRINTF(Cache, "Create CleanEvict %s\n", pkt->print()); 1742 1743 return pkt; 1744} 1745 1746void 1747Cache::memWriteback() 1748{ 1749 
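// Walk every block in the tags and functionally write back any dirty
// lines via writebackVisitor below (typically exercised when the
// system is drained, e.g. before taking a checkpoint).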
CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor); 1750 tags->forEachBlk(visitor); 1751} 1752 1753void 1754Cache::memInvalidate() 1755{ 1756 CacheBlkVisitorWrapper visitor(*this, &Cache::invalidateVisitor); 1757 tags->forEachBlk(visitor); 1758} 1759 1760bool 1761Cache::isDirty() const 1762{ 1763 CacheBlkIsDirtyVisitor visitor; 1764 tags->forEachBlk(visitor); 1765 1766 return visitor.isDirty(); 1767} 1768 1769bool 1770Cache::writebackVisitor(CacheBlk &blk) 1771{ 1772 if (blk.isDirty()) { 1773 assert(blk.isValid()); 1774 1775 Request request(tags->regenerateBlkAddr(&blk), blkSize, 0, 1776 Request::funcMasterId); 1777 request.taskId(blk.task_id); 1778 if (blk.isSecure()) { 1779 request.setFlags(Request::SECURE); 1780 } 1781 1782 Packet packet(&request, MemCmd::WriteReq); 1783 packet.dataStatic(blk.data); 1784 1785 memSidePort->sendFunctional(&packet); 1786 1787 blk.status &= ~BlkDirty; 1788 } 1789 1790 return true; 1791} 1792 1793bool 1794Cache::invalidateVisitor(CacheBlk &blk) 1795{ 1796 1797 if (blk.isDirty()) 1798 warn_once("Invalidating dirty cache lines. Expect things to break.\n"); 1799 1800 if (blk.isValid()) { 1801 assert(!blk.isDirty()); 1802 invalidateBlock(&blk); 1803 } 1804 1805 return true; 1806} 1807 1808CacheBlk* 1809Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks) 1810{ 1811 // Find replacement victim 1812 CacheBlk *blk = tags->findVictim(addr); 1813 1814 // It is valid to return nullptr if there is no victim 1815 if (!blk) 1816 return nullptr; 1817 1818 if (blk->isValid()) { 1819 Addr repl_addr = tags->regenerateBlkAddr(blk); 1820 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure()); 1821 if (repl_mshr) { 1822 // must be an outstanding upgrade or clean request 1823 // on a block we're about to replace... 1824 assert((!blk->isWritable() && repl_mshr->needsWritable()) || 1825 repl_mshr->isCleaning()); 1826 // too hard to replace block with transient state 1827 // allocation failed, block not inserted 1828 return nullptr; 1829 } else { 1830 DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx " 1831 "(%s): %s\n", repl_addr, blk->isSecure() ? "s" : "ns", 1832 addr, is_secure ? "s" : "ns", 1833 blk->isDirty() ? "writeback" : "clean"); 1834 1835 if (blk->wasPrefetched()) { 1836 unusedPrefetches++; 1837 } 1838 // Will send up Writeback/CleanEvict snoops via isCachedAbove 1839 // when pushing this writeback list into the write buffer. 1840 if (blk->isDirty() || writebackClean) { 1841 // Save writeback packet for handling by caller 1842 writebacks.push_back(writebackBlk(blk)); 1843 } else { 1844 writebacks.push_back(cleanEvictBlk(blk)); 1845 } 1846 } 1847 } 1848 1849 return blk; 1850} 1851 1852void 1853Cache::invalidateBlock(CacheBlk *blk) 1854{ 1855 if (blk != tempBlock) 1856 tags->invalidate(blk); 1857 blk->invalidate(); 1858} 1859 1860// Note that the reason we return a list of writebacks rather than 1861// inserting them directly in the write buffer is that this function 1862// is called by both atomic and timing-mode accesses, and in atomic 1863// mode we don't mess with the write buffer (we just perform the 1864// writebacks atomically once the original request is complete). 1865CacheBlk* 1866Cache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks, 1867 bool allocate) 1868{ 1869 assert(pkt->isResponse() || pkt->cmd == MemCmd::WriteLineReq); 1870 Addr addr = pkt->getAddr(); 1871 bool is_secure = pkt->isSecure(); 1872#if TRACING_ON 1873 CacheBlk::State old_state = blk ? 
blk->status : 0; 1874#endif 1875 1876 // When handling a fill, we should have no writes to this line. 1877 assert(addr == pkt->getBlockAddr(blkSize)); 1878 assert(!writeBuffer.findMatch(addr, is_secure)); 1879 1880 if (blk == nullptr) { 1881 // better have read new data... 1882 assert(pkt->hasData()); 1883 1884 // only read responses and write-line requests have data; 1885 // note that we don't write the data here for write-line - that 1886 // happens in the subsequent call to satisfyRequest 1887 assert(pkt->isRead() || pkt->cmd == MemCmd::WriteLineReq); 1888 1889 // need to do a replacement if allocating, otherwise we stick 1890 // with the temporary storage 1891 blk = allocate ? allocateBlock(addr, is_secure, writebacks) : nullptr; 1892 1893 if (blk == nullptr) { 1894 // No replaceable block or a mostly exclusive 1895 // cache... just use temporary storage to complete the 1896 // current request and then get rid of it 1897 assert(!tempBlock->isValid()); 1898 blk = tempBlock; 1899 tempBlock->set = tags->extractSet(addr); 1900 tempBlock->tag = tags->extractTag(addr); 1901 if (is_secure) { 1902 tempBlock->status |= BlkSecure; 1903 } 1904 DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr, 1905 is_secure ? "s" : "ns"); 1906 } else { 1907 tags->insertBlock(pkt, blk); 1908 } 1909 1910 // we should never be overwriting a valid block 1911 assert(!blk->isValid()); 1912 } else { 1913 // existing block... probably an upgrade 1914 assert(blk->tag == tags->extractTag(addr)); 1915 // either we're getting new data or the block should already be valid 1916 assert(pkt->hasData() || blk->isValid()); 1917 // don't clear block status... if block is already dirty we 1918 // don't want to lose that 1919 } 1920 1921 if (is_secure) 1922 blk->status |= BlkSecure; 1923 blk->status |= BlkValid | BlkReadable; 1924 1925 // sanity check for whole-line writes, which should always be 1926 // marked as writable as part of the fill, and then later marked 1927 // dirty as part of satisfyRequest 1928 if (pkt->cmd == MemCmd::WriteLineReq) { 1929 assert(!pkt->hasSharers()); 1930 } 1931 1932 // here we deal with setting the appropriate state of the line, 1933 // and we start by looking at the hasSharers flag, and ignore the 1934 // cacheResponding flag (normally signalling dirty data) if the 1935 // packet has sharers, thus the line is never allocated as Owned 1936 // (dirty but not writable), and always ends up being either 1937 // Shared, Exclusive or Modified, see Packet::setCacheResponding 1938 // for more details 1939 if (!pkt->hasSharers()) { 1940 // we could get a writable line from memory (rather than a 1941 // cache) even in a read-only cache; note that we set this bit 1942 // in that case as well, though the decision could be revisited 1943 blk->status |= BlkWritable; 1944 1945 // check if we got this via cache-to-cache transfer (i.e., from a 1946 // cache that had the block in Modified or Owned state) 1947 if (pkt->cacheResponding()) { 1948 // we got the block in Modified state, and invalidated the 1949 // owner's copy 1950 blk->status |= BlkDirty; 1951 1952 chatty_assert(!isReadOnly, "Should never see dirty snoop response " 1953 "in read-only cache %s\n", name()); 1954 } 1955 } 1956 1957 DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n", 1958 addr, is_secure ?
"s" : "ns", old_state, blk->print()); 1959 1960 // if we got new data, copy it in (checking for a read response 1961 // and a response that has data is the same in the end) 1962 if (pkt->isRead()) { 1963 // sanity checks 1964 assert(pkt->hasData()); 1965 assert(pkt->getSize() == blkSize); 1966 1967 pkt->writeDataToBlock(blk->data, blkSize); 1968 } 1969 // We pay for fillLatency here. 1970 blk->whenReady = clockEdge() + fillLatency * clockPeriod() + 1971 pkt->payloadDelay; 1972 1973 return blk; 1974} 1975 1976 1977///////////////////////////////////////////////////// 1978// 1979// Snoop path: requests coming in from the memory side 1980// 1981///////////////////////////////////////////////////// 1982 1983void 1984Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data, 1985 bool already_copied, bool pending_inval) 1986{ 1987 // sanity check 1988 assert(req_pkt->isRequest()); 1989 assert(req_pkt->needsResponse()); 1990 1991 DPRINTF(Cache, "%s: for %s\n", __func__, req_pkt->print()); 1992 // timing-mode snoop responses require a new packet, unless we 1993 // already made a copy... 1994 PacketPtr pkt = req_pkt; 1995 if (!already_copied) 1996 // do not clear flags, and allocate space for data if the 1997 // packet needs it (the only packets that carry data are read 1998 // responses) 1999 pkt = new Packet(req_pkt, false, req_pkt->isRead()); 2000 2001 assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() || 2002 pkt->hasSharers()); 2003 pkt->makeTimingResponse(); 2004 if (pkt->isRead()) { 2005 pkt->setDataFromBlock(blk_data, blkSize); 2006 } 2007 if (pkt->cmd == MemCmd::ReadResp && pending_inval) { 2008 // Assume we defer a response to a read from a far-away cache 2009 // A, then later defer a ReadExcl from a cache B on the same 2010 // bus as us. We'll assert cacheResponding in both cases, but 2011 // in the latter case cacheResponding will keep the 2012 // invalidation from reaching cache A. This special response 2013 // tells cache A that it gets the block to satisfy its read, 2014 // but must immediately invalidate it. 2015 pkt->cmd = MemCmd::ReadRespWithInvalidate; 2016 } 2017 // Here we consider forward_time, paying for just forward latency and 2018 // also charging the delay provided by the xbar. 2019 // forward_time is used as send_time in next allocateWriteBuffer(). 2020 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 2021 // Here we reset the timing of the packet. 
2022 pkt->headerDelay = pkt->payloadDelay = 0; 2023 DPRINTF(CacheVerbose, "%s: created response: %s tick: %lu\n", __func__, 2024 pkt->print(), forward_time); 2025 memSidePort->schedTimingSnoopResp(pkt, forward_time, true); 2026} 2027 2028uint32_t 2029Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing, 2030 bool is_deferred, bool pending_inval) 2031{ 2032 DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print()); 2033 // deferred snoops can only happen in timing mode 2034 assert(!(is_deferred && !is_timing)); 2035 // pending_inval only makes sense on deferred snoops 2036 assert(!(pending_inval && !is_deferred)); 2037 assert(pkt->isRequest()); 2038 2039 // the packet may get modified if we or a forwarded snooper 2040 // responds in atomic mode, so remember a few things about the 2041 // original packet up front 2042 bool invalidate = pkt->isInvalidate(); 2043 bool M5_VAR_USED needs_writable = pkt->needsWritable(); 2044 2045 // at the moment we could get an uncacheable write which does not 2046 // have the invalidate flag, and we need a suitable way of dealing 2047 // with this case 2048 panic_if(invalidate && pkt->req->isUncacheable(), 2049 "%s got an invalidating uncacheable snoop request %s", 2050 name(), pkt->print()); 2051 2052 uint32_t snoop_delay = 0; 2053 2054 if (forwardSnoops) { 2055 // first propagate snoop upward to see if anyone above us wants to 2056 // handle it. save & restore packet src since it will get 2057 // rewritten to be relative to cpu-side bus (if any) 2058 bool alreadyResponded = pkt->cacheResponding(); 2059 if (is_timing) { 2060 // copy the packet so that we can clear any flags before 2061 // forwarding it upwards, we also allocate data (passing 2062 // the pointer along in case of static data), in case 2063 // there is a snoop hit in upper levels 2064 Packet snoopPkt(pkt, true, true); 2065 snoopPkt.setExpressSnoop(); 2066 // the snoop packet does not need to wait any additional 2067 // time 2068 snoopPkt.headerDelay = snoopPkt.payloadDelay = 0; 2069 cpuSidePort->sendTimingSnoopReq(&snoopPkt); 2070 2071 // add the header delay (including crossbar and snoop 2072 // delays) of the upward snoop to the snoop delay for this 2073 // cache 2074 snoop_delay += snoopPkt.headerDelay; 2075 2076 if (snoopPkt.cacheResponding()) { 2077 // cache-to-cache response from some upper cache 2078 assert(!alreadyResponded); 2079 pkt->setCacheResponding(); 2080 } 2081 // upstream cache has the block, or has an outstanding 2082 // MSHR, pass the flag on 2083 if (snoopPkt.hasSharers()) { 2084 pkt->setHasSharers(); 2085 } 2086 // If this request is a prefetch or clean evict and an upper level 2087 // signals block present, make sure to propagate the block 2088 // presence to the requester. 2089 if (snoopPkt.isBlockCached()) { 2090 pkt->setBlockCached(); 2091 } 2092 // If the request was satisfied by snooping the cache 2093 // above, mark the original packet as satisfied too. 
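// (Descriptive recap: the cacheResponding, hasSharers and
// isBlockCached outcomes of the upward express snoop have been folded
// back into the original packet above; the satisfied flag is
// mirrored next.)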
2094 if (snoopPkt.satisfied()) { 2095 pkt->setSatisfied(); 2096 } 2097 } else { 2098 cpuSidePort->sendAtomicSnoop(pkt); 2099 if (!alreadyResponded && pkt->cacheResponding()) { 2100 // cache-to-cache response from some upper cache: 2101 // forward response to original requester 2102 assert(pkt->isResponse()); 2103 } 2104 } 2105 } 2106 2107 bool respond = false; 2108 bool blk_valid = blk && blk->isValid(); 2109 if (pkt->isClean()) { 2110 if (blk_valid && blk->isDirty()) { 2111 DPRINTF(CacheVerbose, "%s: packet (snoop) %s found block: %s\n", 2112 __func__, pkt->print(), blk->print()); 2113 PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id); 2114 PacketList writebacks; 2115 writebacks.push_back(wb_pkt); 2116 2117 if (is_timing) { 2118 // anything that is merely forwarded pays for the forward 2119 // latency and the delay provided by the crossbar 2120 Tick forward_time = clockEdge(forwardLatency) + 2121 pkt->headerDelay; 2122 doWritebacks(writebacks, forward_time); 2123 } else { 2124 doWritebacksAtomic(writebacks); 2125 } 2126 pkt->setSatisfied(); 2127 } 2128 } else if (!blk_valid) { 2129 DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__, 2130 pkt->print()); 2131 if (is_deferred) { 2132 // we no longer have the block, and will not respond, but a 2133 // packet was allocated in MSHR::handleSnoop and we have 2134 // to delete it 2135 assert(pkt->needsResponse()); 2136 2137 // we have passed the block to a cache upstream, that 2138 // cache should be responding 2139 assert(pkt->cacheResponding()); 2140 2141 delete pkt; 2142 } 2143 return snoop_delay; 2144 } else { 2145 DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__, 2146 pkt->print(), blk->print()); 2147 2148 // We may end up modifying both the block state and the packet (if 2149 // we respond in atomic mode), so just figure out what to do now 2150 // and then do it later. We respond to all snoops that need 2151 // responses provided we have the block in dirty state. The 2152 // invalidation itself is taken care of below. We don't respond to 2153 // cache maintenance operations as this is done by the destination 2154 // xbar. 2155 respond = blk->isDirty() && pkt->needsResponse(); 2156 2157 chatty_assert(!(isReadOnly && blk->isDirty()), "Should never have " 2158 "a dirty block in a read-only cache %s\n", name()); 2159 } 2160 2161 // Invalidate any prefetches from below that would strip write 2162 // permissions. MemCmd::HardPFReq is only observed by upstream caches. 2163 // After missing above and in its own cache, a new MemCmd::ReadReq is 2164 // created that downstream caches observe.
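// Illustrative case (a sketch, not from the code): a prefetcher in a
// cache below issues a HardPFReq that is reissued downstream as a
// ReadReq; when the corresponding snoop reaches a level that holds
// the line, flagging it as cached squashes the prefetch instead of
// letting it strip write permissions from this copy.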
2165 if (pkt->mustCheckAbove()) { 2166 DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s " 2167 "from lower cache\n", pkt->getAddr(), pkt->print()); 2168 pkt->setBlockCached(); 2169 return snoop_delay; 2170 } 2171 2172 if (pkt->isRead() && !invalidate) { 2173 // reading without requiring the line in a writable state 2174 assert(!needs_writable); 2175 pkt->setHasSharers(); 2176 2177 // if the requesting packet is uncacheable, retain the line in 2178 // the current state, otherwise unset the writable flag, 2179 // which means we go from Modified to Owned (and will respond 2180 // below), remain in Owned (and will respond below), from 2181 // Exclusive to Shared, or remain in Shared 2182 if (!pkt->req->isUncacheable()) 2183 blk->status &= ~BlkWritable; 2184 DPRINTF(Cache, "new state is %s\n", blk->print()); 2185 } 2186 2187 if (respond) { 2188 // prevent anyone else from responding, cache as well as 2189 // memory, and also prevent any memory from even seeing the 2190 // request 2191 pkt->setCacheResponding(); 2192 if (!pkt->isClean() && blk->isWritable()) { 2193 // inform the cache hierarchy that this cache had the line 2194 // in the Modified state so that we avoid unnecessary 2195 // invalidations (see Packet::setResponderHadWritable) 2196 pkt->setResponderHadWritable(); 2197 2198 // in the case of an uncacheable request there is no point 2199 // in setting the responderHadWritable flag, but since the 2200 // recipient does not care there is no harm in doing so 2201 } else { 2202 // if the packet has needsWritable set we invalidate our 2203 // copy below and all other copies will be invalidated 2204 // through express snoops, and if needsWritable is not set 2205 // we already called setHasSharers above 2206 } 2207 2208 // if we are returning a writable and dirty (Modified) line, 2209 // we should be invalidating the line 2210 panic_if(!invalidate && !pkt->hasSharers(), 2211 "%s is passing a Modified line through %s, " 2212 "but keeping the block", name(), pkt->print()); 2213 2214 if (is_timing) { 2215 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval); 2216 } else { 2217 pkt->makeAtomicResponse(); 2218 // packets such as upgrades do not actually have any data 2219 // payload 2220 if (pkt->hasData()) 2221 pkt->setDataFromBlock(blk->data, blkSize); 2222 } 2223 } 2224 2225 if (!respond && is_deferred) { 2226 assert(pkt->needsResponse()); 2227 2228 // if we copied the deferred packet with the intention to 2229 // respond, but are not responding, then a cache above us must 2230 // be, and we can use this as the indication of whether this 2231 // is a packet where we created a copy of the request or not 2232 if (!pkt->cacheResponding()) { 2233 delete pkt->req; 2234 } 2235 2236 delete pkt; 2237 } 2238 2239 // Do this last in case it deallocates block data or something 2240 // like that 2241 if (blk_valid && invalidate) { 2242 invalidateBlock(blk); 2243 DPRINTF(Cache, "new state is %s\n", blk->print()); 2244 } 2245 2246 return snoop_delay; 2247} 2248 2249 2250void 2251Cache::recvTimingSnoopReq(PacketPtr pkt) 2252{ 2253 DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print()); 2254 2255 // Snoops shouldn't happen when bypassing caches 2256 assert(!system->bypassCaches()); 2257 2258 // no need to snoop requests that are not in range 2259 if (!inRange(pkt->getAddr())) { 2260 return; 2261 } 2262 2263 bool is_secure = pkt->isSecure(); 2264 CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure); 2265 2266 Addr blk_addr = pkt->getBlockAddr(blkSize); 2267 MSHR
*mshr = mshrQueue.findMatch(blk_addr, is_secure); 2268 2269 // Update the latency cost of the snoop so that the crossbar can 2270 // account for it. Do not overwrite what other neighbouring caches 2271 // have already done, rather take the maximum. The update is 2272 // tentative, for cases where we return before an upward snoop 2273 // happens below. 2274 pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, 2275 lookupLatency * clockPeriod()); 2276 2277 // Inform a request (Prefetch, CleanEvict or Writeback) from below 2278 // of an MSHR hit by setting BLOCK_CACHED. 2279 if (mshr && pkt->mustCheckAbove()) { 2280 DPRINTF(Cache, "Setting block cached for %s from lower cache on " 2281 "mshr hit\n", pkt->print()); 2282 pkt->setBlockCached(); 2283 return; 2284 } 2285 2286 // Bypass any existing cache maintenance requests if the request 2287 // has been satisfied already (i.e., the dirty block has been 2288 // found). 2289 if (mshr && pkt->req->isCacheMaintenance() && pkt->satisfied()) { 2290 return; 2291 } 2292 2293 // Let the MSHR itself track the snoop and decide whether we want 2294 // to go ahead and do the regular cache snoop 2295 if (mshr && mshr->handleSnoop(pkt, order++)) { 2296 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s). " 2297 "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns", 2298 mshr->print()); 2299 2300 if (mshr->getNumTargets() > numTarget) 2301 warn("allocating bonus target for snoop"); //handle later 2302 return; 2303 } 2304 2305 // We also need to check the writeback buffers and handle those 2306 WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure); 2307 if (wb_entry) { 2308 DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n", 2309 pkt->getAddr(), is_secure ? "s" : "ns"); 2310 // Expect to see only Writebacks and/or CleanEvicts here, both of 2311 // which should not be generated for uncacheable data. 2312 assert(!wb_entry->isUncacheable()); 2313 // There should only be a single request responsible for generating 2314 // Writebacks/CleanEvicts. 2315 assert(wb_entry->getNumTargets() == 1); 2316 PacketPtr wb_pkt = wb_entry->getTarget()->pkt; 2317 assert(wb_pkt->isEviction() || wb_pkt->cmd == MemCmd::WriteClean); 2318 2319 if (pkt->isEviction()) { 2320 // if the block is found in the write queue, set the BLOCK_CACHED 2321 // flag for Writeback/CleanEvict snoop. On return the snoop will 2322 // propagate the BLOCK_CACHED flag in Writeback packets and prevent 2323 // any CleanEvicts from travelling down the memory hierarchy.
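// Hypothetical example: our WritebackDirty for line X is sitting in
// the write buffer when an eviction snoop for X arrives from a cache
// below; BLOCK_CACHED lets that cache squash its CleanEvict (or flag
// its Writeback) instead of sending a redundant eviction message down
// the hierarchy.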
2324 pkt->setBlockCached(); 2325 DPRINTF(Cache, "%s: Squashing %s from lower cache on writequeue " 2326 "hit\n", __func__, pkt->print()); 2327 return; 2328 } 2329 2330 // conceptually writebacks are no different to other blocks in 2331 // this cache, so the behaviour is modelled after handleSnoop, 2332 // the difference being that instead of querying the block 2333 // state to determine if it is dirty and writable, we use the 2334 // command and fields of the writeback packet 2335 bool respond = wb_pkt->cmd == MemCmd::WritebackDirty && 2336 pkt->needsResponse(); 2337 bool have_writable = !wb_pkt->hasSharers(); 2338 bool invalidate = pkt->isInvalidate(); 2339 2340 if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) { 2341 assert(!pkt->needsWritable()); 2342 pkt->setHasSharers(); 2343 wb_pkt->setHasSharers(); 2344 } 2345 2346 if (respond) { 2347 pkt->setCacheResponding(); 2348 2349 if (have_writable) { 2350 pkt->setResponderHadWritable(); 2351 } 2352 2353 doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(), 2354 false, false); 2355 } 2356 2357 if (invalidate && wb_pkt->cmd != MemCmd::WriteClean) { 2358 // Invalidation trumps our writeback... discard here 2359 // Note: markInService will remove entry from writeback buffer. 2360 markInService(wb_entry); 2361 delete wb_pkt; 2362 } 2363 } 2364 2365 // If this was a shared writeback, there may still be 2366 // other shared copies above that require invalidation. 2367 // We could be more selective and return here if the 2368 // request is non-exclusive or if the writeback is 2369 // exclusive. 2370 uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false); 2371 2372 // Override what we did when we first saw the snoop, as we now 2373 // also have the cost of the upwards snoops to account for 2374 pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay + 2375 lookupLatency * clockPeriod()); 2376} 2377 2378bool 2379Cache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt) 2380{ 2381 // Express snoop responses from master to slave, e.g., from L1 to L2 2382 cache->recvTimingSnoopResp(pkt); 2383 return true; 2384} 2385 2386Tick 2387Cache::recvAtomicSnoop(PacketPtr pkt) 2388{ 2389 // Snoops shouldn't happen when bypassing caches 2390 assert(!system->bypassCaches()); 2391 2392 // no need to snoop requests that are not in range. 2393 if (!inRange(pkt->getAddr())) { 2394 return 0; 2395 } 2396 2397 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure()); 2398 uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false); 2399 return snoop_delay + lookupLatency * clockPeriod(); 2400} 2401 2402 2403QueueEntry* 2404Cache::getNextQueueEntry() 2405{ 2406 // Check both MSHR queue and write buffer for potential requests, 2407 // note that null does not mean there is no request, it could 2408 // simply be that it is not ready 2409 MSHR *miss_mshr = mshrQueue.getNext(); 2410 WriteQueueEntry *wq_entry = writeBuffer.getNext(); 2411 2412 // If we got a write buffer request ready, first priority is a 2413 // full write buffer, otherwise we favour the miss requests 2414 if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) { 2415 // need to search MSHR queue for conflicting earlier miss. 2416 MSHR *conflict_mshr = 2417 mshrQueue.findPending(wq_entry->blkAddr, 2418 wq_entry->isSecure); 2419 2420 if (conflict_mshr && conflict_mshr->order < wq_entry->order) { 2421 // Service misses in order until conflict is cleared. 
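// (The conflicting MSHR is older than this write buffer entry, so
// the miss is serviced first; issuing the write ahead of it could
// let the older miss observe stale data.)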
2422 return conflict_mshr; 2423 2424 // @todo Note that we ignore the ready time of the conflict here 2425 } 2426 2427 // No conflicts; issue write 2428 return wq_entry; 2429 } else if (miss_mshr) { 2430 // need to check for conflicting earlier writeback 2431 WriteQueueEntry *conflict_mshr = 2432 writeBuffer.findPending(miss_mshr->blkAddr, 2433 miss_mshr->isSecure); 2434 if (conflict_mshr) { 2435 // not sure why we don't check order here... it was in the 2436 // original code but commented out. 2437 2438 // The only way this happens is if we are 2439 // doing a write and we didn't have permissions 2440 // then subsequently saw a writeback (owned got evicted) 2441 // We need to make sure to perform the writeback first 2442 // To preserve the dirty data, then we can issue the write 2443 2444 // should we return wq_entry here instead? I.e. do we 2445 // have to flush writes in order? I don't think so... not 2446 // for Alpha anyway. Maybe for x86? 2447 return conflict_mshr; 2448 2449 // @todo Note that we ignore the ready time of the conflict here 2450 } 2451 2452 // No conflicts; issue read 2453 return miss_mshr; 2454 } 2455 2456 // fall through... no pending requests. Try a prefetch. 2457 assert(!miss_mshr && !wq_entry); 2458 if (prefetcher && mshrQueue.canPrefetch()) { 2459 // If we have a miss queue slot, we can try a prefetch 2460 PacketPtr pkt = prefetcher->getPacket(); 2461 if (pkt) { 2462 Addr pf_addr = pkt->getBlockAddr(blkSize); 2463 if (!tags->findBlock(pf_addr, pkt->isSecure()) && 2464 !mshrQueue.findMatch(pf_addr, pkt->isSecure()) && 2465 !writeBuffer.findMatch(pf_addr, pkt->isSecure())) { 2466 // Update statistic on number of prefetches issued 2467 // (hwpf_mshr_misses) 2468 assert(pkt->req->masterId() < system->maxMasters()); 2469 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++; 2470 2471 // allocate an MSHR and return it, note 2472 // that we send the packet straight away, so do not 2473 // schedule the send 2474 return allocateMissBuffer(pkt, curTick(), false); 2475 } else { 2476 // free the request and packet 2477 delete pkt->req; 2478 delete pkt; 2479 } 2480 } 2481 } 2482 2483 return nullptr; 2484} 2485 2486bool 2487Cache::isCachedAbove(PacketPtr pkt, bool is_timing) const 2488{ 2489 if (!forwardSnoops) 2490 return false; 2491 // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and 2492 // Writeback snoops into upper level caches to check for copies of the 2493 // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict 2494 // packet, the cache can inform the crossbar below of presence or absence 2495 // of the block. 2496 if (is_timing) { 2497 Packet snoop_pkt(pkt, true, false); 2498 snoop_pkt.setExpressSnoop(); 2499 // Assert that packet is either Writeback or CleanEvict and not a 2500 // prefetch request because prefetch requests need an MSHR and may 2501 // generate a snoop response. 2502 assert(pkt->isEviction() || pkt->cmd == MemCmd::WriteClean); 2503 snoop_pkt.senderState = nullptr; 2504 cpuSidePort->sendTimingSnoopReq(&snoop_pkt); 2505 // Writeback/CleanEvict snoops do not generate a snoop response. 
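// (Hence the assert below: an upper cache may mark the block as
// cached in this snoop, but it must never commit to responding to a
// Writeback/CleanEvict probe.)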
2506 assert(!(snoop_pkt.cacheResponding())); 2507 return snoop_pkt.isBlockCached(); 2508 } else { 2509 cpuSidePort->sendAtomicSnoop(pkt); 2510 return pkt->isBlockCached(); 2511 } 2512} 2513 2514Tick 2515Cache::nextQueueReadyTime() const 2516{ 2517 Tick nextReady = std::min(mshrQueue.nextReadyTime(), 2518 writeBuffer.nextReadyTime()); 2519 2520 // Don't signal prefetch ready time if no MSHRs available 2521 // Will signal once enough MSHRs are deallocated 2522 if (prefetcher && mshrQueue.canPrefetch()) { 2523 nextReady = std::min(nextReady, 2524 prefetcher->nextPrefetchReadyTime()); 2525 } 2526 2527 return nextReady; 2528} 2529 2530bool 2531Cache::sendMSHRQueuePacket(MSHR* mshr) 2532{ 2533 assert(mshr); 2534 2535 // use request from 1st target 2536 PacketPtr tgt_pkt = mshr->getTarget()->pkt; 2537 2538 DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print()); 2539 2540 CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure); 2541 2542 if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) { 2543 // we should never have hardware prefetches to allocated 2544 // blocks 2545 assert(blk == nullptr); 2546 2547 // We need to check the caches above us to verify that 2548 // they don't have a copy of this block in the dirty state 2549 // at the moment. Without this check we could get a stale 2550 // copy from memory that might get used in place of the 2551 // dirty one. 2552 Packet snoop_pkt(tgt_pkt, true, false); 2553 snoop_pkt.setExpressSnoop(); 2554 // We are sending this packet upwards, but if it hits we will 2555 // get a snoop response that we end up treating just like a 2556 // normal response, hence it needs the MSHR as its sender 2557 // state 2558 snoop_pkt.senderState = mshr; 2559 cpuSidePort->sendTimingSnoopReq(&snoop_pkt); 2560 2561 // Check to see if a writeback arrived between the time the 2562 // prefetch was placed in the MSHRs and when it was selected to 2563 // be sent, or if the prefetch was squashed in the meantime by 2564 // an upper cache (to prevent us from grabbing the 2565 // line). 2566 2567 // It is important to check cacheResponding before 2568 // prefetchSquashed. If another cache has committed to 2569 // responding, it will be sending a dirty response which will 2570 // arrive at the MSHR allocated for this request. Checking the 2571 // prefetchSquash first may result in the MSHR being 2572 // prematurely deallocated. 2573 if (snoop_pkt.cacheResponding()) { 2574 auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req); 2575 assert(r.second); 2576 2577 // if we are getting a snoop response with no sharers it 2578 // will be allocated as Modified 2579 bool pending_modified_resp = !snoop_pkt.hasSharers(); 2580 markInService(mshr, pending_modified_resp); 2581 2582 DPRINTF(Cache, "Upward snoop of prefetch for addr" 2583 " %#x (%s) hit\n", 2584 tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns"); 2585 return false; 2586 } 2587 2588 if (snoop_pkt.isBlockCached()) { 2589 DPRINTF(Cache, "Block present, prefetch squashed by cache. 
" 2590 "Deallocating mshr target %#x.\n", 2591 mshr->blkAddr); 2592 2593 // Deallocate the mshr target 2594 if (mshrQueue.forceDeallocateTarget(mshr)) { 2595 // Clear block if this deallocation resulted freed an 2596 // mshr when all had previously been utilized 2597 clearBlocked(Blocked_NoMSHRs); 2598 } 2599 2600 // given that no response is expected, delete Request and Packet 2601 delete tgt_pkt->req; 2602 delete tgt_pkt; 2603 2604 return false; 2605 } 2606 } 2607 2608 // either a prefetch that is not present upstream, or a normal 2609 // MSHR request, proceed to get the packet to send downstream 2610 PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable()); 2611 2612 mshr->isForward = (pkt == nullptr); 2613 2614 if (mshr->isForward) { 2615 // not a cache block request, but a response is expected 2616 // make copy of current packet to forward, keep current 2617 // copy for response handling 2618 pkt = new Packet(tgt_pkt, false, true); 2619 assert(!pkt->isWrite()); 2620 } 2621 2622 // play it safe and append (rather than set) the sender state, 2623 // as forwarded packets may already have existing state 2624 pkt->pushSenderState(mshr); 2625 2626 if (pkt->isClean() && blk && blk->isDirty()) { 2627 // A cache clean opearation is looking for a dirty block. Mark 2628 // the packet so that the destination xbar can determine that 2629 // there will be a follow-up write packet as well. 2630 pkt->setSatisfied(); 2631 } 2632 2633 if (!memSidePort->sendTimingReq(pkt)) { 2634 // we are awaiting a retry, but we 2635 // delete the packet and will be creating a new packet 2636 // when we get the opportunity 2637 delete pkt; 2638 2639 // note that we have now masked any requestBus and 2640 // schedSendEvent (we will wait for a retry before 2641 // doing anything), and this is so even if we do not 2642 // care about this packet and might override it before 2643 // it gets retried 2644 return true; 2645 } else { 2646 // As part of the call to sendTimingReq the packet is 2647 // forwarded to all neighbouring caches (and any caches 2648 // above them) as a snoop. Thus at this point we know if 2649 // any of the neighbouring caches are responding, and if 2650 // so, we know it is dirty, and we can determine if it is 2651 // being passed as Modified, making our MSHR the ordering 2652 // point 2653 bool pending_modified_resp = !pkt->hasSharers() && 2654 pkt->cacheResponding(); 2655 markInService(mshr, pending_modified_resp); 2656 if (pkt->isClean() && blk && blk->isDirty()) { 2657 // A cache clean opearation is looking for a dirty 2658 // block. If a dirty block is encountered a WriteClean 2659 // will update any copies to the path to the memory 2660 // until the point of reference. 
2661 DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n", 2662 __func__, pkt->print(), blk->print()); 2663 PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), 2664 pkt->id); 2665 PacketList writebacks; 2666 writebacks.push_back(wb_pkt); 2667 doWritebacks(writebacks, 0); 2668 } 2669 2670 return false; 2671 } 2672} 2673 2674bool 2675Cache::sendWriteQueuePacket(WriteQueueEntry* wq_entry) 2676{ 2677 assert(wq_entry); 2678 2679 // always a single target for write queue entries 2680 PacketPtr tgt_pkt = wq_entry->getTarget()->pkt; 2681 2682 DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print()); 2683 2684 // forward as is, both for evictions and uncacheable writes 2685 if (!memSidePort->sendTimingReq(tgt_pkt)) { 2686 // note that we have now masked any requestBus and 2687 // schedSendEvent (we will wait for a retry before 2688 // doing anything), and this is so even if we do not 2689 // care about this packet and might override it before 2690 // it gets retried 2691 return true; 2692 } else { 2693 markInService(wq_entry); 2694 return false; 2695 } 2696} 2697 2698void 2699Cache::serialize(CheckpointOut &cp) const 2700{ 2701 bool dirty(isDirty()); 2702 2703 if (dirty) { 2704 warn("*** The cache still contains dirty data. ***\n"); 2705 warn(" Make sure to drain the system using the correct flags.\n"); 2706 warn(" This checkpoint will not restore correctly and dirty data " 2707 " in the cache will be lost!\n"); 2708 } 2709 2710 // Since we don't checkpoint the data in the cache, any dirty data 2711 // will be lost when restoring from a checkpoint of a system that 2712 // wasn't drained properly. Flag the checkpoint as invalid if the 2713 // cache contains dirty data. 2714 bool bad_checkpoint(dirty); 2715 SERIALIZE_SCALAR(bad_checkpoint); 2716} 2717 2718void 2719Cache::unserialize(CheckpointIn &cp) 2720{ 2721 bool bad_checkpoint; 2722 UNSERIALIZE_SCALAR(bad_checkpoint); 2723 if (bad_checkpoint) { 2724 fatal("Restoring from checkpoints with dirty caches is not supported " 2725 "in the classic memory system. 
Please remove any caches or " 2726 "drain them properly before taking checkpoints.\n"); 2727 } 2728} 2729 2730/////////////// 2731// 2732// CpuSidePort 2733// 2734/////////////// 2735 2736AddrRangeList 2737Cache::CpuSidePort::getAddrRanges() const 2738{ 2739 return cache->getAddrRanges(); 2740} 2741 2742bool 2743Cache::CpuSidePort::tryTiming(PacketPtr pkt) 2744{ 2745 assert(!cache->system->bypassCaches()); 2746 2747 // always let express snoop packets through, even if blocked 2748 if (pkt->isExpressSnoop()) { 2749 return true; 2750 } else if (isBlocked() || mustSendRetry) { 2751 // either already committed to send a retry, or blocked 2752 mustSendRetry = true; 2753 return false; 2754 } 2755 mustSendRetry = false; 2756 return true; 2757} 2758 2759bool 2760Cache::CpuSidePort::recvTimingReq(PacketPtr pkt) 2761{ 2762 assert(!cache->system->bypassCaches()); 2763 2764 // always let express snoop packets through, even if blocked 2765 if (pkt->isExpressSnoop() || tryTiming(pkt)) { 2766 cache->recvTimingReq(pkt); 2767 return true; 2768 } 2769 return false; 2770} 2771 2772Tick 2773Cache::CpuSidePort::recvAtomic(PacketPtr pkt) 2774{ 2775 return cache->recvAtomic(pkt); 2776} 2777 2778void 2779Cache::CpuSidePort::recvFunctional(PacketPtr pkt) 2780{ 2781 // functional request 2782 cache->functionalAccess(pkt, true); 2783} 2784 2785Cache:: 2786CpuSidePort::CpuSidePort(const std::string &_name, Cache *_cache, 2787 const std::string &_label) 2788 : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache) 2789{ 2790} 2791 2792Cache* 2793CacheParams::create() 2794{ 2795 assert(tags); 2796 assert(replacement_policy); 2797 2798 return new Cache(this); 2799} 2800/////////////// 2801// 2802// MemSidePort 2803// 2804/////////////// 2805 2806bool 2807Cache::MemSidePort::recvTimingResp(PacketPtr pkt) 2808{ 2809 cache->recvTimingResp(pkt); 2810 return true; 2811} 2812 2813// Express snooping requests to memside port 2814void 2815Cache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt) 2816{ 2817 // handle snooping requests 2818 cache->recvTimingSnoopReq(pkt); 2819} 2820 2821Tick 2822Cache::MemSidePort::recvAtomicSnoop(PacketPtr pkt) 2823{ 2824 return cache->recvAtomicSnoop(pkt); 2825} 2826 2827void 2828Cache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt) 2829{ 2830 // functional snoop (note that in contrast to atomic we don't have 2831 // a specific functionalSnoop method, as they have the same 2832 // behaviour regardless) 2833 cache->functionalAccess(pkt, false); 2834} 2835 2836void 2837Cache::CacheReqPacketQueue::sendDeferredPacket() 2838{ 2839 // sanity check 2840 assert(!waitingOnRetry); 2841 2842 // there should never be any deferred request packets in the 2843 // queue, instead we rely on the cache to provide the packets 2844 // from the MSHR queue or write queue 2845 assert(deferredPacketReadyTime() == MaxTick); 2846 2847 // check for request packets (requests & writebacks) 2848 QueueEntry* entry = cache.getNextQueueEntry(); 2849 2850 if (!entry) { 2851 // can happen if e.g. we attempt a writeback and fail, but 2852 // before the retry, the writeback is eliminated because 2853 // we snoop another cache's ReadEx.
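// (Nothing more to do here; we fall through and reschedule below
// based on nextQueueReadyTime() once an entry becomes ready.)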
2854 } else { 2855 // let our snoop responses go first if there are responses to 2856 // the same addresses 2857 if (checkConflictingSnoop(entry->blkAddr)) { 2858 return; 2859 } 2860 waitingOnRetry = entry->sendPacket(cache); 2861 } 2862 2863 // if we succeeded and are not waiting for a retry, schedule the 2864 // next send considering when the next queue is ready, note that 2865 // snoop responses have their own packet queue and thus schedule 2866 // their own events 2867 if (!waitingOnRetry) { 2868 schedSendEvent(cache.nextQueueReadyTime()); 2869 } 2870} 2871 2872Cache:: 2873MemSidePort::MemSidePort(const std::string &_name, Cache *_cache, 2874 const std::string &_label) 2875 : BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue), 2876 _reqQueue(*_cache, *this, _snoopRespQueue, _label), 2877 _snoopRespQueue(*_cache, *this, _label), cache(_cache) 2878{ 2879} 2880