cache.cc revision 12749
/*
 * Copyright (c) 2010-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Sandberg
 *          Nikos Nikoleris
 */

/**
 * @file
 * Cache definitions.
 */

#include "mem/cache/cache.hh"

#include <cassert>

#include "base/compiler.hh"
#include "base/logging.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CacheTags.hh"
#include "debug/CacheVerbose.hh"
#include "enums/Clusivity.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/tags/base.hh"
#include "mem/cache/write_queue_entry.hh"
#include "mem/request.hh"
#include "params/Cache.hh"

Cache::Cache(const CacheParams *p)
    : BaseCache(p, p->system->cacheLineSize()),
      doFastWrites(true)
{
}

void
Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                      bool deferred_response, bool pending_downgrade)
{
    BaseCache::satisfyRequest(pkt, blk);

    if (pkt->isRead()) {
        // determine if this read is from a (coherent) cache or not
        if (pkt->fromCache()) {
            assert(pkt->getSize() == blkSize);
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsWritable()) {
                // sanity check
                assert(pkt->cmd == MemCmd::ReadExReq ||
                       pkt->cmd == MemCmd::SCUpgradeFailReq);
                assert(!pkt->hasSharers());

                // if we have a dirty copy, make sure the recipient
                // keeps it marked dirty (in the modified state)
                if (blk->isDirty()) {
                    pkt->setCacheResponding();
                    blk->status &= ~BlkDirty;
                }
            } else if (blk->isWritable() && !pending_downgrade &&
                       !pkt->hasSharers() &&
                       pkt->cmd != MemCmd::ReadCleanReq) {
                // we can give the requester a writable copy on a read
                // request if:
                // - we have a writable copy at this level (& below)
                // - we don't have a pending snoop from below
                //   signaling another read request
                // - no other cache above has a copy (otherwise it
                //   would have set the hasSharers flag when
                //   snooping the packet)
                // - the read has not explicitly asked for a clean
                //   copy of the line
                if (blk->isDirty()) {
                    // special considerations if we're owner:
                    if (!deferred_response) {
                        // respond with the line in Modified state
                        // (cacheResponding set, hasSharers not set)
                        pkt->setCacheResponding();

                        // if this cache is mostly inclusive, we
                        // keep the block in the Exclusive state,
                        // and pass it upwards as Modified
                        // (writable and dirty), hence we have
                        // multiple caches, all on the same path
                        // towards memory, all considering the
                        // same block writable, but only one
                        // considering it Modified

                        // we get away with multiple caches (on
                        // the same path to memory) considering
                        // the block writable as we always enter
                        // the cache hierarchy through a cache,
                        // and first snoop upwards in all other
                        // branches
                        blk->status &= ~BlkDirty;
                    } else {
                        // if we're responding after our own miss,
                        // there's a window where the recipient didn't
                        // know it was getting ownership and may not
                        // have responded to snoops correctly, so we
                        // have to respond with a shared line
                        pkt->setHasSharers();
                    }
                }
            } else {
                // otherwise only respond with a shared copy
                pkt->setHasSharers();
            }
        }
    }
}
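// An informal summary of the cache-to-cache read outcomes above (a
// sketch for orientation only; the code is authoritative):
//
//   requester needs writable, block dirty        -> respond, pass the line
//                                                   as Modified, clear our
//                                                   dirty bit
//   plain read, we are writable, no sharers, no
//   pending downgrade, not a ReadCleanReq:
//     block dirty, response not deferred         -> respond, pass Modified
//     block dirty, response deferred             -> respond, but only pass
//                                                   a Shared copy
//     block clean                                -> pass a writable
//                                                   (Exclusive) copy
//   any other coherent read                      -> hasSharers set, the
//                                                   requester gets Shared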
/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

bool
Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
              PacketList &writebacks)
{

    if (pkt->req->isUncacheable()) {
        assert(pkt->isRequest());

        chatty_assert(!(isReadOnly && pkt->isWrite()),
                      "Should never see a write in a read-only cache %s\n",
                      name());

        DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

        // flush and invalidate any existing block
        CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
        if (old_blk && old_blk->isValid()) {
            evictBlock(old_blk, writebacks);
        }

        blk = nullptr;
        // lookupLatency is the latency in case the request is uncacheable.
        lat = lookupLatency;
        return false;
    }

    return BaseCache::access(pkt, blk, lat, writebacks);
}

void
Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // We use forwardLatency here because we are copying writebacks to
        // write buffer.

        // Call isCachedAbove for Writebacks, CleanEvicts and
        // WriteCleans to discover if the block is cached above.
        if (isCachedAbove(wbPkt)) {
            if (wbPkt->cmd == MemCmd::CleanEvict) {
                // Delete CleanEvict because cached copies exist above. The
                // packet destructor will delete the request object because
                // this is a non-snoop request packet which does not require a
                // response.
                delete wbPkt;
            } else if (wbPkt->cmd == MemCmd::WritebackClean) {
                // clean writeback, do not send since the block is
                // still cached above
                assert(writebackClean);
                delete wbPkt;
            } else {
                assert(wbPkt->cmd == MemCmd::WritebackDirty ||
                       wbPkt->cmd == MemCmd::WriteClean);
                // Set BLOCK_CACHED flag in Writeback and send below, so that
                // the Writeback does not reset the bit corresponding to this
                // address in the snoop filter below.
                wbPkt->setBlockCached();
                allocateWriteBuffer(wbPkt, forward_time);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            allocateWriteBuffer(wbPkt, forward_time);
        }
        writebacks.pop_front();
    }
}
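// An illustrative scenario for the BLOCK_CACHED handling above and below
// (hypothetical two-level hierarchy): an L2 evicting a clean block sends a
// CleanEvict towards memory; if the isCachedAbove() snoop finds a copy in
// the L1, the CleanEvict is simply dropped so the snoop filter keeps
// treating the address as cached. A dirty writeback in the same situation
// must still be sent (the data has to reach memory), but it carries the
// BLOCK_CACHED flag so the snoop filter entry is preserved.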
void
Cache::doWritebacksAtomic(PacketList& writebacks)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // Call isCachedAbove for both Writebacks and CleanEvicts. If
        // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
        // and discard CleanEvicts.
        if (isCachedAbove(wbPkt, false)) {
            if (wbPkt->cmd == MemCmd::WritebackDirty ||
                wbPkt->cmd == MemCmd::WriteClean) {
                // Set BLOCK_CACHED flag in Writeback and send below,
                // so that the Writeback does not reset the bit
                // corresponding to this address in the snoop filter
                // below. We can discard CleanEvicts because cached
                // copies exist above. Atomic mode isCachedAbove
                // modifies packet to set BLOCK_CACHED flag
                memSidePort.sendAtomic(wbPkt);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            memSidePort.sendAtomic(wbPkt);
        }
        writebacks.pop_front();
        // In case of CleanEvicts, the packet destructor will delete the
        // request object because this is a non-snoop request packet which
        // does not require a response.
        delete wbPkt;
    }
}


void
Cache::recvTimingSnoopResp(PacketPtr pkt)
{
    DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

    // determine if the response is from a snoop request we created
    // (in which case it should be in the outstandingSnoop), or if we
    // merely forwarded someone else's snoop request
    const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
        outstandingSnoop.end();

    if (!forwardAsSnoop) {
        // the packet came from this cache, so sink it here and do not
        // forward it
        assert(pkt->cmd == MemCmd::HardPFResp);

        outstandingSnoop.erase(pkt->req);

        DPRINTF(Cache, "Got prefetch response from above for addr "
                "%#llx (%s)\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
        recvTimingResp(pkt);
        return;
    }

    // forwardLatency is set here because there is a response from an
    // upper level cache.
    // To pay the delay that occurs if the packet comes from the bus,
    // we charge also headerDelay.
    Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    memSidePort.schedTimingSnoopResp(pkt, snoop_resp_time);
}

void
Cache::promoteWholeLineWrites(PacketPtr pkt)
{
    // Cache line clearing instructions
    if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) {
        pkt->cmd = MemCmd::WriteLineReq;
        DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
    }
}

void
Cache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    // should never be satisfying an uncacheable access as we
    // flush and invalidate any existing block as part of the
    // lookup
    assert(!pkt->req->isUncacheable());

    BaseCache::handleTimingReqHit(pkt, blk, request_time);
}

void
Cache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time,
                           Tick request_time)
{
    if (pkt->req->isUncacheable()) {
        // ignore any existing MSHR if we are dealing with an
        // uncacheable request

        // should have flushed and have no valid block
        assert(!blk || !blk->isValid());

        mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;

        if (pkt->isWrite()) {
            allocateWriteBuffer(pkt, forward_time);
        } else {
            assert(pkt->isRead());

            // uncacheable accesses always allocate a new MSHR

            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }

        return;
    }

    Addr blk_addr = pkt->getBlockAddr(blkSize);

    MSHR *mshr = mshrQueue.findMatch(blk_addr, pkt->isSecure());

    // Software prefetch handling:
    // To keep the core from waiting on data it won't look at
    // anyway, send back a response with dummy data. Miss handling
    // will continue asynchronously. Unfortunately, the core will
    // insist upon freeing the original Packet/Request, so we have to
    // create a new pair with a different lifecycle. Note that this
    // processing happens before any MSHR munging on behalf of
    // this request because this new Request will be the one stored
    // into the MSHRs, not the original.
    if (pkt->cmd.isSWPrefetch()) {
        assert(pkt->needsResponse());
        assert(pkt->req->hasPaddr());
        assert(!pkt->req->isUncacheable());

        // There's no reason to add a prefetch as an additional target
        // to an existing MSHR. If an outstanding request is already
        // in progress, there is nothing for the prefetch to do.
        // If this is the case, we don't even create a request at all.
        PacketPtr pf = nullptr;

        if (!mshr) {
            // copy the request and create a new SoftPFReq packet
            RequestPtr req = std::make_shared<Request>(pkt->req->getPaddr(),
                                                       pkt->req->getSize(),
                                                       pkt->req->getFlags(),
                                                       pkt->req->masterId());
            pf = new Packet(req, pkt->cmd);
            pf->allocate();
            assert(pf->getAddr() == pkt->getAddr());
            assert(pf->getSize() == pkt->getSize());
        }

        pkt->makeTimingResponse();

        // request_time is used here, taking into account lat and the delay
        // charged if the packet comes from the xbar.
        cpuSidePort.schedTimingResp(pkt, request_time, true);

        // If an outstanding request is in progress (we found an
        // MSHR) this is set to null
        pkt = pf;
    }

    BaseCache::handleTimingReqMiss(pkt, mshr, blk, forward_time, request_time);
}
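// Timeline sketch of the software-prefetch handling above (illustrative):
//
//   1. the core issues a SoftPFReq that misses in this cache
//   2. the original packet is immediately turned into a timing response
//      with dummy data so the core can retire the prefetch instruction
//   3. if no MSHR already covers the line, a cloned request/packet (pf)
//      takes the original's place in the miss handling; its eventual
//      response is consumed in serviceMSHRTargets() and deleted rather
//      than being sent upwards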
void
Cache::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(CacheTags, "%s tags:\n%s\n", __func__, tags->print());

    promoteWholeLineWrites(pkt);

    if (pkt->cacheResponding()) {
        // a cache above us (but not where the packet came from) is
        // responding to the request, in other words it has the line
        // in Modified or Owned state
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if the packet needs the block to be writable, and the cache
        // that has promised to respond (setting the cache responding
        // flag) is not providing writable (it is in Owned rather than
        // the Modified state), we know that there may be other Shared
        // copies in the system; go out and invalidate them all
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        // an upstream cache that had the line in Owned state
        // (dirty, but not writable), is responding and thus
        // transferring the dirty line from one branch of the
        // cache hierarchy to another

        // send out an express snoop and invalidate all other
        // copies (snooping a packet that needs writable is the
        // same as an invalidation), thus turning the Owned line
        // into a Modified line, note that we don't invalidate the
        // block in the current cache or any other cache on the
        // path to memory

        // create a downstream express snoop with cleared packet
        // flags, there is no need to allocate any data as the
        // packet is merely used to co-ordinate state transitions
        Packet *snoop_pkt = new Packet(pkt, true, false);

        // also reset the bus time that the original packet has
        // not yet paid for
        snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;

        // make this an instantaneous express snoop, and let the
        // other caches in the system know that another cache
        // is responding, because we have found the authoritative
        // copy (Modified or Owned) that will supply the right
        // data
        snoop_pkt->setExpressSnoop();
        snoop_pkt->setCacheResponding();

        // this express snoop travels towards the memory, and at
        // every crossbar it is snooped upwards thus reaching
        // every cache in the system
        bool M5_VAR_USED success = memSidePort.sendTimingReq(snoop_pkt);
        // express snoops always succeed
        assert(success);

        // main memory will delete the snoop packet

        // queue for deletion, as opposed to immediate deletion, as
        // the sending cache is still relying on the packet
        pendingDelete.reset(pkt);

        // no need to take any further action in this particular cache
        // as an upstream cache has already committed to responding,
        // and we have already sent out any express snoops in the
        // section above to ensure all other copies in the system are
        // invalidated
        return;
    }

    BaseCache::recvTimingReq(pkt);
}
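// The command selection in createMissPacket() below can be summarised as
// follows (an informal sketch; the code is authoritative):
//
//   uncacheable, upgrade-on-invalid-block,
//   InvalidateReq, or clean request            -> nullptr (forward as is)
//   WriteLineReq                               -> InvalidateReq
//   valid (read-only) block, needs writable    -> UpgradeReq / SCUpgradeReq
//   SCUpgradeFailReq / StoreCondFailReq        -> SCUpgradeFailReq
//   otherwise, needs writable                  -> ReadExReq
//   otherwise, forced-clean response           -> ReadCleanReq
//   otherwise                                  -> ReadSharedReq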
PacketPtr
Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                        bool needsWritable) const
{
    // should never see evictions here
    assert(!cpu_pkt->isEviction());

    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable() ||
        (!blkValid && cpu_pkt->isUpgrade()) ||
        cpu_pkt->cmd == MemCmd::InvalidateReq || cpu_pkt->isClean()) {
        // uncacheable requests and upgrades from upper-level caches
        // that missed completely just go through as is
        return nullptr;
    }

    assert(cpu_pkt->needsResponse());

    MemCmd cmd;
    // @TODO make useUpgrades a parameter.
    // Note that ownership protocols require upgrade, otherwise a
    // write miss on a shared owned block will generate a ReadExcl,
    // which will clobber the owned copy.
    const bool useUpgrades = true;
    if (cpu_pkt->cmd == MemCmd::WriteLineReq) {
        assert(!blkValid || !blk->isWritable());
        // forward as invalidate to all other caches, this gives us
        // the line in Exclusive state, and invalidates all other
        // copies
        cmd = MemCmd::InvalidateReq;
    } else if (blkValid && useUpgrades) {
        // only reason to be here is that blk is read only and we need
        // it to be writable
        assert(needsWritable);
        assert(!blk->isWritable());
        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
               cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
        // Even though this SC will fail, we still need to send out the
        // request and get the data to supply it to other snoopers in the
        // case where the determination that the StoreCond fails is delayed
        // due to all caches not being on the same local bus.
        cmd = MemCmd::SCUpgradeFailReq;
    } else {
        // block is invalid

        // If the request does not need a writable copy there are two
        // cases where we need to ensure the response will not fetch the
        // block in dirty state:
        // * this cache is read only and it does not perform
        //   writebacks,
        // * this cache is mostly exclusive and will not fill (since
        //   it does not fill it will have to writeback the dirty data
        //   immediately which generates unnecessary writebacks).
        bool force_clean_rsp = isReadOnly || clusivity == Enums::mostly_excl;
        cmd = needsWritable ? MemCmd::ReadExReq :
            (force_clean_rsp ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
    }
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);

    // if there are upstream caches that have already marked the
    // packet as having sharers (not passing writable), pass that info
    // downstream
    if (cpu_pkt->hasSharers() && !needsWritable) {
        // note that cpu_pkt may have spent a considerable time in the
        // MSHR queue and that the information could possibly be out
        // of date, however, there is no harm in conservatively
        // assuming the block has sharers
        pkt->setHasSharers();
        DPRINTF(Cache, "%s: passing hasSharers from %s to %s\n",
                __func__, cpu_pkt->print(), pkt->print());
    }

    // the packet should be block aligned
    assert(pkt->getAddr() == pkt->getBlockAddr(blkSize));

    pkt->allocate();
    DPRINTF(Cache, "%s: created %s from %s\n", __func__, pkt->print(),
            cpu_pkt->print());
    return pkt;
}


Cycles
Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *blk,
                           PacketList &writebacks)
{
    // deal with the packets that go through the write path of
    // the cache, i.e. any evictions and writes
    if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
        (pkt->req->isUncacheable() && pkt->isWrite())) {
        Cycles latency = ticksToCycles(memSidePort.sendAtomic(pkt));

        // at this point, if the request was an uncacheable write
        // request, it has been satisfied by a memory below and the
        // packet carries the response back
        assert(!(pkt->req->isUncacheable() && pkt->isWrite()) ||
               pkt->isResponse());

        return latency;
    }

    // only misses left

    PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable());

    bool is_forward = (bus_pkt == nullptr);

    if (is_forward) {
        // just forwarding the same request to the next level
        // no local cache operation involved
        bus_pkt = pkt;
    }

    DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__,
            bus_pkt->print());

#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    Cycles latency = ticksToCycles(memSidePort.sendAtomic(bus_pkt));

    bool is_invalidate = bus_pkt->isInvalidate();

    // We are now dealing with the response handling
    DPRINTF(Cache, "%s: Receive response: %s in state %i\n", __func__,
            bus_pkt->print(), old_state);

    // If packet was a forward, the response (if any) is already
    // in place in the bus_pkt == pkt structure, so we don't need
    // to do anything. Otherwise, use the separate bus_pkt to
    // generate response to pkt and then delete it.
    if (!is_forward) {
        if (pkt->needsResponse()) {
            assert(bus_pkt->isResponse());
            if (bus_pkt->isError()) {
                pkt->makeAtomicResponse();
                pkt->copyError(bus_pkt);
            } else if (pkt->cmd == MemCmd::WriteLineReq) {
                // note the use of pkt, not bus_pkt here.

                // write-line request to the cache that promoted
                // the write to a whole line
                blk = handleFill(pkt, blk, writebacks,
                                 allocOnFill(pkt->cmd));
                assert(blk != NULL);
                is_invalidate = false;
                satisfyRequest(pkt, blk);
            } else if (bus_pkt->isRead() ||
                       bus_pkt->cmd == MemCmd::UpgradeResp) {
                // we're updating cache state to allow us to
                // satisfy the upstream request from the cache
                blk = handleFill(bus_pkt, blk, writebacks,
                                 allocOnFill(pkt->cmd));
                satisfyRequest(pkt, blk);
                maintainClusivity(pkt->fromCache(), blk);
            } else {
                // we're satisfying the upstream request without
                // modifying cache state, e.g., a write-through
                pkt->makeAtomicResponse();
            }
        }
        delete bus_pkt;
    }

    if (is_invalidate && blk && blk->isValid()) {
        invalidateBlock(blk);
    }

    return latency;
}

Tick
Cache::recvAtomic(PacketPtr pkt)
{
    promoteWholeLineWrites(pkt);

    return BaseCache::recvAtomic(pkt);
}


/////////////////////////////////////////////////////
//
// Response handling: responses from the memory side
//
/////////////////////////////////////////////////////
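// Worked example for the critical-word-first accounting in
// serviceMSHRTargets() below (illustrative numbers): with a 64-byte line,
// if the initial target requested offset 16 and a later target requests
// offset 8, then transfer_offset = (8 - 16) + 64 = 56, so that target is
// charged pkt->payloadDelay on top of the response latency, whereas a
// target at offset 16 (transfer_offset == 0) holds the critical word and
// is not.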
void
Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk,
                          PacketList &writebacks)
{
    MSHR::Target *initial_tgt = mshr->getTarget();
    // First offset for critical word first calculations
    const int initial_offset = initial_tgt->pkt->getOffset(blkSize);

    const bool is_error = pkt->isError();
    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
    // allow invalidation responses originating from write-line
    // requests to be discarded
    bool is_invalidate = pkt->isInvalidate();

    MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
    for (auto &target: targets) {
        Packet *tgt_pkt = target.pkt;
        switch (target.source) {
          case MSHR::Target::FromCPU:
            Tick completion_time;
            // Here we charge on completion_time the delay of the xbar if the
            // packet comes from it, charged on headerDelay.
            completion_time = pkt->headerDelay;

            // Software prefetch handling for cache closest to core
            if (tgt_pkt->cmd.isSWPrefetch()) {
                // a software prefetch would have already been ack'd
                // immediately with dummy data so the core would be able to
                // retire it. This request completes right here, so we
                // deallocate it.
                delete tgt_pkt;
                break; // skip response
            }

            // unlike the other packet flows, where data is found in other
            // caches or memory and brought back, write-line requests always
            // have the data right away, so the "is fill?" determination
            // above cannot actually be made until the stored MSHR state has
            // been examined. We "catch up" with that logic here, which is
            // duplicated from above.
            if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
                assert(!is_error);
                // we got the block in a writable state, so promote
                // any deferred targets if possible
                mshr->promoteWritable();
                // NB: we use the original packet here and not the response!
                blk = handleFill(tgt_pkt, blk, writebacks,
                                 targets.allocOnFill);
                assert(blk);

                // treat as a fill, and discard the invalidation
                // response
                is_fill = true;
                is_invalidate = false;
            }

            if (is_fill) {
                satisfyRequest(tgt_pkt, blk, true, mshr->hasPostDowngrade());

                // How many bytes past the first request is this one
                int transfer_offset =
                    tgt_pkt->getOffset(blkSize) - initial_offset;
                if (transfer_offset < 0) {
                    transfer_offset += blkSize;
                }

                // If not critical word (offset) return payloadDelay.
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache or
                // the core.
                completion_time += clockEdge(responseLatency) +
                    (transfer_offset ? pkt->payloadDelay : 0);

                assert(!tgt_pkt->req->isUncacheable());

                assert(tgt_pkt->req->masterId() < system->maxMasters());
                missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] +=
                    completion_time - target.recvTime;
            } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
                // failed StoreCond upgrade
                assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
                       tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
                       tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache or
                // the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                tgt_pkt->req->setExtraData(0);
            } else {
                // We are about to send a response to a cache above
                // that asked for an invalidation; we need to
                // invalidate our copy immediately as the most
                // up-to-date copy of the block will now be in the
                // cache above. It will also prevent this cache from
                // responding (if the block was previously dirty) to
                // snoops as they should snoop the caches above where
                // they will get the response from.
                if (is_invalidate && blk && blk->isValid()) {
                    invalidateBlock(blk);
                }
                // not a cache fill, just forwarding response
                // responseLatency is the latency of the return path
                // from lower level caches/memory to the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                if (pkt->isRead() && !is_error) {
                    // sanity check
                    assert(pkt->getAddr() == tgt_pkt->getAddr());
                    assert(pkt->getSize() >= tgt_pkt->getSize());

                    tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
                }
            }
            tgt_pkt->makeTimingResponse();
            // if this packet is an error copy that to the new packet
            if (is_error)
                tgt_pkt->copyError(pkt);
            if (tgt_pkt->cmd == MemCmd::ReadResp &&
                (is_invalidate || mshr->hasPostInvalidate())) {
                // If intermediate cache got ReadRespWithInvalidate,
                // propagate that. Response should not have
                // isInvalidate() set otherwise.
                tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
                DPRINTF(Cache, "%s: updated cmd to %s\n", __func__,
                        tgt_pkt->print());
            }
            // Reset the bus additional time as it is now accounted for
            tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
            cpuSidePort.schedTimingResp(tgt_pkt, completion_time, true);
            break;

          case MSHR::Target::FromPrefetcher:
            assert(tgt_pkt->cmd == MemCmd::HardPFReq);
            if (blk)
                blk->status |= BlkHWPrefetched;
            delete tgt_pkt;
            break;

          case MSHR::Target::FromSnoop:
            // I don't believe that a snoop can be in an error state
            assert(!is_error);
            // response to snoop request
            DPRINTF(Cache, "processing deferred snoop...\n");
            // If the response is invalidating, a snooping target can
            // be satisfied if it is also invalidating. If the response is
            // not only invalidating, but more specifically an InvalidateResp,
            // and the MSHR was created due to an InvalidateReq, then a cache
            // above is waiting to satisfy a WriteLineReq. In this case even a
            // non-invalidating snoop is added as a target here since this is
            // the ordering point. When the InvalidateResp reaches this cache,
            // the snooping target will further snoop the cache above with the
            // WriteLineReq.
            assert(!is_invalidate || pkt->cmd == MemCmd::InvalidateResp ||
                   pkt->req->isCacheMaintenance() ||
                   mshr->hasPostInvalidate());
            handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
            break;

          default:
            panic("Illegal target->source enum %d\n", target.source);
        }
    }

    maintainClusivity(targets.hasFromCache, blk);

    if (blk && blk->isValid()) {
        // an invalidate response stemming from a write line request
        // should not invalidate the block, so check if the
        // invalidation should be discarded
        if (is_invalidate || mshr->hasPostInvalidate()) {
            invalidateBlock(blk);
        } else if (mshr->hasPostDowngrade()) {
            blk->status &= ~BlkWritable;
        }
    }
}
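// The eviction path below chooses between three packet types (sketch):
//
//   dirty block                      -> WritebackDirty (writebackBlk)
//   clean block, writebackClean set  -> WritebackClean (writebackBlk)
//   clean block, otherwise           -> CleanEvict (cleanEvictBlk), a
//                                       zero-sized write whose only job is
//                                       to update snoop filters below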
PacketPtr
Cache::evictBlock(CacheBlk *blk)
{
    PacketPtr pkt = (blk->isDirty() || writebackClean) ?
        writebackBlk(blk) : cleanEvictBlk(blk);

    invalidateBlock(blk);

    return pkt;
}

void
Cache::evictBlock(CacheBlk *blk, PacketList &writebacks)
{
    PacketPtr pkt = evictBlock(blk);
    if (pkt) {
        writebacks.push_back(pkt);
    }
}

PacketPtr
Cache::cleanEvictBlk(CacheBlk *blk)
{
    assert(!writebackClean);
    assert(blk && blk->isValid() && !blk->isDirty());

    // Creating a zero sized write, a message to the snoop filter
    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);

    PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
    pkt->allocate();
    DPRINTF(Cache, "Create CleanEvict %s\n", pkt->print());

    return pkt;
}

/////////////////////////////////////////////////////
//
// Snoop path: requests coming in from the memory side
//
/////////////////////////////////////////////////////

void
Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
                              bool already_copied, bool pending_inval)
{
    // sanity check
    assert(req_pkt->isRequest());
    assert(req_pkt->needsResponse());

    DPRINTF(Cache, "%s: for %s\n", __func__, req_pkt->print());
    // timing-mode snoop responses require a new packet, unless we
    // already made a copy...
    PacketPtr pkt = req_pkt;
    if (!already_copied)
        // do not clear flags, and allocate space for data if the
        // packet needs it (the only packets that carry data are read
        // responses)
        pkt = new Packet(req_pkt, false, req_pkt->isRead());

    assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
           pkt->hasSharers());
    pkt->makeTimingResponse();
    if (pkt->isRead()) {
        pkt->setDataFromBlock(blk_data, blkSize);
    }
    if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
        // Assume we defer a response to a read from a far-away cache
        // A, then later defer a ReadExcl from a cache B on the same
        // bus as us. We'll assert cacheResponding in both cases, but
        // in the latter case cacheResponding will keep the
        // invalidation from reaching cache A. This special response
        // tells cache A that it gets the block to satisfy its read,
        // but must immediately invalidate it.
        pkt->cmd = MemCmd::ReadRespWithInvalidate;
    }
    // Here we consider forward_time, paying for just forward latency and
    // also charging the delay provided by the xbar.
    // forward_time is used as send_time in next allocateWriteBuffer().
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    DPRINTF(CacheVerbose, "%s: created response: %s tick: %lu\n", __func__,
            pkt->print(), forward_time);
    memSidePort.schedTimingSnoopResp(pkt, forward_time, true);
}

uint32_t
Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
                   bool is_deferred, bool pending_inval)
{
    DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());
    // deferred snoops can only happen in timing mode
    assert(!(is_deferred && !is_timing));
    // pending_inval only makes sense on deferred snoops
    assert(!(pending_inval && !is_deferred));
    assert(pkt->isRequest());

    // the packet may get modified if we or a forwarded snooper
    // responds in atomic mode, so remember a few things about the
    // original packet up front
    bool invalidate = pkt->isInvalidate();
    bool M5_VAR_USED needs_writable = pkt->needsWritable();

    // at the moment we could get an uncacheable write which does not
    // have the invalidate flag, and we need a suitable way of dealing
    // with this case
    panic_if(invalidate && pkt->req->isUncacheable(),
             "%s got an invalidating uncacheable snoop request %s",
             name(), pkt->print());

    uint32_t snoop_delay = 0;

    if (forwardSnoops) {
        // first propagate snoop upward to see if anyone above us wants to
        // handle it. save & restore packet src since it will get
        // rewritten to be relative to cpu-side bus (if any)
        bool alreadyResponded = pkt->cacheResponding();
        if (is_timing) {
            // copy the packet so that we can clear any flags before
            // forwarding it upwards, we also allocate data (passing
            // the pointer along in case of static data), in case
            // there is a snoop hit in upper levels
            Packet snoopPkt(pkt, true, true);
            snoopPkt.setExpressSnoop();
            // the snoop packet does not need to wait any additional
            // time
            snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
            cpuSidePort.sendTimingSnoopReq(&snoopPkt);

            // add the header delay (including crossbar and snoop
            // delays) of the upward snoop to the snoop delay for this
            // cache
            snoop_delay += snoopPkt.headerDelay;

            if (snoopPkt.cacheResponding()) {
                // cache-to-cache response from some upper cache
                assert(!alreadyResponded);
                pkt->setCacheResponding();
            }
            // upstream cache has the block, or has an outstanding
            // MSHR, pass the flag on
            if (snoopPkt.hasSharers()) {
                pkt->setHasSharers();
            }
            // If this request is a prefetch or clean evict and an upper level
            // signals block present, make sure to propagate the block
            // presence to the requester.
            if (snoopPkt.isBlockCached()) {
                pkt->setBlockCached();
            }
            // If the request was satisfied by snooping the cache
            // above, mark the original packet as satisfied too.
            if (snoopPkt.satisfied()) {
                pkt->setSatisfied();
            }
        } else {
            cpuSidePort.sendAtomicSnoop(pkt);
            if (!alreadyResponded && pkt->cacheResponding()) {
                // cache-to-cache response from some upper cache:
                // forward response to original requester
                assert(pkt->isResponse());
            }
        }
    }

    bool respond = false;
    bool blk_valid = blk && blk->isValid();
    if (pkt->isClean()) {
        if (blk_valid && blk->isDirty()) {
            DPRINTF(CacheVerbose, "%s: packet (snoop) %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
            PacketList writebacks;
            writebacks.push_back(wb_pkt);

            if (is_timing) {
                // anything that is merely forwarded pays for the forward
                // latency and the delay provided by the crossbar
                Tick forward_time = clockEdge(forwardLatency) +
                    pkt->headerDelay;
                doWritebacks(writebacks, forward_time);
            } else {
                doWritebacksAtomic(writebacks);
            }
            pkt->setSatisfied();
        }
    } else if (!blk_valid) {
        DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__,
                pkt->print());
        if (is_deferred) {
            // we no longer have the block, and will not respond, but a
            // packet was allocated in MSHR::handleSnoop and we have
            // to delete it
            assert(pkt->needsResponse());

            // we have passed the block to a cache upstream, that
            // cache should be responding
            assert(pkt->cacheResponding());

            delete pkt;
        }
        return snoop_delay;
    } else {
        DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__,
                pkt->print(), blk->print());

        // We may end up modifying both the block state and the packet (if
        // we respond in atomic mode), so just figure out what to do now
        // and then do it later. We respond to all snoops that need
        // responses provided we have the block in dirty state. The
        // invalidation itself is taken care of below. We don't respond to
        // cache maintenance operations as this is done by the destination
        // xbar.
        respond = blk->isDirty() && pkt->needsResponse();

        chatty_assert(!(isReadOnly && blk->isDirty()), "Should never have "
                      "a dirty block in a read-only cache %s\n", name());
    }

    // Invalidate any prefetches from below that would strip write permissions
    // MemCmd::HardPFReq is only observed by upstream caches. After missing
    // above and in its own cache, a new MemCmd::ReadReq is created that
    // downstream caches observe.
    if (pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
                "from lower cache\n", pkt->getAddr(), pkt->print());
        pkt->setBlockCached();
        return snoop_delay;
    }

    if (pkt->isRead() && !invalidate) {
        // reading without requiring the line in a writable state
        assert(!needs_writable);
        pkt->setHasSharers();

        // if the requesting packet is uncacheable, retain the line in
        // the current state, otherwise unset the writable flag,
        // which means we go from Modified to Owned (and will respond
        // below), remain in Owned (and will respond below), from
        // Exclusive to Shared, or remain in Shared
        if (!pkt->req->isUncacheable())
            blk->status &= ~BlkWritable;
        DPRINTF(Cache, "new state is %s\n", blk->print());
    }

    if (respond) {
        // prevent anyone else from responding, cache as well as
        // memory, and also prevent any memory from even seeing the
        // request
        pkt->setCacheResponding();
        if (!pkt->isClean() && blk->isWritable()) {
            // inform the cache hierarchy that this cache had the line
            // in the Modified state so that we avoid unnecessary
            // invalidations (see Packet::setResponderHadWritable)
            pkt->setResponderHadWritable();

            // in the case of an uncacheable request there is no point
            // in setting the responderHadWritable flag, but since the
            // recipient does not care there is no harm in doing so
        } else {
            // if the packet has needsWritable set we invalidate our
            // copy below and all other copies will be invalidated
            // through express snoops, and if needsWritable is not set
            // we already called setHasSharers above
        }

        // if we are returning a writable and dirty (Modified) line,
        // we should be invalidating the line
        panic_if(!invalidate && !pkt->hasSharers(),
                 "%s is passing a Modified line through %s, "
                 "but keeping the block", name(), pkt->print());

        if (is_timing) {
            doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
        } else {
            pkt->makeAtomicResponse();
            // packets such as upgrades do not actually have any data
            // payload
            if (pkt->hasData())
                pkt->setDataFromBlock(blk->data, blkSize);
        }
    }

    if (!respond && is_deferred) {
        assert(pkt->needsResponse());
        delete pkt;
    }

    // Do this last in case it deallocates block data or something
    // like that
    if (blk_valid && invalidate) {
        invalidateBlock(blk);
        DPRINTF(Cache, "new state is %s\n", blk->print());
    }

    return snoop_delay;
}


void
Cache::recvTimingSnoopReq(PacketPtr pkt)
{
    DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());

    // no need to snoop requests that are not in range
    if (!inRange(pkt->getAddr())) {
        return;
    }

    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);

    Addr blk_addr = pkt->getBlockAddr(blkSize);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    // Update the latency cost of the snoop so that the crossbar can
    // account for it. Do not overwrite what other neighbouring caches
    // have already done, rather take the maximum. The update is
    // tentative, for cases where we return before an upward snoop
    // happens below.
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
                                         lookupLatency * clockPeriod());

    // Inform request (Prefetch, CleanEvict or Writeback) from below of
    // MSHR hit, set setBlockCached.
    if (mshr && pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Setting block cached for %s from lower cache on "
                "mshr hit\n", pkt->print());
        pkt->setBlockCached();
        return;
    }

    // Bypass any existing cache maintenance requests if the request
    // has been satisfied already (i.e., the dirty block has been
    // found).
    if (mshr && pkt->req->isCacheMaintenance() && pkt->satisfied()) {
        return;
    }

    // Let the MSHR itself track the snoop and decide whether we want
    // to go ahead and do the regular cache snoop
    if (mshr && mshr->handleSnoop(pkt, order++)) {
        DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx "
                "(%s). mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
                mshr->print());

        if (mshr->getNumTargets() > numTarget)
            warn("allocating bonus target for snoop"); //handle later
        return;
    }

    // We also need to check the writeback buffers and handle those
    WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure);
    if (wb_entry) {
        DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
                pkt->getAddr(), is_secure ? "s" : "ns");
        // Expect to see only Writebacks and/or CleanEvicts here, both of
        // which should not be generated for uncacheable data.
        assert(!wb_entry->isUncacheable());
        // There should only be a single request responsible for generating
        // Writebacks/CleanEvicts.
        assert(wb_entry->getNumTargets() == 1);
        PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
        assert(wb_pkt->isEviction() || wb_pkt->cmd == MemCmd::WriteClean);

        if (pkt->isEviction()) {
            // if the block is found in the write queue, set the BLOCK_CACHED
            // flag for Writeback/CleanEvict snoop. On return the snoop will
            // propagate the BLOCK_CACHED flag in Writeback packets and prevent
            // any CleanEvicts from travelling down the memory hierarchy.
            pkt->setBlockCached();
            DPRINTF(Cache, "%s: Squashing %s from lower cache on writequeue "
                    "hit\n", __func__, pkt->print());
            return;
        }

        // conceptually writebacks are no different from other blocks in
        // this cache, so the behaviour is modelled after handleSnoop,
        // the difference being that instead of querying the block
        // state to determine if it is dirty and writable, we use the
        // command and fields of the writeback packet
        bool respond = wb_pkt->cmd == MemCmd::WritebackDirty &&
            pkt->needsResponse();
        bool have_writable = !wb_pkt->hasSharers();
        bool invalidate = pkt->isInvalidate();

        if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
            assert(!pkt->needsWritable());
            pkt->setHasSharers();
            wb_pkt->setHasSharers();
        }

        if (respond) {
            pkt->setCacheResponding();

            if (have_writable) {
                pkt->setResponderHadWritable();
            }

            doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
                                   false, false);
        }

        if (invalidate && wb_pkt->cmd != MemCmd::WriteClean) {
            // Invalidation trumps our writeback... discard here
            // Note: markInService will remove entry from writeback buffer.
            markInService(wb_entry);
            delete wb_pkt;
        }
    }

    // If this was a shared writeback, there may still be
    // other shared copies above that require invalidation.
    // We could be more selective and return here if the
    // request is non-exclusive or if the writeback is
    // exclusive.
    uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false);

    // Override what we did when we first saw the snoop, as we now
    // also have the cost of the upwards snoops to account for
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay +
                                         lookupLatency * clockPeriod());
}

Tick
Cache::recvAtomicSnoop(PacketPtr pkt)
{
    // no need to snoop requests that are not in range.
    if (!inRange(pkt->getAddr())) {
        return 0;
    }

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
    uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
    return snoop_delay + lookupLatency * clockPeriod();
}

bool
Cache::isCachedAbove(PacketPtr pkt, bool is_timing)
{
    if (!forwardSnoops)
        return false;
    // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
    // Writeback snoops into upper level caches to check for copies of the
    // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict
    // packet, the cache can inform the crossbar below of presence or absence
    // of the block.
    if (is_timing) {
        Packet snoop_pkt(pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // Assert that packet is either Writeback or CleanEvict and not a
        // prefetch request because prefetch requests need an MSHR and may
        // generate a snoop response.
        assert(pkt->isEviction() || pkt->cmd == MemCmd::WriteClean);
        snoop_pkt.senderState = nullptr;
        cpuSidePort.sendTimingSnoopReq(&snoop_pkt);
        // Writeback/CleanEvict snoops do not generate a snoop response.
        assert(!(snoop_pkt.cacheResponding()));
        return snoop_pkt.isBlockCached();
    } else {
        cpuSidePort.sendAtomicSnoop(pkt);
        return pkt->isBlockCached();
    }
}

bool
Cache::sendMSHRQueuePacket(MSHR* mshr)
{
    assert(mshr);

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;

    if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
        DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());

        // we should never have hardware prefetches to allocated
        // blocks
        assert(!tags->findBlock(mshr->blkAddr, mshr->isSecure));

        // We need to check the caches above us to verify that
        // they don't have a copy of this block in the dirty state
        // at the moment. Without this check we could get a stale
        // copy from memory that might get used in place of the
        // dirty one.
        Packet snoop_pkt(tgt_pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // We are sending this packet upwards, but if it hits we will
        // get a snoop response that we end up treating just like a
        // normal response, hence it needs the MSHR as its sender
        // state
        snoop_pkt.senderState = mshr;
        cpuSidePort.sendTimingSnoopReq(&snoop_pkt);

        // Check to see if the prefetch was squashed by an upper cache
        // (to prevent us from grabbing the line), or if a writeback
        // arrived between the time the prefetch was placed in the
        // MSHRs and when it was selected to be sent.

        // It is important to check cacheResponding before
        // prefetchSquashed. If another cache has committed to
        // responding, it will be sending a dirty response which will
        // arrive at the MSHR allocated for this request. Checking the
        // prefetchSquash first may result in the MSHR being
        // prematurely deallocated.
        if (snoop_pkt.cacheResponding()) {
            auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
            assert(r.second);

            // if we are getting a snoop response with no sharers it
            // will be allocated as Modified
            bool pending_modified_resp = !snoop_pkt.hasSharers();
            markInService(mshr, pending_modified_resp);

            DPRINTF(Cache, "Upward snoop of prefetch for addr"
                    " %#x (%s) hit\n",
                    tgt_pkt->getAddr(), tgt_pkt->isSecure() ? "s" : "ns");
            return false;
        }

        if (snoop_pkt.isBlockCached()) {
            DPRINTF(Cache, "Block present, prefetch squashed by cache. "
                    "Deallocating mshr target %#x.\n",
                    mshr->blkAddr);

            // Deallocate the mshr target
            if (mshrQueue.forceDeallocateTarget(mshr)) {
                // Clear the blocked flag in case this deallocation
                // freed an MSHR when all had previously been utilized
                clearBlocked(Blocked_NoMSHRs);
            }

            // given that no response is expected, delete Request and Packet
            delete tgt_pkt;

            return false;
        }
    }

    return BaseCache::sendMSHRQueuePacket(mshr);
}

Cache*
CacheParams::create()
{
    assert(tags);
    assert(replacement_policy);

    return new Cache(this);
}
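// For reference, a Cache like this one is normally instantiated from a
// Python configuration script rather than directly from C++; the generated
// params code then calls CacheParams::create() above. A minimal sketch
// (the sizes and latencies here are illustrative values, not defaults):
//
//     from m5.objects import Cache
//
//     class L1DCache(Cache):
//         size = '32kB'
//         assoc = 2
//         tag_latency = 2
//         data_latency = 2
//         response_latency = 2
//         mshrs = 4
//         tgts_per_mshr = 20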