/*
 * Copyright (c) 2010-2019 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Sandberg
 *          Nikos Nikoleris
 */

/**
 * @file
 * Cache definitions.
 */

#include "mem/cache/cache.hh"

#include <cassert>

#include "base/compiler.hh"
#include "base/logging.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CacheTags.hh"
#include "debug/CacheVerbose.hh"
#include "enums/Clusivity.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/tags/base.hh"
#include "mem/cache/write_queue_entry.hh"
#include "mem/request.hh"
#include "params/Cache.hh"

Cache::Cache(const CacheParams *p)
    : BaseCache(p, p->system->cacheLineSize()),
      doFastWrites(true)
{
}

void
Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                      bool deferred_response, bool pending_downgrade)
{
    BaseCache::satisfyRequest(pkt, blk);

    if (pkt->isRead()) {
        // determine if this read is from a (coherent) cache or not
        if (pkt->fromCache()) {
            assert(pkt->getSize() == blkSize);
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsWritable()) {
                // sanity check
                assert(pkt->cmd == MemCmd::ReadExReq ||
                       pkt->cmd == MemCmd::SCUpgradeFailReq);
                assert(!pkt->hasSharers());

                // if we have a dirty copy, make sure the recipient
                // keeps it marked dirty (in the Modified state)
                if (blk->isDirty()) {
                    pkt->setCacheResponding();
                    blk->status &= ~BlkDirty;
                }
            } else if (blk->isWritable() && !pending_downgrade &&
                       !pkt->hasSharers() &&
                       pkt->cmd != MemCmd::ReadCleanReq) {
                // we can give the requester a writable copy on a read
                // request if:
                //  - we have a writable copy at this level (& below)
                //  - we don't have a pending snoop from below
                //    signaling another read request
                //  - no other cache above has a copy (otherwise it
                //    would have set the hasSharers flag when
                //    snooping the packet)
                //  - the read has not explicitly asked for a clean
                //    copy of the line
                if (blk->isDirty()) {
                    // special considerations if we're owner:
                    if (!deferred_response) {
                        // respond with the line in Modified state
                        // (cacheResponding set, hasSharers not set)
                        pkt->setCacheResponding();

                        // if this cache is mostly inclusive, we
                        // keep the block in the Exclusive state,
                        // and pass it upwards as Modified
                        // (writable and dirty), hence we have
                        // multiple caches, all on the same path
                        // towards memory, all considering the
                        // same block writable, but only one
                        // considering it Modified

                        // we get away with multiple caches (on
                        // the same path to memory) considering
                        // the block writable as we always enter
                        // the cache hierarchy through a cache,
                        // and first snoop upwards in all other
                        // branches
                        blk->status &= ~BlkDirty;
                    } else {
                        // if we're responding after our own miss,
                        // there's a window where the recipient didn't
                        // know it was getting ownership and may not
                        // have responded to snoops correctly, so we
                        // have to respond with a shared line
                        pkt->setHasSharers();
                    }
                }
            } else {
                // otherwise only respond with a shared copy
                pkt->setHasSharers();
            }
        }
    }
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

bool
Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
              PacketList &writebacks)
{

    if (pkt->req->isUncacheable()) {
        assert(pkt->isRequest());
        chatty_assert(!(isReadOnly && pkt->isWrite()),
                      "Should never see a write in a read-only cache %s\n",
                      name());

        DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

        // flush and invalidate any existing block
        CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
        if (old_blk && old_blk->isValid()) {
            BaseCache::evictBlock(old_blk, writebacks);
        }

        blk = nullptr;
        // lookupLatency is the latency in case the request is uncacheable.
        lat = lookupLatency;
        return false;
    }

    return BaseCache::access(pkt, blk, lat, writebacks);
}

void
Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // We use forwardLatency here because we are copying writebacks to
        // the write buffer.

        // Call isCachedAbove for Writebacks, CleanEvicts and
        // WriteCleans to discover if the block is cached above.
        if (isCachedAbove(wbPkt)) {
            if (wbPkt->cmd == MemCmd::CleanEvict) {
                // Delete CleanEvict because cached copies exist above. The
                // packet destructor will delete the request object because
                // this is a non-snoop request packet which does not require
                // a response.
                delete wbPkt;
            } else if (wbPkt->cmd == MemCmd::WritebackClean) {
                // clean writeback, do not send since the block is
                // still cached above
                assert(writebackClean);
                delete wbPkt;
            } else {
                assert(wbPkt->cmd == MemCmd::WritebackDirty ||
                       wbPkt->cmd == MemCmd::WriteClean);
                // Set BLOCK_CACHED flag in Writeback and send below, so that
                // the Writeback does not reset the bit corresponding to this
                // address in the snoop filter below.
                wbPkt->setBlockCached();
                allocateWriteBuffer(wbPkt, forward_time);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            allocateWriteBuffer(wbPkt, forward_time);
        }
        writebacks.pop_front();
    }
}

void
Cache::doWritebacksAtomic(PacketList& writebacks)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // Call isCachedAbove for both Writebacks and CleanEvicts. If
        // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
        // and discard CleanEvicts.
        if (isCachedAbove(wbPkt, false)) {
            if (wbPkt->cmd == MemCmd::WritebackDirty ||
                wbPkt->cmd == MemCmd::WriteClean) {
                // Set BLOCK_CACHED flag in Writeback and send below,
                // so that the Writeback does not reset the bit
                // corresponding to this address in the snoop filter
                // below. We can discard CleanEvicts because cached
                // copies exist above. In atomic mode isCachedAbove
                // modifies the packet to set the BLOCK_CACHED flag.
                memSidePort.sendAtomic(wbPkt);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            memSidePort.sendAtomic(wbPkt);
        }
        writebacks.pop_front();
        // In case of CleanEvicts, the packet destructor will delete the
        // request object because this is a non-snoop request packet which
        // does not require a response.
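        // (Unlike the timing path, Writeback/WriteClean packets are also
        // deleted here: sendAtomic completes in place, so no write buffer
        // entry ever takes ownership of the packet.)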
        delete wbPkt;
    }
}


void
Cache::recvTimingSnoopResp(PacketPtr pkt)
{
    DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

    // determine if the response is from a snoop request we created
    // (in which case it should be in the outstandingSnoop), or if we
    // merely forwarded someone else's snoop request
    const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
        outstandingSnoop.end();

    if (!forwardAsSnoop) {
        // the packet came from this cache, so sink it here and do not
        // forward it
        assert(pkt->cmd == MemCmd::HardPFResp);

        outstandingSnoop.erase(pkt->req);

        DPRINTF(Cache, "Got prefetch response from above for addr "
                "%#llx (%s)\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
        recvTimingResp(pkt);
        return;
    }

    // forwardLatency is set here because there is a response from an
    // upper level cache.
    // To account for the delay incurred if the packet came via the bus,
    // we also charge headerDelay.
    Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    memSidePort.schedTimingSnoopResp(pkt, snoop_resp_time);
}

void
Cache::promoteWholeLineWrites(PacketPtr pkt)
{
    // Cache line clearing instructions: a whole-line, block-aligned,
    // unmasked write can be promoted to a WriteLineReq
    if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0) &&
        !pkt->isMaskedWrite()) {
        pkt->cmd = MemCmd::WriteLineReq;
        DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
    }
}

void
Cache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    // should never be satisfying an uncacheable access as we
    // flush and invalidate any existing block as part of the
    // lookup
    assert(!pkt->req->isUncacheable());

    BaseCache::handleTimingReqHit(pkt, blk, request_time);
}

void
Cache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time,
                           Tick request_time)
{
    if (pkt->req->isUncacheable()) {
        // ignore any existing MSHR if we are dealing with an
        // uncacheable request

        // should have flushed and have no valid block
        assert(!blk || !blk->isValid());

        mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;

        if (pkt->isWrite()) {
            allocateWriteBuffer(pkt, forward_time);
        } else {
            assert(pkt->isRead());

            // uncacheable accesses always allocate a new MSHR

            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }

        return;
    }

    Addr blk_addr = pkt->getBlockAddr(blkSize);

    MSHR *mshr = mshrQueue.findMatch(blk_addr, pkt->isSecure());

    // Software prefetch handling:
    // To keep the core from waiting on data it won't look at
    // anyway, send back a response with dummy data. Miss handling
    // will continue asynchronously. Unfortunately, the core will
    // insist upon freeing the original Packet/Request, so we have to
    // create a new pair with a different lifecycle. Note that this
    // processing happens before any MSHR munging on behalf of
    // this request because this new Request will be the one stored
    // into the MSHRs, not the original.
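    // The sequence below therefore does two things: it answers the core
    // right away with a dummy response, and, if no MSHR already covers
    // this line, clones the request into a new packet (pf) that carries
    // the actual miss through the MSHR path.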
    if (pkt->cmd.isSWPrefetch()) {
        assert(pkt->needsResponse());
        assert(pkt->req->hasPaddr());
        assert(!pkt->req->isUncacheable());

        // There's no reason to add a prefetch as an additional target
        // to an existing MSHR. If an outstanding request is already
        // in progress, there is nothing for the prefetch to do.
        // If this is the case, we don't even create a request at all.
        PacketPtr pf = nullptr;

        if (!mshr) {
            // copy the request and create a new SoftPFReq packet
            RequestPtr req = std::make_shared<Request>(pkt->req->getPaddr(),
                                                       pkt->req->getSize(),
                                                       pkt->req->getFlags(),
                                                       pkt->req->masterId());
            pf = new Packet(req, pkt->cmd);
            pf->allocate();
            assert(pf->matchAddr(pkt));
            assert(pf->getSize() == pkt->getSize());
        }

        pkt->makeTimingResponse();

        // request_time is used here, taking into account lat and the delay
        // charged if the packet comes from the xbar.
        cpuSidePort.schedTimingResp(pkt, request_time);

        // If an outstanding request is in progress (we found an
        // MSHR) this is set to null
        pkt = pf;
    }

    BaseCache::handleTimingReqMiss(pkt, mshr, blk, forward_time, request_time);
}

void
Cache::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(CacheTags, "%s tags:\n%s\n", __func__, tags->print());

    promoteWholeLineWrites(pkt);

    if (pkt->cacheResponding()) {
        // a cache above us (but not where the packet came from) is
        // responding to the request, in other words it has the line
        // in Modified or Owned state
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if the packet needs the block to be writable, and the cache
        // that has promised to respond (setting the cache responding
        // flag) is not providing writable (it is in Owned rather than
        // the Modified state), we know that there may be other Shared
        // copies in the system; go out and invalidate them all
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        // an upstream cache that had the line in Owned state
        // (dirty, but not writable), is responding and thus
        // transferring the dirty line from one branch of the
        // cache hierarchy to another

        // send out an express snoop and invalidate all other
        // copies (snooping a packet that needs writable is the
        // same as an invalidation), thus turning the Owned line
        // into a Modified line, note that we don't invalidate the
        // block in the current cache or any other cache on the
        // path to memory

        // create a downstream express snoop with cleared packet
        // flags, there is no need to allocate any data as the
        // packet is merely used to co-ordinate state transitions
        Packet *snoop_pkt = new Packet(pkt, true, false);

        // also reset the bus time that the original packet has
        // not yet paid for
        snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;

        // make this an instantaneous express snoop, and let the
        // other caches in the system know that another cache
        // is responding, because we have found the authoritative
        // copy (Modified or Owned) that will supply the right
        // data
        snoop_pkt->setExpressSnoop();
        snoop_pkt->setCacheResponding();

        // this express snoop travels towards the memory, and at
        // every crossbar it is snooped upwards thus reaching
        // every cache in the system
        bool M5_VAR_USED success = memSidePort.sendTimingReq(snoop_pkt);
        // express snoops always succeed
        assert(success);

        // main memory will delete the snoop packet

        // queue for deletion, as opposed to immediate deletion, as
        // the sending cache is still relying on the packet
        pendingDelete.reset(pkt);

        // no need to take any further action in this particular cache
        // as an upstream cache has already committed to responding,
        // and we have already sent out any express snoops in the
        // section above to ensure all other copies in the system are
        // invalidated
        return;
    }

    BaseCache::recvTimingReq(pkt);
}

PacketPtr
Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                        bool needsWritable,
                        bool is_whole_line_write) const
{
    // should never see evictions here
    assert(!cpu_pkt->isEviction());

    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable() ||
        (!blkValid && cpu_pkt->isUpgrade()) ||
        cpu_pkt->cmd == MemCmd::InvalidateReq || cpu_pkt->isClean()) {
        // uncacheable requests and upgrades from upper-level caches
        // that missed completely just go through as is
        return nullptr;
    }

    assert(cpu_pkt->needsResponse());

    MemCmd cmd;
    // @TODO make useUpgrades a parameter.
    // Note that ownership protocols require upgrade, otherwise a
    // write miss on a shared owned block will generate a ReadExcl,
    // which will clobber the owned copy.
    const bool useUpgrades = true;
    assert(cpu_pkt->cmd != MemCmd::WriteLineReq || is_whole_line_write);
    if (is_whole_line_write) {
        assert(!blkValid || !blk->isWritable());
        // forward as invalidate to all other caches, this gives us
        // the line in Exclusive state, and invalidates all other
        // copies
        cmd = MemCmd::InvalidateReq;
    } else if (blkValid && useUpgrades) {
        // only reason to be here is that blk is read only and we need
        // it to be writable
        assert(needsWritable);
        assert(!blk->isWritable());
        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
               cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
        // Even though this SC will fail, we still need to send out the
        // request and get the data to supply it to other snoopers in the
        // case where the determination that the StoreCond fails is delayed
        // due to all caches not being on the same local bus.
        cmd = MemCmd::SCUpgradeFailReq;
    } else {
        // block is invalid

        // If the request does not need a writable copy there are two
        // cases where we need to ensure the response will not fetch the
        // block in dirty state:
        // * this cache is read only and it does not perform
        //   writebacks,
        // * this cache is mostly exclusive and will not fill (since
        //   it does not fill it will have to writeback the dirty data
        //   immediately which generates unnecessary writebacks).
        bool force_clean_rsp = isReadOnly || clusivity == Enums::mostly_excl;
        cmd = needsWritable ? MemCmd::ReadExReq :
            (force_clean_rsp ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
    }
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);

    // if there are upstream caches that have already marked the
    // packet as having sharers (not passing writable), pass that info
    // downstream
    if (cpu_pkt->hasSharers() && !needsWritable) {
        // note that cpu_pkt may have spent a considerable time in the
        // MSHR queue and that the information could possibly be out
        // of date, however, there is no harm in conservatively
        // assuming the block has sharers
        pkt->setHasSharers();
        DPRINTF(Cache, "%s: passing hasSharers from %s to %s\n",
                __func__, cpu_pkt->print(), pkt->print());
    }

    // the packet should be block aligned
    assert(pkt->getAddr() == pkt->getBlockAddr(blkSize));

    pkt->allocate();
    DPRINTF(Cache, "%s: created %s from %s\n", __func__, pkt->print(),
            cpu_pkt->print());
    return pkt;
}


Cycles
Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
                           PacketList &writebacks)
{
    // deal with the packets that go through the write path of
    // the cache, i.e. any evictions and writes
    if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
        (pkt->req->isUncacheable() && pkt->isWrite())) {
        Cycles latency = ticksToCycles(memSidePort.sendAtomic(pkt));

        // at this point, if the request was an uncacheable write
        // request, it has been satisfied by a memory below and the
        // packet carries the response back
        assert(!(pkt->req->isUncacheable() && pkt->isWrite()) ||
               pkt->isResponse());

        return latency;
    }

    // only misses left

    PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable(),
                                         pkt->isWholeLineWrite(blkSize));

    bool is_forward = (bus_pkt == nullptr);

    if (is_forward) {
        // just forwarding the same request to the next level
        // no local cache operation involved
        bus_pkt = pkt;
    }

    DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__,
            bus_pkt->print());

#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    Cycles latency = ticksToCycles(memSidePort.sendAtomic(bus_pkt));

    bool is_invalidate = bus_pkt->isInvalidate();

    // We are now dealing with the response handling
    DPRINTF(Cache, "%s: Receive response: %s in state %i\n", __func__,
            bus_pkt->print(), old_state);

    // If packet was a forward, the response (if any) is already
    // in place in the bus_pkt == pkt structure, so we don't need
    // to do anything. Otherwise, use the separate bus_pkt to
    // generate response to pkt and then delete it.
    if (!is_forward) {
        if (pkt->needsResponse()) {
            assert(bus_pkt->isResponse());
            if (bus_pkt->isError()) {
                pkt->makeAtomicResponse();
                pkt->copyError(bus_pkt);
            } else if (pkt->isWholeLineWrite(blkSize)) {
                // note the use of pkt, not bus_pkt here.

                // write-line request to the cache that promoted
                // the write to a whole line
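                // The write allocator, when configured, may veto the
                // allocation here (e.g., for write streams it deems
                // non-temporal); the fill is then serviced through the
                // temporary block rather than displacing a resident line.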
                const bool allocate = allocOnFill(pkt->cmd) &&
                    (!writeAllocator || writeAllocator->allocate());
                blk = handleFill(bus_pkt, blk, writebacks, allocate);
                assert(blk != nullptr);
                is_invalidate = false;
                satisfyRequest(pkt, blk);
            } else if (bus_pkt->isRead() ||
                       bus_pkt->cmd == MemCmd::UpgradeResp) {
                // we're updating cache state to allow us to
                // satisfy the upstream request from the cache
                blk = handleFill(bus_pkt, blk, writebacks,
                                 allocOnFill(pkt->cmd));
                satisfyRequest(pkt, blk);
                maintainClusivity(pkt->fromCache(), blk);
            } else {
                // we're satisfying the upstream request without
                // modifying cache state, e.g., a write-through
                pkt->makeAtomicResponse();
            }
        }
        delete bus_pkt;
    }

    if (is_invalidate && blk && blk->isValid()) {
        invalidateBlock(blk);
    }

    return latency;
}

Tick
Cache::recvAtomic(PacketPtr pkt)
{
    promoteWholeLineWrites(pkt);

    // follow the same flow as in recvTimingReq, and check if a cache
    // above us is responding
    if (pkt->cacheResponding()) {
        assert(!pkt->req->isCacheInvalidate());
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if a cache is responding, and it had the line in Owned
        // rather than Modified state, we need to invalidate any
        // copies that are not on the same path to memory
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        return memSidePort.sendAtomic(pkt);
    }

    return BaseCache::recvAtomic(pkt);
}


/////////////////////////////////////////////////////
//
// Response handling: responses from the memory side
//
/////////////////////////////////////////////////////


void
Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk)
{
    QueueEntry::Target *initial_tgt = mshr->getTarget();
    // First offset for critical word first calculations
    const int initial_offset = initial_tgt->pkt->getOffset(blkSize);

    const bool is_error = pkt->isError();
    // allow invalidation responses originating from write-line
    // requests to be discarded
    bool is_invalidate = pkt->isInvalidate() &&
        !mshr->wasWholeLineWrite;

    MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
    for (auto &target: targets) {
        Packet *tgt_pkt = target.pkt;
        switch (target.source) {
          case MSHR::Target::FromCPU:
            Tick completion_time;
            // Here we charge on completion_time the delay of the xbar if the
            // packet comes from it, charged on headerDelay.
            completion_time = pkt->headerDelay;

            // Software prefetch handling for cache closest to core
            if (tgt_pkt->cmd.isSWPrefetch()) {
                if (tgt_pkt->needsWritable()) {
                    // All other copies of the block were invalidated and we
                    // have an exclusive copy.

                    // The coherence protocol assumes that if we fetched an
                    // exclusive copy of the block, we have the intention to
                    // modify it. Therefore the MSHR for the PrefetchExReq has
                    // been the point of ordering and this cache has committed
                    // to respond to snoops for the block.
                    //
                    // In most cases this is true anyway - a PrefetchExReq
                    // will be followed by a WriteReq. However, if that
                    // doesn't happen, the block is not marked as dirty and
                    // the cache would not respond to snoops even though it
                    // has committed to do so.
                    //
                    // To avoid deadlocks in cases where there is a snoop
                    // between the PrefetchExReq and the expected WriteReq, we
                    // proactively mark the block as Dirty.
                    assert(blk);
                    blk->status |= BlkDirty;

                    panic_if(isReadOnly, "Prefetch exclusive requests from "
                             "read-only cache %s\n", name());
                }

                // a software prefetch would have already been ack'd
                // immediately with dummy data so the core would be able to
                // retire it. This request completes right here, so we
                // deallocate it.
                delete tgt_pkt;
                break; // skip response
            }

            // unlike the other packet flows, where data is found in other
            // caches or memory and brought back, write-line requests always
            // have the data right away, so the above check for "is fill?"
            // cannot actually be determined until examining the stored MSHR
            // state. We "catch up" with that logic here, which is duplicated
            // from above.
            if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
                assert(!is_error);
                assert(blk);
                assert(blk->isWritable());
            }

            if (blk && blk->isValid() && !mshr->isForward) {
                satisfyRequest(tgt_pkt, blk, true, mshr->hasPostDowngrade());

                // How many bytes past the first request is this one
                int transfer_offset =
                    tgt_pkt->getOffset(blkSize) - initial_offset;
                if (transfer_offset < 0) {
                    transfer_offset += blkSize;
                }

                // If not critical word (offset) return payloadDelay.
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache or
                // the core.
                completion_time += clockEdge(responseLatency) +
                    (transfer_offset ? pkt->payloadDelay : 0);

                assert(!tgt_pkt->req->isUncacheable());

                assert(tgt_pkt->req->masterId() < system->maxMasters());
                missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] +=
                    completion_time - target.recvTime;
            } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
                // failed StoreCond upgrade
                assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
                       tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
                       tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache or
                // the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                tgt_pkt->req->setExtraData(0);
            } else {
                // We are about to send a response to a cache above
                // that asked for an invalidation; we need to
                // invalidate our copy immediately as the most
                // up-to-date copy of the block will now be in the
                // cache above. It will also prevent this cache from
                // responding (if the block was previously dirty) to
                // snoops as they should snoop the caches above where
                // they will get the response from.
                if (is_invalidate && blk && blk->isValid()) {
                    invalidateBlock(blk);
                }
                // not a cache fill, just forwarding response
                // responseLatency is the latency of the return path
                // from lower level caches/memory to the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                if (pkt->isRead() && !is_error) {
                    // sanity check
                    assert(pkt->matchAddr(tgt_pkt));
                    assert(pkt->getSize() >= tgt_pkt->getSize());

                    tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
                }

                // this response did not allocate here and therefore
                // it was not consumed, make sure that any flags are
                // carried over to the cache above
                tgt_pkt->copyResponderFlags(pkt);
            }
            tgt_pkt->makeTimingResponse();
            // if this packet is an error copy that to the new packet
            if (is_error)
                tgt_pkt->copyError(pkt);
            if (tgt_pkt->cmd == MemCmd::ReadResp &&
                (is_invalidate || mshr->hasPostInvalidate())) {
                // If an intermediate cache got ReadRespWithInvalidate,
                // propagate that. Response should not have
                // isInvalidate() set otherwise.
                tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
                DPRINTF(Cache, "%s: updated cmd to %s\n", __func__,
                        tgt_pkt->print());
            }
            // Reset the bus additional time as it is now accounted for
            tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
            cpuSidePort.schedTimingResp(tgt_pkt, completion_time);
            break;

          case MSHR::Target::FromPrefetcher:
            assert(tgt_pkt->cmd == MemCmd::HardPFReq);
            if (blk)
                blk->status |= BlkHWPrefetched;
            delete tgt_pkt;
            break;

          case MSHR::Target::FromSnoop:
            // I don't believe that a snoop can be in an error state
            assert(!is_error);
            // response to snoop request
            DPRINTF(Cache, "processing deferred snoop...\n");
            // If the response is invalidating, a snooping target can
            // be satisfied if it is also invalidating. If the response is
            // not only invalidating, but more specifically an
            // InvalidateResp, and the MSHR was created due to an
            // InvalidateReq, then a cache above is waiting to satisfy a
            // WriteLineReq. In this case even a non-invalidating snoop is
            // added as a target here since this is the ordering point.
            // When the InvalidateResp reaches this cache, the snooping
            // target will further snoop the cache above with the
            // WriteLineReq.
            assert(!is_invalidate || pkt->cmd == MemCmd::InvalidateResp ||
                   pkt->req->isCacheMaintenance() ||
                   mshr->hasPostInvalidate());
            handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
            break;

          default:
            panic("Illegal target->source enum %d\n", target.source);
        }
    }

    maintainClusivity(targets.hasFromCache, blk);

    if (blk && blk->isValid()) {
        // an invalidate response stemming from a write line request
        // should not invalidate the block, so check if the
        // invalidation should be discarded
        if (is_invalidate || mshr->hasPostInvalidate()) {
            invalidateBlock(blk);
        } else if (mshr->hasPostDowngrade()) {
            blk->status &= ~BlkWritable;
        }
    }
}

PacketPtr
Cache::evictBlock(CacheBlk *blk)
{
    PacketPtr pkt = (blk->isDirty() || writebackClean) ?
        writebackBlk(blk) : cleanEvictBlk(blk);

    invalidateBlock(blk);

    return pkt;
}

PacketPtr
Cache::cleanEvictBlk(CacheBlk *blk)
{
    assert(!writebackClean);
    assert(blk && blk->isValid() && !blk->isDirty());

    // Creating a zero sized write, a message to the snoop filter
    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);

    PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
    pkt->allocate();
    DPRINTF(Cache, "Create CleanEvict %s\n", pkt->print());

    return pkt;
}

/////////////////////////////////////////////////////
//
// Snoop path: requests coming in from the memory side
//
/////////////////////////////////////////////////////

void
Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
                              bool already_copied, bool pending_inval)
{
    // sanity check
    assert(req_pkt->isRequest());
    assert(req_pkt->needsResponse());

    DPRINTF(Cache, "%s: for %s\n", __func__, req_pkt->print());
    // timing-mode snoop responses require a new packet, unless we
    // already made a copy...
    PacketPtr pkt = req_pkt;
    if (!already_copied)
        // do not clear flags, and allocate space for data if the
        // packet needs it (the only packets that carry data are read
        // responses)
        pkt = new Packet(req_pkt, false, req_pkt->isRead());

    assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
           pkt->hasSharers());
    pkt->makeTimingResponse();
    if (pkt->isRead()) {
        pkt->setDataFromBlock(blk_data, blkSize);
    }
    if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
        // Assume we defer a response to a read from a far-away cache
        // A, then later defer a ReadExcl from a cache B on the same
        // bus as us. We'll assert cacheResponding in both cases, but
        // in the latter case cacheResponding will keep the
        // invalidation from reaching cache A. This special response
        // tells cache A that it gets the block to satisfy its read,
        // but must immediately invalidate it.
        pkt->cmd = MemCmd::ReadRespWithInvalidate;
    }
    // Here we consider forward_time, paying for just the forward latency
    // and also charging the delay provided by the xbar.
    // forward_time is used as the send time of the snoop response below.
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    DPRINTF(CacheVerbose, "%s: created response: %s tick: %lu\n", __func__,
            pkt->print(), forward_time);
    memSidePort.schedTimingSnoopResp(pkt, forward_time);
}

uint32_t
Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
                   bool is_deferred, bool pending_inval)
{
    DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());
    // deferred snoops can only happen in timing mode
    assert(!(is_deferred && !is_timing));
    // pending_inval only makes sense on deferred snoops
    assert(!(pending_inval && !is_deferred));
    assert(pkt->isRequest());

    // the packet may get modified if we or a forwarded snooper
    // responds in atomic mode, so remember a few things about the
    // original packet up front
    bool invalidate = pkt->isInvalidate();
    bool M5_VAR_USED needs_writable = pkt->needsWritable();

    // at the moment we could get an uncacheable write which does not
    // have the invalidate flag, and we need a suitable way of dealing
    // with this case
    panic_if(invalidate && pkt->req->isUncacheable(),
             "%s got an invalidating uncacheable snoop request %s",
             name(), pkt->print());

    uint32_t snoop_delay = 0;

    if (forwardSnoops) {
        // first propagate snoop upward to see if anyone above us wants to
        // handle it. save & restore packet src since it will get
        // rewritten to be relative to the cpu-side bus (if any)
        if (is_timing) {
            // copy the packet so that we can clear any flags before
            // forwarding it upwards, we also allocate data (passing
            // the pointer along in case of static data), in case
            // there is a snoop hit in upper levels
            Packet snoopPkt(pkt, true, true);
            snoopPkt.setExpressSnoop();
            // the snoop packet does not need to wait any additional
            // time
            snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
            cpuSidePort.sendTimingSnoopReq(&snoopPkt);

            // add the header delay (including crossbar and snoop
            // delays) of the upward snoop to the snoop delay for this
            // cache
            snoop_delay += snoopPkt.headerDelay;

            // If this request is a prefetch or clean evict and an upper level
            // signals block present, make sure to propagate the block
            // presence to the requester.
            if (snoopPkt.isBlockCached()) {
                pkt->setBlockCached();
            }
            // If the request was satisfied by snooping the cache
            // above, mark the original packet as satisfied too.
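            // (A snoop is marked satisfied when an upper-level cache has
            // carried out the requested operation on our behalf; this
            // matters mainly for cache maintenance operations.)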
            if (snoopPkt.satisfied()) {
                pkt->setSatisfied();
            }

            // Copy over flags from the snoop response to make sure we
            // inform the final destination
            pkt->copyResponderFlags(&snoopPkt);
        } else {
            bool already_responded = pkt->cacheResponding();
            cpuSidePort.sendAtomicSnoop(pkt);
            if (!already_responded && pkt->cacheResponding()) {
                // cache-to-cache response from some upper cache:
                // forward response to original requester
                assert(pkt->isResponse());
            }
        }
    }

    bool respond = false;
    bool blk_valid = blk && blk->isValid();
    if (pkt->isClean()) {
        if (blk_valid && blk->isDirty()) {
            DPRINTF(CacheVerbose, "%s: packet (snoop) %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
            PacketList writebacks;
            writebacks.push_back(wb_pkt);

            if (is_timing) {
                // anything that is merely forwarded pays for the forward
                // latency and the delay provided by the crossbar
                Tick forward_time = clockEdge(forwardLatency) +
                    pkt->headerDelay;
                doWritebacks(writebacks, forward_time);
            } else {
                doWritebacksAtomic(writebacks);
            }
            pkt->setSatisfied();
        }
    } else if (!blk_valid) {
        DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__,
                pkt->print());
        if (is_deferred) {
            // we no longer have the block, and will not respond, but a
            // packet was allocated in MSHR::handleSnoop and we have
            // to delete it
            assert(pkt->needsResponse());

            // we have passed the block to a cache upstream, that
            // cache should be responding
            assert(pkt->cacheResponding());

            delete pkt;
        }
        return snoop_delay;
    } else {
        DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__,
                pkt->print(), blk->print());

        // We may end up modifying both the block state and the packet (if
        // we respond in atomic mode), so just figure out what to do now
        // and then do it later. We respond to all snoops that need
        // responses provided we have the block in dirty state. The
        // invalidation itself is taken care of below. We don't respond to
        // cache maintenance operations as this is done by the destination
        // xbar.
        respond = blk->isDirty() && pkt->needsResponse();

        chatty_assert(!(isReadOnly && blk->isDirty()), "Should never have "
                      "a dirty block in a read-only cache %s\n", name());
    }

    // Invalidate any prefetches from below that would strip write
    // permissions. MemCmd::HardPFReq is only observed by upstream caches.
    // After missing above and in its own cache, a new MemCmd::ReadReq is
    // created that downstream caches observe.
    if (pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
                "from lower cache\n", pkt->getAddr(), pkt->print());
        pkt->setBlockCached();
        return snoop_delay;
    }

    if (pkt->isRead() && !invalidate) {
        // reading without requiring the line in a writable state
        assert(!needs_writable);
        pkt->setHasSharers();

        // if the requesting packet is uncacheable, retain the line in
        // the current state, otherwise unset the writable flag,
        // which means we go from Modified to Owned (and will respond
        // below), remain in Owned (and will respond below), from
        // Exclusive to Shared, or remain in Shared
        if (!pkt->req->isUncacheable())
            blk->status &= ~BlkWritable;
        DPRINTF(Cache, "new state is %s\n", blk->print());
    }

    if (respond) {
        // prevent anyone else from responding, cache as well as
        // memory, and also prevent any memory from even seeing the
        // request
        pkt->setCacheResponding();
        if (!pkt->isClean() && blk->isWritable()) {
            // inform the cache hierarchy that this cache had the line
            // in the Modified state so that we avoid unnecessary
            // invalidations (see Packet::setResponderHadWritable)
            pkt->setResponderHadWritable();

            // in the case of an uncacheable request there is no point
            // in setting the responderHadWritable flag, but since the
            // recipient does not care there is no harm in doing so
        } else {
            // if the packet has needsWritable set we invalidate our
            // copy below and all other copies will be invalidated
            // through express snoops, and if needsWritable is not set
            // we already called setHasSharers above
        }

        // if we are returning a writable and dirty (Modified) line,
        // we should be invalidating the line
        panic_if(!invalidate && !pkt->hasSharers(),
                 "%s is passing a Modified line through %s, "
                 "but keeping the block", name(), pkt->print());

        if (is_timing) {
            doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
        } else {
            pkt->makeAtomicResponse();
            // packets such as upgrades do not actually have any data
            // payload
            if (pkt->hasData())
                pkt->setDataFromBlock(blk->data, blkSize);
        }

        // When a block is compressed, it must first be decompressed before
        // being read, and this increases the snoop delay.
        if (compressor && pkt->isRead()) {
            snoop_delay += compressor->getDecompressionLatency(blk);
        }
    }

    if (!respond && is_deferred) {
        assert(pkt->needsResponse());
        delete pkt;
    }

    // Do this last in case it deallocates block data or something
    // like that
    if (blk_valid && invalidate) {
        invalidateBlock(blk);
        DPRINTF(Cache, "new state is %s\n", blk->print());
    }

    return snoop_delay;
}


void
Cache::recvTimingSnoopReq(PacketPtr pkt)
{
    DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());

    // no need to snoop requests that are not in range
    if (!inRange(pkt->getAddr())) {
        return;
    }

    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);

    Addr blk_addr = pkt->getBlockAddr(blkSize);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    // Update the latency cost of the snoop so that the crossbar can
    // account for it.
    // Do not overwrite what other neighbouring caches have already done,
    // rather take the maximum. The update is tentative, for cases where
    // we return before an upward snoop happens below.
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
                                         lookupLatency * clockPeriod());

    // Inform requests from below (Prefetch, CleanEvict or Writeback) of
    // an MSHR hit by setting the BLOCK_CACHED flag.
    if (mshr && pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Setting block cached for %s from lower cache on "
                "mshr hit\n", pkt->print());
        pkt->setBlockCached();
        return;
    }

    // Let the MSHR itself track the snoop and decide whether we want
    // to go ahead and do the regular cache snoop
    if (mshr && mshr->handleSnoop(pkt, order++)) {
        DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s). "
                "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
                mshr->print());

        if (mshr->getNumTargets() > numTarget)
            warn("allocating bonus target for snoop"); //handle later
        return;
    }

    // We also need to check the writeback buffers and handle those
    WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure);
    if (wb_entry) {
        DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
                pkt->getAddr(), is_secure ? "s" : "ns");
        // Expect to see only Writebacks and/or CleanEvicts here, both of
        // which should not be generated for uncacheable data.
        assert(!wb_entry->isUncacheable());
        // There should only be a single request responsible for generating
        // Writebacks/CleanEvicts.
        assert(wb_entry->getNumTargets() == 1);
        PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
        assert(wb_pkt->isEviction() || wb_pkt->cmd == MemCmd::WriteClean);

        if (pkt->isEviction()) {
            // if the block is found in the write queue, set the BLOCK_CACHED
            // flag for the Writeback/CleanEvict snoop. On return the snoop
            // will propagate the BLOCK_CACHED flag in Writeback packets and
            // prevent any CleanEvicts from travelling down the memory
            // hierarchy.
            pkt->setBlockCached();
            DPRINTF(Cache, "%s: Squashing %s from lower cache on writequeue "
                    "hit\n", __func__, pkt->print());
            return;
        }

        // conceptually writebacks are no different to other blocks in
        // this cache, so the behaviour is modelled after handleSnoop,
        // the difference being that instead of querying the block
        // state to determine if it is dirty and writable, we use the
        // command and fields of the writeback packet
        bool respond = wb_pkt->cmd == MemCmd::WritebackDirty &&
            pkt->needsResponse();
        bool have_writable = !wb_pkt->hasSharers();
        bool invalidate = pkt->isInvalidate();

        if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
            assert(!pkt->needsWritable());
            pkt->setHasSharers();
            wb_pkt->setHasSharers();
        }

        if (respond) {
            pkt->setCacheResponding();

            if (have_writable) {
                pkt->setResponderHadWritable();
            }

            doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
                                   false, false);
        }

        if (invalidate && wb_pkt->cmd != MemCmd::WriteClean) {
            // Invalidation trumps our writeback... discard here
            // Note: markInService will remove the entry from the writeback
            // buffer.
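            // (WriteClean packets are exempt from this: they carry dirty
            // data to an explicit destination point and must still be
            // delivered even though the local copy is invalidated.)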
            markInService(wb_entry);
            delete wb_pkt;
        }
    }

    // If this was a shared writeback, there may still be
    // other shared copies above that require invalidation.
    // We could be more selective and return here if the
    // request is non-exclusive or if the writeback is
    // exclusive.
    uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false);

    // Override what we did when we first saw the snoop, as we now
    // also have the cost of the upwards snoops to account for
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay +
                                         lookupLatency * clockPeriod());
}

Tick
Cache::recvAtomicSnoop(PacketPtr pkt)
{
    // no need to snoop requests that are not in range.
    if (!inRange(pkt->getAddr())) {
        return 0;
    }

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
    uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
    return snoop_delay + lookupLatency * clockPeriod();
}

bool
Cache::isCachedAbove(PacketPtr pkt, bool is_timing)
{
    if (!forwardSnoops)
        return false;
    // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
    // Writeback snoops into upper level caches to check for copies of the
    // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict
    // packet, the cache can inform the crossbar below of presence or absence
    // of the block.
    if (is_timing) {
        Packet snoop_pkt(pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // Assert that packet is either Writeback or CleanEvict and not a
        // prefetch request because prefetch requests need an MSHR and may
        // generate a snoop response.
        assert(pkt->isEviction() || pkt->cmd == MemCmd::WriteClean);
        snoop_pkt.senderState = nullptr;
        cpuSidePort.sendTimingSnoopReq(&snoop_pkt);
        // Writeback/CleanEvict snoops do not generate a snoop response.
        assert(!(snoop_pkt.cacheResponding()));
        return snoop_pkt.isBlockCached();
    } else {
        cpuSidePort.sendAtomicSnoop(pkt);
        return pkt->isBlockCached();
    }
}

bool
Cache::sendMSHRQueuePacket(MSHR* mshr)
{
    assert(mshr);

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;

    if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
        DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());

        // we should never have hardware prefetches to allocated
        // blocks
        assert(!tags->findBlock(mshr->blkAddr, mshr->isSecure));

        // We need to check the caches above us to verify that
        // they don't have a copy of this block in the dirty state
        // at the moment. Without this check we could get a stale
        // copy from memory that might get used in place of the
        // dirty one.
        Packet snoop_pkt(tgt_pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // We are sending this packet upwards, but if it hits we will
        // get a snoop response that we end up treating just like a
        // normal response, hence it needs the MSHR as its sender
        // state
        snoop_pkt.senderState = mshr;
        cpuSidePort.sendTimingSnoopReq(&snoop_pkt);

        // Check to see if the prefetch was squashed by an upper cache
        // (to prevent us from grabbing the line) or if a writeback
        // arrived between the time the prefetch was placed in the MSHRs
        // and when it was selected to be sent.

        // It is important to check cacheResponding before
        // prefetchSquashed. If another cache has committed to
        // responding, it will be sending a dirty response which will
        // arrive at the MSHR allocated for this request. Checking the
        // prefetchSquash first may result in the MSHR being
        // prematurely deallocated.
        if (snoop_pkt.cacheResponding()) {
            auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
            assert(r.second);

            // if we are getting a snoop response with no sharers it
            // will be allocated as Modified
            bool pending_modified_resp = !snoop_pkt.hasSharers();
            markInService(mshr, pending_modified_resp);

            DPRINTF(Cache, "Upward snoop of prefetch for addr"
                    " %#x (%s) hit\n",
                    tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
            return false;
        }

        if (snoop_pkt.isBlockCached()) {
            DPRINTF(Cache, "Block present, prefetch squashed by cache. "
                    "Deallocating mshr target %#x.\n",
                    mshr->blkAddr);

            // Deallocate the mshr target
            if (mshrQueue.forceDeallocateTarget(mshr)) {
                // Clear the blocked flag if this deallocation freed an
                // MSHR when all had previously been utilized
                clearBlocked(Blocked_NoMSHRs);
            }

            // given that no response is expected, delete Request and Packet
            delete tgt_pkt;

            return false;
        }
    }

    return BaseCache::sendMSHRQueuePacket(mshr);
}

Cache*
CacheParams::create()
{
    assert(tags);
    assert(replacement_policy);

    return new Cache(this);
}