cache.cc revision 11747
1/* 2 * Copyright (c) 2010-2016 ARM Limited 3 * All rights reserved. 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2002-2005 The Regents of The University of Michigan 15 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc. 16 * All rights reserved. 17 * 18 * Redistribution and use in source and binary forms, with or without 19 * modification, are permitted provided that the following conditions are 20 * met: redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer; 22 * redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution; 25 * neither the name of the copyright holders nor the names of its 26 * contributors may be used to endorse or promote products derived from 27 * this software without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 40 * 41 * Authors: Erik Hallnor 42 * Dave Greene 43 * Nathan Binkert 44 * Steve Reinhardt 45 * Ron Dreslinski 46 * Andreas Sandberg 47 */ 48 49/** 50 * @file 51 * Cache definitions. 
52 */ 53 54#include "mem/cache/cache.hh" 55 56#include "base/misc.hh" 57#include "base/types.hh" 58#include "debug/Cache.hh" 59#include "debug/CachePort.hh" 60#include "debug/CacheTags.hh" 61#include "debug/CacheVerbose.hh" 62#include "mem/cache/blk.hh" 63#include "mem/cache/mshr.hh" 64#include "mem/cache/prefetch/base.hh" 65#include "sim/sim_exit.hh" 66 67Cache::Cache(const CacheParams *p) 68 : BaseCache(p, p->system->cacheLineSize()), 69 tags(p->tags), 70 prefetcher(p->prefetcher), 71 doFastWrites(true), 72 prefetchOnAccess(p->prefetch_on_access), 73 clusivity(p->clusivity), 74 writebackClean(p->writeback_clean), 75 tempBlockWriteback(nullptr), 76 writebackTempBlockAtomicEvent(this, false, 77 EventBase::Delayed_Writeback_Pri) 78{ 79 tempBlock = new CacheBlk(); 80 tempBlock->data = new uint8_t[blkSize]; 81 82 cpuSidePort = new CpuSidePort(p->name + ".cpu_side", this, 83 "CpuSidePort"); 84 memSidePort = new MemSidePort(p->name + ".mem_side", this, 85 "MemSidePort"); 86 87 tags->setCache(this); 88 if (prefetcher) 89 prefetcher->setCache(this); 90} 91 92Cache::~Cache() 93{ 94 delete [] tempBlock->data; 95 delete tempBlock; 96 97 delete cpuSidePort; 98 delete memSidePort; 99} 100 101void 102Cache::regStats() 103{ 104 BaseCache::regStats(); 105} 106 107void 108Cache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt) 109{ 110 assert(pkt->isRequest()); 111 112 uint64_t overwrite_val; 113 bool overwrite_mem; 114 uint64_t condition_val64; 115 uint32_t condition_val32; 116 117 int offset = tags->extractBlkOffset(pkt->getAddr()); 118 uint8_t *blk_data = blk->data + offset; 119 120 assert(sizeof(uint64_t) >= pkt->getSize()); 121 122 overwrite_mem = true; 123 // keep a copy of our possible write value, and copy what is at the 124 // memory address into the packet 125 pkt->writeData((uint8_t *)&overwrite_val); 126 pkt->setData(blk_data); 127 128 if (pkt->req->isCondSwap()) { 129 if (pkt->getSize() == sizeof(uint64_t)) { 130 condition_val64 = pkt->req->getExtraData(); 131 overwrite_mem = !std::memcmp(&condition_val64, blk_data, 132 sizeof(uint64_t)); 133 } else if (pkt->getSize() == sizeof(uint32_t)) { 134 condition_val32 = (uint32_t)pkt->req->getExtraData(); 135 overwrite_mem = !std::memcmp(&condition_val32, blk_data, 136 sizeof(uint32_t)); 137 } else 138 panic("Invalid size for conditional read/write\n"); 139 } 140 141 if (overwrite_mem) { 142 std::memcpy(blk_data, &overwrite_val, pkt->getSize()); 143 blk->status |= BlkDirty; 144 } 145} 146 147 148void 149Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, 150 bool deferred_response, bool pending_downgrade) 151{ 152 assert(pkt->isRequest()); 153 154 assert(blk && blk->isValid()); 155 // Occasionally this is not true... if we are a lower-level cache 156 // satisfying a string of Read and ReadEx requests from 157 // upper-level caches, a Read will mark the block as shared but we 158 // can satisfy a following ReadEx anyway since we can rely on the 159 // Read requester(s) to have buffered the ReadEx snoop and to 160 // invalidate their blocks after receiving them. 
161 // assert(!pkt->needsWritable() || blk->isWritable()); 162 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize); 163 164 // Check RMW operations first since both isRead() and 165 // isWrite() will be true for them 166 if (pkt->cmd == MemCmd::SwapReq) { 167 cmpAndSwap(blk, pkt); 168 } else if (pkt->isWrite()) { 169 // we have the block in a writable state and can go ahead, 170 // note that the line may be also be considered writable in 171 // downstream caches along the path to memory, but always 172 // Exclusive, and never Modified 173 assert(blk->isWritable()); 174 // Write or WriteLine at the first cache with block in writable state 175 if (blk->checkWrite(pkt)) { 176 pkt->writeDataToBlock(blk->data, blkSize); 177 } 178 // Always mark the line as dirty (and thus transition to the 179 // Modified state) even if we are a failed StoreCond so we 180 // supply data to any snoops that have appended themselves to 181 // this cache before knowing the store will fail. 182 blk->status |= BlkDirty; 183 DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print()); 184 } else if (pkt->isRead()) { 185 if (pkt->isLLSC()) { 186 blk->trackLoadLocked(pkt); 187 } 188 189 // all read responses have a data payload 190 assert(pkt->hasRespData()); 191 pkt->setDataFromBlock(blk->data, blkSize); 192 193 // determine if this read is from a (coherent) cache or not 194 if (pkt->fromCache()) { 195 assert(pkt->getSize() == blkSize); 196 // special handling for coherent block requests from 197 // upper-level caches 198 if (pkt->needsWritable()) { 199 // sanity check 200 assert(pkt->cmd == MemCmd::ReadExReq || 201 pkt->cmd == MemCmd::SCUpgradeFailReq); 202 assert(!pkt->hasSharers()); 203 204 // if we have a dirty copy, make sure the recipient 205 // keeps it marked dirty (in the modified state) 206 if (blk->isDirty()) { 207 pkt->setCacheResponding(); 208 blk->status &= ~BlkDirty; 209 } 210 } else if (blk->isWritable() && !pending_downgrade && 211 !pkt->hasSharers() && 212 pkt->cmd != MemCmd::ReadCleanReq) { 213 // we can give the requester a writable copy on a read 214 // request if: 215 // - we have a writable copy at this level (& below) 216 // - we don't have a pending snoop from below 217 // signaling another read request 218 // - no other cache above has a copy (otherwise it 219 // would have set hasSharers flag when 220 // snooping the packet) 221 // - the read has explicitly asked for a clean 222 // copy of the line 223 if (blk->isDirty()) { 224 // special considerations if we're owner: 225 if (!deferred_response) { 226 // respond with the line in Modified state 227 // (cacheResponding set, hasSharers not set) 228 pkt->setCacheResponding(); 229 230 // if this cache is mostly inclusive, we 231 // keep the block in the Exclusive state, 232 // and pass it upwards as Modified 233 // (writable and dirty), hence we have 234 // multiple caches, all on the same path 235 // towards memory, all considering the 236 // same block writable, but only one 237 // considering it Modified 238 239 // we get away with multiple caches (on 240 // the same path to memory) considering 241 // the block writeable as we always enter 242 // the cache hierarchy through a cache, 243 // and first snoop upwards in all other 244 // branches 245 blk->status &= ~BlkDirty; 246 } else { 247 // if we're responding after our own miss, 248 // there's a window where the recipient didn't 249 // know it was getting ownership and may not 250 // have responded to snoops correctly, so we 251 // have to respond with a shared line 252 
pkt->setHasSharers(); 253 } 254 } 255 } else { 256 // otherwise only respond with a shared copy 257 pkt->setHasSharers(); 258 } 259 } 260 } else if (pkt->isUpgrade()) { 261 // sanity check 262 assert(!pkt->hasSharers()); 263 264 if (blk->isDirty()) { 265 // we were in the Owned state, and a cache above us that 266 // has the line in Shared state needs to be made aware 267 // that the data it already has is in fact dirty 268 pkt->setCacheResponding(); 269 blk->status &= ~BlkDirty; 270 } 271 } else { 272 assert(pkt->isInvalidate()); 273 invalidateBlock(blk); 274 DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__, 275 pkt->print()); 276 } 277} 278 279///////////////////////////////////////////////////// 280// 281// Access path: requests coming in from the CPU side 282// 283///////////////////////////////////////////////////// 284 285bool 286Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, 287 PacketList &writebacks) 288{ 289 // sanity check 290 assert(pkt->isRequest()); 291 292 chatty_assert(!(isReadOnly && pkt->isWrite()), 293 "Should never see a write in a read-only cache %s\n", 294 name()); 295 296 DPRINTF(CacheVerbose, "%s for %s\n", __func__, pkt->print()); 297 298 if (pkt->req->isUncacheable()) { 299 DPRINTF(Cache, "uncacheable: %s\n", pkt->print()); 300 301 // flush and invalidate any existing block 302 CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure())); 303 if (old_blk && old_blk->isValid()) { 304 if (old_blk->isDirty() || writebackClean) 305 writebacks.push_back(writebackBlk(old_blk)); 306 else 307 writebacks.push_back(cleanEvictBlk(old_blk)); 308 tags->invalidate(old_blk); 309 old_blk->invalidate(); 310 } 311 312 blk = nullptr; 313 // lookupLatency is the latency in case the request is uncacheable. 314 lat = lookupLatency; 315 return false; 316 } 317 318 ContextID id = pkt->req->hasContextId() ? 319 pkt->req->contextId() : InvalidContextID; 320 // Here lat is the value passed as parameter to accessBlock() function 321 // that can modify its value. 322 blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat, id); 323 324 DPRINTF(Cache, "%s %s\n", pkt->print(), 325 blk ? "hit " + blk->print() : "miss"); 326 327 328 if (pkt->isEviction()) { 329 // We check for presence of block in above caches before issuing 330 // Writeback or CleanEvict to write buffer. Therefore the only 331 // possible cases can be of a CleanEvict packet coming from above 332 // encountering a Writeback generated in this cache peer cache and 333 // waiting in the write buffer. Cases of upper level peer caches 334 // generating CleanEvict and Writeback or simply CleanEvict and 335 // CleanEvict almost simultaneously will be caught by snoops sent out 336 // by crossbar. 337 WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(), 338 pkt->isSecure()); 339 if (wb_entry) { 340 assert(wb_entry->getNumTargets() == 1); 341 PacketPtr wbPkt = wb_entry->getTarget()->pkt; 342 assert(wbPkt->isWriteback()); 343 344 if (pkt->isCleanEviction()) { 345 // The CleanEvict and WritebackClean snoops into other 346 // peer caches of the same level while traversing the 347 // crossbar. If a copy of the block is found, the 348 // packet is deleted in the crossbar. Hence, none of 349 // the other upper level caches connected to this 350 // cache have the block, so we can clear the 351 // BLOCK_CACHED flag in the Writeback if set and 352 // discard the CleanEvict by returning true. 
353 wbPkt->clearBlockCached(); 354 return true; 355 } else { 356 assert(pkt->cmd == MemCmd::WritebackDirty); 357 // Dirty writeback from above trumps our clean 358 // writeback... discard here 359 // Note: markInService will remove entry from writeback buffer. 360 markInService(wb_entry); 361 delete wbPkt; 362 } 363 } 364 } 365 366 // Writeback handling is special case. We can write the block into 367 // the cache without having a writeable copy (or any copy at all). 368 if (pkt->isWriteback()) { 369 assert(blkSize == pkt->getSize()); 370 371 // we could get a clean writeback while we are having 372 // outstanding accesses to a block, do the simple thing for 373 // now and drop the clean writeback so that we do not upset 374 // any ordering/decisions about ownership already taken 375 if (pkt->cmd == MemCmd::WritebackClean && 376 mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) { 377 DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, " 378 "dropping\n", pkt->getAddr()); 379 return true; 380 } 381 382 if (blk == nullptr) { 383 // need to do a replacement 384 blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks); 385 if (blk == nullptr) { 386 // no replaceable block available: give up, fwd to next level. 387 incMissCount(pkt); 388 return false; 389 } 390 tags->insertBlock(pkt, blk); 391 392 blk->status = (BlkValid | BlkReadable); 393 if (pkt->isSecure()) { 394 blk->status |= BlkSecure; 395 } 396 } 397 // only mark the block dirty if we got a writeback command, 398 // and leave it as is for a clean writeback 399 if (pkt->cmd == MemCmd::WritebackDirty) { 400 blk->status |= BlkDirty; 401 } 402 // if the packet does not have sharers, it is passing 403 // writable, and we got the writeback in Modified or Exclusive 404 // state, if not we are in the Owned or Shared state 405 if (!pkt->hasSharers()) { 406 blk->status |= BlkWritable; 407 } 408 // nothing else to do; writeback doesn't expect response 409 assert(!pkt->needsResponse()); 410 std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize); 411 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print()); 412 incHitCount(pkt); 413 return true; 414 } else if (pkt->cmd == MemCmd::CleanEvict) { 415 if (blk != nullptr) { 416 // Found the block in the tags, need to stop CleanEvict from 417 // propagating further down the hierarchy. Returning true will 418 // treat the CleanEvict like a satisfied write request and delete 419 // it. 420 return true; 421 } 422 // We didn't find the block here, propagate the CleanEvict further 423 // down the memory hierarchy. Returning false will treat the CleanEvict 424 // like a Writeback which could not find a replaceable block so has to 425 // go to next level. 426 return false; 427 } else if (blk && (pkt->needsWritable() ? blk->isWritable() : 428 blk->isReadable())) { 429 // OK to satisfy access 430 incHitCount(pkt); 431 satisfyRequest(pkt, blk); 432 maintainClusivity(pkt->fromCache(), blk); 433 434 return true; 435 } 436 437 // Can't satisfy access normally... either no block (blk == nullptr) 438 // or have block but need writable 439 440 incMissCount(pkt); 441 442 if (blk == nullptr && pkt->isLLSC() && pkt->isWrite()) { 443 // complete miss on store conditional... 
just give up now 444 pkt->req->setExtraData(0); 445 return true; 446 } 447 448 return false; 449} 450 451void 452Cache::maintainClusivity(bool from_cache, CacheBlk *blk) 453{ 454 if (from_cache && blk && blk->isValid() && !blk->isDirty() && 455 clusivity == Enums::mostly_excl) { 456 // if we have responded to a cache, and our block is still 457 // valid, but not dirty, and this cache is mostly exclusive 458 // with respect to the cache above, drop the block 459 invalidateBlock(blk); 460 } 461} 462 463void 464Cache::doWritebacks(PacketList& writebacks, Tick forward_time) 465{ 466 while (!writebacks.empty()) { 467 PacketPtr wbPkt = writebacks.front(); 468 // We use forwardLatency here because we are copying writebacks to 469 // write buffer. Call isCachedAbove for both Writebacks and 470 // CleanEvicts. If isCachedAbove returns true we set BLOCK_CACHED flag 471 // in Writebacks and discard CleanEvicts. 472 if (isCachedAbove(wbPkt)) { 473 if (wbPkt->cmd == MemCmd::CleanEvict) { 474 // Delete CleanEvict because cached copies exist above. The 475 // packet destructor will delete the request object because 476 // this is a non-snoop request packet which does not require a 477 // response. 478 delete wbPkt; 479 } else if (wbPkt->cmd == MemCmd::WritebackClean) { 480 // clean writeback, do not send since the block is 481 // still cached above 482 assert(writebackClean); 483 delete wbPkt; 484 } else { 485 assert(wbPkt->cmd == MemCmd::WritebackDirty); 486 // Set BLOCK_CACHED flag in Writeback and send below, so that 487 // the Writeback does not reset the bit corresponding to this 488 // address in the snoop filter below. 489 wbPkt->setBlockCached(); 490 allocateWriteBuffer(wbPkt, forward_time); 491 } 492 } else { 493 // If the block is not cached above, send packet below. Both 494 // CleanEvict and Writeback with BLOCK_CACHED flag cleared will 495 // reset the bit corresponding to this address in the snoop filter 496 // below. 497 allocateWriteBuffer(wbPkt, forward_time); 498 } 499 writebacks.pop_front(); 500 } 501} 502 503void 504Cache::doWritebacksAtomic(PacketList& writebacks) 505{ 506 while (!writebacks.empty()) { 507 PacketPtr wbPkt = writebacks.front(); 508 // Call isCachedAbove for both Writebacks and CleanEvicts. If 509 // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks 510 // and discard CleanEvicts. 511 if (isCachedAbove(wbPkt, false)) { 512 if (wbPkt->cmd == MemCmd::WritebackDirty) { 513 // Set BLOCK_CACHED flag in Writeback and send below, 514 // so that the Writeback does not reset the bit 515 // corresponding to this address in the snoop filter 516 // below. We can discard CleanEvicts because cached 517 // copies exist above. Atomic mode isCachedAbove 518 // modifies packet to set BLOCK_CACHED flag 519 memSidePort->sendAtomic(wbPkt); 520 } 521 } else { 522 // If the block is not cached above, send packet below. Both 523 // CleanEvict and Writeback with BLOCK_CACHED flag cleared will 524 // reset the bit corresponding to this address in the snoop filter 525 // below. 526 memSidePort->sendAtomic(wbPkt); 527 } 528 writebacks.pop_front(); 529 // In case of CleanEvicts, the packet destructor will delete the 530 // request object because this is a non-snoop request packet which 531 // does not require a response. 
532 delete wbPkt; 533 } 534} 535 536 537void 538Cache::recvTimingSnoopResp(PacketPtr pkt) 539{ 540 DPRINTF(Cache, "%s for %s\n", __func__, pkt->print()); 541 542 assert(pkt->isResponse()); 543 assert(!system->bypassCaches()); 544 545 // determine if the response is from a snoop request we created 546 // (in which case it should be in the outstandingSnoop), or if we 547 // merely forwarded someone else's snoop request 548 const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) == 549 outstandingSnoop.end(); 550 551 if (!forwardAsSnoop) { 552 // the packet came from this cache, so sink it here and do not 553 // forward it 554 assert(pkt->cmd == MemCmd::HardPFResp); 555 556 outstandingSnoop.erase(pkt->req); 557 558 DPRINTF(Cache, "Got prefetch response from above for addr " 559 "%#llx (%s)\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns"); 560 recvTimingResp(pkt); 561 return; 562 } 563 564 // forwardLatency is set here because there is a response from an 565 // upper level cache. 566 // To pay the delay that occurs if the packet comes from the bus, 567 // we charge also headerDelay. 568 Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay; 569 // Reset the timing of the packet. 570 pkt->headerDelay = pkt->payloadDelay = 0; 571 memSidePort->schedTimingSnoopResp(pkt, snoop_resp_time); 572} 573 574void 575Cache::promoteWholeLineWrites(PacketPtr pkt) 576{ 577 // Cache line clearing instructions 578 if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) && 579 (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) { 580 pkt->cmd = MemCmd::WriteLineReq; 581 DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n"); 582 } 583} 584 585bool 586Cache::recvTimingReq(PacketPtr pkt) 587{ 588 DPRINTF(CacheTags, "%s tags: %s\n", __func__, tags->print()); 589 590 assert(pkt->isRequest()); 591 592 // Just forward the packet if caches are disabled. 
593 if (system->bypassCaches()) { 594 // @todo This should really enqueue the packet rather 595 bool M5_VAR_USED success = memSidePort->sendTimingReq(pkt); 596 assert(success); 597 return true; 598 } 599 600 promoteWholeLineWrites(pkt); 601 602 if (pkt->cacheResponding()) { 603 // a cache above us (but not where the packet came from) is 604 // responding to the request, in other words it has the line 605 // in Modified or Owned state 606 DPRINTF(Cache, "Cache above responding to %s: not responding\n", 607 pkt->print()); 608 609 // if the packet needs the block to be writable, and the cache 610 // that has promised to respond (setting the cache responding 611 // flag) is not providing writable (it is in Owned rather than 612 // the Modified state), we know that there may be other Shared 613 // copies in the system; go out and invalidate them all 614 assert(pkt->needsWritable() && !pkt->responderHadWritable()); 615 616 // an upstream cache that had the line in Owned state 617 // (dirty, but not writable), is responding and thus 618 // transferring the dirty line from one branch of the 619 // cache hierarchy to another 620 621 // send out an express snoop and invalidate all other 622 // copies (snooping a packet that needs writable is the 623 // same as an invalidation), thus turning the Owned line 624 // into a Modified line, note that we don't invalidate the 625 // block in the current cache or any other cache on the 626 // path to memory 627 628 // create a downstream express snoop with cleared packet 629 // flags, there is no need to allocate any data as the 630 // packet is merely used to co-ordinate state transitions 631 Packet *snoop_pkt = new Packet(pkt, true, false); 632 633 // also reset the bus time that the original packet has 634 // not yet paid for 635 snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0; 636 637 // make this an instantaneous express snoop, and let the 638 // other caches in the system know that the another cache 639 // is responding, because we have found the authorative 640 // copy (Modified or Owned) that will supply the right 641 // data 642 snoop_pkt->setExpressSnoop(); 643 snoop_pkt->setCacheResponding(); 644 645 // this express snoop travels towards the memory, and at 646 // every crossbar it is snooped upwards thus reaching 647 // every cache in the system 648 bool M5_VAR_USED success = memSidePort->sendTimingReq(snoop_pkt); 649 // express snoops always succeed 650 assert(success); 651 652 // main memory will delete the snoop packet 653 654 // queue for deletion, as opposed to immediate deletion, as 655 // the sending cache is still relying on the packet 656 pendingDelete.reset(pkt); 657 658 // no need to take any further action in this particular cache 659 // as an upstram cache has already committed to responding, 660 // and we have already sent out any express snoops in the 661 // section above to ensure all other copies in the system are 662 // invalidated 663 return true; 664 } 665 666 // anything that is merely forwarded pays for the forward latency and 667 // the delay provided by the crossbar 668 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 669 670 // We use lookupLatency here because it is used to specify the latency 671 // to access. 672 Cycles lat = lookupLatency; 673 CacheBlk *blk = nullptr; 674 bool satisfied = false; 675 { 676 PacketList writebacks; 677 // Note that lat is passed by reference here. The function 678 // access() calls accessBlock() which can modify lat value. 
679 satisfied = access(pkt, blk, lat, writebacks); 680 681 // copy writebacks to write buffer here to ensure they logically 682 // proceed anything happening below 683 doWritebacks(writebacks, forward_time); 684 } 685 686 // Here we charge the headerDelay that takes into account the latencies 687 // of the bus, if the packet comes from it. 688 // The latency charged it is just lat that is the value of lookupLatency 689 // modified by access() function, or if not just lookupLatency. 690 // In case of a hit we are neglecting response latency. 691 // In case of a miss we are neglecting forward latency. 692 Tick request_time = clockEdge(lat) + pkt->headerDelay; 693 // Here we reset the timing of the packet. 694 pkt->headerDelay = pkt->payloadDelay = 0; 695 696 // track time of availability of next prefetch, if any 697 Tick next_pf_time = MaxTick; 698 699 bool needsResponse = pkt->needsResponse(); 700 701 if (satisfied) { 702 // should never be satisfying an uncacheable access as we 703 // flush and invalidate any existing block as part of the 704 // lookup 705 assert(!pkt->req->isUncacheable()); 706 707 // hit (for all other request types) 708 709 if (prefetcher && (prefetchOnAccess || 710 (blk && blk->wasPrefetched()))) { 711 if (blk) 712 blk->status &= ~BlkHWPrefetched; 713 714 // Don't notify on SWPrefetch 715 if (!pkt->cmd.isSWPrefetch()) 716 next_pf_time = prefetcher->notify(pkt); 717 } 718 719 if (needsResponse) { 720 pkt->makeTimingResponse(); 721 // @todo: Make someone pay for this 722 pkt->headerDelay = pkt->payloadDelay = 0; 723 724 // In this case we are considering request_time that takes 725 // into account the delay of the xbar, if any, and just 726 // lat, neglecting responseLatency, modelling hit latency 727 // just as lookupLatency or or the value of lat overriden 728 // by access(), that calls accessBlock() function. 729 cpuSidePort->schedTimingResp(pkt, request_time, true); 730 } else { 731 DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__, 732 pkt->print()); 733 734 // queue the packet for deletion, as the sending cache is 735 // still relying on it; if the block is found in access(), 736 // CleanEvict and Writeback messages will be deleted 737 // here as well 738 pendingDelete.reset(pkt); 739 } 740 } else { 741 // miss 742 743 Addr blk_addr = blockAlign(pkt->getAddr()); 744 745 // ignore any existing MSHR if we are dealing with an 746 // uncacheable request 747 MSHR *mshr = pkt->req->isUncacheable() ? nullptr : 748 mshrQueue.findMatch(blk_addr, pkt->isSecure()); 749 750 // Software prefetch handling: 751 // To keep the core from waiting on data it won't look at 752 // anyway, send back a response with dummy data. Miss handling 753 // will continue asynchronously. Unfortunately, the core will 754 // insist upon freeing original Packet/Request, so we have to 755 // create a new pair with a different lifecycle. Note that this 756 // processing happens before any MSHR munging on the behalf of 757 // this request because this new Request will be the one stored 758 // into the MSHRs, not the original. 759 if (pkt->cmd.isSWPrefetch()) { 760 assert(needsResponse); 761 assert(pkt->req->hasPaddr()); 762 assert(!pkt->req->isUncacheable()); 763 764 // There's no reason to add a prefetch as an additional target 765 // to an existing MSHR. If an outstanding request is already 766 // in progress, there is nothing for the prefetch to do. 767 // If this is the case, we don't even create a request at all. 
768 PacketPtr pf = nullptr; 769 770 if (!mshr) { 771 // copy the request and create a new SoftPFReq packet 772 RequestPtr req = new Request(pkt->req->getPaddr(), 773 pkt->req->getSize(), 774 pkt->req->getFlags(), 775 pkt->req->masterId()); 776 pf = new Packet(req, pkt->cmd); 777 pf->allocate(); 778 assert(pf->getAddr() == pkt->getAddr()); 779 assert(pf->getSize() == pkt->getSize()); 780 } 781 782 pkt->makeTimingResponse(); 783 784 // request_time is used here, taking into account lat and the delay 785 // charged if the packet comes from the xbar. 786 cpuSidePort->schedTimingResp(pkt, request_time, true); 787 788 // If an outstanding request is in progress (we found an 789 // MSHR) this is set to null 790 pkt = pf; 791 } 792 793 if (mshr) { 794 /// MSHR hit 795 /// @note writebacks will be checked in getNextMSHR() 796 /// for any conflicting requests to the same block 797 798 //@todo remove hw_pf here 799 800 // Coalesce unless it was a software prefetch (see above). 801 if (pkt) { 802 assert(!pkt->isWriteback()); 803 // CleanEvicts corresponding to blocks which have 804 // outstanding requests in MSHRs are simply sunk here 805 if (pkt->cmd == MemCmd::CleanEvict) { 806 pendingDelete.reset(pkt); 807 } else { 808 DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__, 809 pkt->print()); 810 811 assert(pkt->req->masterId() < system->maxMasters()); 812 mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++; 813 // We use forward_time here because it is the same 814 // considering new targets. We have multiple 815 // requests for the same address here. It 816 // specifies the latency to allocate an internal 817 // buffer and to schedule an event to the queued 818 // port and also takes into account the additional 819 // delay of the xbar. 820 mshr->allocateTarget(pkt, forward_time, order++, 821 allocOnFill(pkt->cmd)); 822 if (mshr->getNumTargets() == numTarget) { 823 noTargetMSHR = mshr; 824 setBlocked(Blocked_NoTargets); 825 // need to be careful with this... if this mshr isn't 826 // ready yet (i.e. time > curTick()), we don't want to 827 // move it ahead of mshrs that are ready 828 // mshrQueue.moveToFront(mshr); 829 } 830 } 831 // We should call the prefetcher reguardless if the request is 832 // satisfied or not, reguardless if the request is in the MSHR 833 // or not. The request could be a ReadReq hit, but still not 834 // satisfied (potentially because of a prior write to the same 835 // cache line. So, even when not satisfied, tehre is an MSHR 836 // already allocated for this, we need to let the prefetcher 837 // know about the request 838 if (prefetcher) { 839 // Don't notify on SWPrefetch 840 if (!pkt->cmd.isSWPrefetch()) 841 next_pf_time = prefetcher->notify(pkt); 842 } 843 } 844 } else { 845 // no MSHR 846 assert(pkt->req->masterId() < system->maxMasters()); 847 if (pkt->req->isUncacheable()) { 848 mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++; 849 } else { 850 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++; 851 } 852 853 if (pkt->isEviction() || 854 (pkt->req->isUncacheable() && pkt->isWrite())) { 855 // We use forward_time here because there is an 856 // uncached memory write, forwarded to WriteBuffer. 857 allocateWriteBuffer(pkt, forward_time); 858 } else { 859 if (blk && blk->isValid()) { 860 // should have flushed and have no valid block 861 assert(!pkt->req->isUncacheable()); 862 863 // If we have a write miss to a valid block, we 864 // need to mark the block non-readable. 
Otherwise 865 // if we allow reads while there's an outstanding 866 // write miss, the read could return stale data 867 // out of the cache block... a more aggressive 868 // system could detect the overlap (if any) and 869 // forward data out of the MSHRs, but we don't do 870 // that yet. Note that we do need to leave the 871 // block valid so that it stays in the cache, in 872 // case we get an upgrade response (and hence no 873 // new data) when the write miss completes. 874 // As long as CPUs do proper store/load forwarding 875 // internally, and have a sufficiently weak memory 876 // model, this is probably unnecessary, but at some 877 // point it must have seemed like we needed it... 878 assert(pkt->needsWritable()); 879 assert(!blk->isWritable()); 880 blk->status &= ~BlkReadable; 881 } 882 // Here we are using forward_time, modelling the latency of 883 // a miss (outbound) just as forwardLatency, neglecting the 884 // lookupLatency component. 885 allocateMissBuffer(pkt, forward_time); 886 } 887 888 if (prefetcher) { 889 // Don't notify on SWPrefetch 890 if (!pkt->cmd.isSWPrefetch()) 891 next_pf_time = prefetcher->notify(pkt); 892 } 893 } 894 } 895 896 if (next_pf_time != MaxTick) 897 schedMemSideSendEvent(next_pf_time); 898 899 return true; 900} 901 902PacketPtr 903Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk, 904 bool needsWritable) const 905{ 906 // should never see evictions here 907 assert(!cpu_pkt->isEviction()); 908 909 bool blkValid = blk && blk->isValid(); 910 911 if (cpu_pkt->req->isUncacheable() || 912 (!blkValid && cpu_pkt->isUpgrade()) || 913 cpu_pkt->cmd == MemCmd::InvalidateReq) { 914 // uncacheable requests and upgrades from upper-level caches 915 // that missed completely just go through as is 916 return nullptr; 917 } 918 919 assert(cpu_pkt->needsResponse()); 920 921 MemCmd cmd; 922 // @TODO make useUpgrades a parameter. 923 // Note that ownership protocols require upgrade, otherwise a 924 // write miss on a shared owned block will generate a ReadExcl, 925 // which will clobber the owned copy. 926 const bool useUpgrades = true; 927 if (cpu_pkt->cmd == MemCmd::WriteLineReq) { 928 assert(!blkValid || !blk->isWritable()); 929 // forward as invalidate to all other caches, this gives us 930 // the line in Exclusive state, and invalidates all other 931 // copies 932 cmd = MemCmd::InvalidateReq; 933 } else if (blkValid && useUpgrades) { 934 // only reason to be here is that blk is read only and we need 935 // it to be writable 936 assert(needsWritable); 937 assert(!blk->isWritable()); 938 cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq; 939 } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq || 940 cpu_pkt->cmd == MemCmd::StoreCondFailReq) { 941 // Even though this SC will fail, we still need to send out the 942 // request and get the data to supply it to other snoopers in the case 943 // where the determination the StoreCond fails is delayed due to 944 // all caches not being on the same local bus. 945 cmd = MemCmd::SCUpgradeFailReq; 946 } else { 947 // block is invalid 948 cmd = needsWritable ? MemCmd::ReadExReq : 949 (isReadOnly ? 
MemCmd::ReadCleanReq : MemCmd::ReadSharedReq); 950 } 951 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize); 952 953 // if there are upstream caches that have already marked the 954 // packet as having sharers (not passing writable), pass that info 955 // downstream 956 if (cpu_pkt->hasSharers() && !needsWritable) { 957 // note that cpu_pkt may have spent a considerable time in the 958 // MSHR queue and that the information could possibly be out 959 // of date, however, there is no harm in conservatively 960 // assuming the block has sharers 961 pkt->setHasSharers(); 962 DPRINTF(Cache, "%s: passing hasSharers from %s to %s\n", 963 __func__, cpu_pkt->print(), pkt->print()); 964 } 965 966 // the packet should be block aligned 967 assert(pkt->getAddr() == blockAlign(pkt->getAddr())); 968 969 pkt->allocate(); 970 DPRINTF(Cache, "%s: created %s from %s\n", __func__, pkt->print(), 971 cpu_pkt->print()); 972 return pkt; 973} 974 975 976Tick 977Cache::recvAtomic(PacketPtr pkt) 978{ 979 // We are in atomic mode so we pay just for lookupLatency here. 980 Cycles lat = lookupLatency; 981 982 // Forward the request if the system is in cache bypass mode. 983 if (system->bypassCaches()) 984 return ticksToCycles(memSidePort->sendAtomic(pkt)); 985 986 promoteWholeLineWrites(pkt); 987 988 // follow the same flow as in recvTimingReq, and check if a cache 989 // above us is responding 990 if (pkt->cacheResponding()) { 991 DPRINTF(Cache, "Cache above responding to %s: not responding\n", 992 pkt->print()); 993 994 // if a cache is responding, and it had the line in Owned 995 // rather than Modified state, we need to invalidate any 996 // copies that are not on the same path to memory 997 assert(pkt->needsWritable() && !pkt->responderHadWritable()); 998 lat += ticksToCycles(memSidePort->sendAtomic(pkt)); 999 1000 return lat * clockPeriod(); 1001 } 1002 1003 // should assert here that there are no outstanding MSHRs or 1004 // writebacks... that would mean that someone used an atomic 1005 // access in timing mode 1006 1007 CacheBlk *blk = nullptr; 1008 PacketList writebacks; 1009 bool satisfied = access(pkt, blk, lat, writebacks); 1010 1011 // handle writebacks resulting from the access here to ensure they 1012 // logically proceed anything happening below 1013 doWritebacksAtomic(writebacks); 1014 1015 if (!satisfied) { 1016 // MISS 1017 1018 // deal with the packets that go through the write path of 1019 // the cache, i.e. any evictions and uncacheable writes 1020 if (pkt->isEviction() || 1021 (pkt->req->isUncacheable() && pkt->isWrite())) { 1022 lat += ticksToCycles(memSidePort->sendAtomic(pkt)); 1023 return lat * clockPeriod(); 1024 } 1025 // only misses left 1026 1027 PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable()); 1028 1029 bool is_forward = (bus_pkt == nullptr); 1030 1031 if (is_forward) { 1032 // just forwarding the same request to the next level 1033 // no local cache operation involved 1034 bus_pkt = pkt; 1035 } 1036 1037 DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__, 1038 bus_pkt->print()); 1039 1040#if TRACING_ON 1041 CacheBlk::State old_state = blk ? 
blk->status : 0; 1042#endif 1043 1044 lat += ticksToCycles(memSidePort->sendAtomic(bus_pkt)); 1045 1046 bool is_invalidate = bus_pkt->isInvalidate(); 1047 1048 // We are now dealing with the response handling 1049 DPRINTF(Cache, "%s: Receive response: %s in state %i\n", __func__, 1050 bus_pkt->print(), old_state); 1051 1052 // If packet was a forward, the response (if any) is already 1053 // in place in the bus_pkt == pkt structure, so we don't need 1054 // to do anything. Otherwise, use the separate bus_pkt to 1055 // generate response to pkt and then delete it. 1056 if (!is_forward) { 1057 if (pkt->needsResponse()) { 1058 assert(bus_pkt->isResponse()); 1059 if (bus_pkt->isError()) { 1060 pkt->makeAtomicResponse(); 1061 pkt->copyError(bus_pkt); 1062 } else if (pkt->cmd == MemCmd::WriteLineReq) { 1063 // note the use of pkt, not bus_pkt here. 1064 1065 // write-line request to the cache that promoted 1066 // the write to a whole line 1067 blk = handleFill(pkt, blk, writebacks, 1068 allocOnFill(pkt->cmd)); 1069 assert(blk != NULL); 1070 is_invalidate = false; 1071 satisfyRequest(pkt, blk); 1072 } else if (bus_pkt->isRead() || 1073 bus_pkt->cmd == MemCmd::UpgradeResp) { 1074 // we're updating cache state to allow us to 1075 // satisfy the upstream request from the cache 1076 blk = handleFill(bus_pkt, blk, writebacks, 1077 allocOnFill(pkt->cmd)); 1078 satisfyRequest(pkt, blk); 1079 maintainClusivity(pkt->fromCache(), blk); 1080 } else { 1081 // we're satisfying the upstream request without 1082 // modifying cache state, e.g., a write-through 1083 pkt->makeAtomicResponse(); 1084 } 1085 } 1086 delete bus_pkt; 1087 } 1088 1089 if (is_invalidate && blk && blk->isValid()) { 1090 invalidateBlock(blk); 1091 } 1092 } 1093 1094 // Note that we don't invoke the prefetcher at all in atomic mode. 1095 // It's not clear how to do it properly, particularly for 1096 // prefetchers that aggressively generate prefetch candidates and 1097 // rely on bandwidth contention to throttle them; these will tend 1098 // to pollute the cache in atomic mode since there is no bandwidth 1099 // contention. If we ever do want to enable prefetching in atomic 1100 // mode, though, this is the place to do it... see timingAccess() 1101 // for an example (though we'd want to issue the prefetch(es) 1102 // immediately rather than calling requestMemSideBus() as we do 1103 // there). 1104 1105 // do any writebacks resulting from the response handling 1106 doWritebacksAtomic(writebacks); 1107 1108 // if we used temp block, check to see if its valid and if so 1109 // clear it out, but only do so after the call to recvAtomic is 1110 // finished so that any downstream observers (such as a snoop 1111 // filter), first see the fill, and only then see the eviction 1112 if (blk == tempBlock && tempBlock->isValid()) { 1113 // the atomic CPU calls recvAtomic for fetch and load/store 1114 // sequentuially, and we may already have a tempBlock 1115 // writeback from the fetch that we have not yet sent 1116 if (tempBlockWriteback) { 1117 // if that is the case, write the prevoius one back, and 1118 // do not schedule any new event 1119 writebackTempBlockAtomic(); 1120 } else { 1121 // the writeback/clean eviction happens after the call to 1122 // recvAtomic has finished (but before any successive 1123 // calls), so that the response handling from the fill is 1124 // allowed to happen first 1125 schedule(writebackTempBlockAtomicEvent, curTick()); 1126 } 1127 1128 tempBlockWriteback = (blk->isDirty() || writebackClean) ? 
1129 writebackBlk(blk) : cleanEvictBlk(blk); 1130 blk->invalidate(); 1131 } 1132 1133 if (pkt->needsResponse()) { 1134 pkt->makeAtomicResponse(); 1135 } 1136 1137 return lat * clockPeriod(); 1138} 1139 1140 1141void 1142Cache::functionalAccess(PacketPtr pkt, bool fromCpuSide) 1143{ 1144 if (system->bypassCaches()) { 1145 // Packets from the memory side are snoop request and 1146 // shouldn't happen in bypass mode. 1147 assert(fromCpuSide); 1148 1149 // The cache should be flushed if we are in cache bypass mode, 1150 // so we don't need to check if we need to update anything. 1151 memSidePort->sendFunctional(pkt); 1152 return; 1153 } 1154 1155 Addr blk_addr = blockAlign(pkt->getAddr()); 1156 bool is_secure = pkt->isSecure(); 1157 CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure); 1158 MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure); 1159 1160 pkt->pushLabel(name()); 1161 1162 CacheBlkPrintWrapper cbpw(blk); 1163 1164 // Note that just because an L2/L3 has valid data doesn't mean an 1165 // L1 doesn't have a more up-to-date modified copy that still 1166 // needs to be found. As a result we always update the request if 1167 // we have it, but only declare it satisfied if we are the owner. 1168 1169 // see if we have data at all (owned or otherwise) 1170 bool have_data = blk && blk->isValid() 1171 && pkt->checkFunctional(&cbpw, blk_addr, is_secure, blkSize, 1172 blk->data); 1173 1174 // data we have is dirty if marked as such or if we have an 1175 // in-service MSHR that is pending a modified line 1176 bool have_dirty = 1177 have_data && (blk->isDirty() || 1178 (mshr && mshr->inService && mshr->isPendingModified())); 1179 1180 bool done = have_dirty 1181 || cpuSidePort->checkFunctional(pkt) 1182 || mshrQueue.checkFunctional(pkt, blk_addr) 1183 || writeBuffer.checkFunctional(pkt, blk_addr) 1184 || memSidePort->checkFunctional(pkt); 1185 1186 DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(), 1187 (blk && blk->isValid()) ? "valid " : "", 1188 have_data ? "data " : "", done ? 
"done " : ""); 1189 1190 // We're leaving the cache, so pop cache->name() label 1191 pkt->popLabel(); 1192 1193 if (done) { 1194 pkt->makeResponse(); 1195 } else { 1196 // if it came as a request from the CPU side then make sure it 1197 // continues towards the memory side 1198 if (fromCpuSide) { 1199 memSidePort->sendFunctional(pkt); 1200 } else if (cpuSidePort->isSnooping()) { 1201 // if it came from the memory side, it must be a snoop request 1202 // and we should only forward it if we are forwarding snoops 1203 cpuSidePort->sendFunctionalSnoop(pkt); 1204 } 1205 } 1206} 1207 1208 1209///////////////////////////////////////////////////// 1210// 1211// Response handling: responses from the memory side 1212// 1213///////////////////////////////////////////////////// 1214 1215 1216void 1217Cache::handleUncacheableWriteResp(PacketPtr pkt) 1218{ 1219 Tick completion_time = clockEdge(responseLatency) + 1220 pkt->headerDelay + pkt->payloadDelay; 1221 1222 // Reset the bus additional time as it is now accounted for 1223 pkt->headerDelay = pkt->payloadDelay = 0; 1224 1225 cpuSidePort->schedTimingResp(pkt, completion_time, true); 1226} 1227 1228void 1229Cache::recvTimingResp(PacketPtr pkt) 1230{ 1231 assert(pkt->isResponse()); 1232 1233 // all header delay should be paid for by the crossbar, unless 1234 // this is a prefetch response from above 1235 panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp, 1236 "%s saw a non-zero packet delay\n", name()); 1237 1238 bool is_error = pkt->isError(); 1239 1240 if (is_error) { 1241 DPRINTF(Cache, "%s: Cache received %s with error\n", __func__, 1242 pkt->print()); 1243 } 1244 1245 DPRINTF(Cache, "%s: Handling response %s\n", __func__, 1246 pkt->print()); 1247 1248 // if this is a write, we should be looking at an uncacheable 1249 // write 1250 if (pkt->isWrite()) { 1251 assert(pkt->req->isUncacheable()); 1252 handleUncacheableWriteResp(pkt); 1253 return; 1254 } 1255 1256 // we have dealt with any (uncacheable) writes above, from here on 1257 // we know we are dealing with an MSHR due to a miss or a prefetch 1258 MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState()); 1259 assert(mshr); 1260 1261 if (mshr == noTargetMSHR) { 1262 // we always clear at least one target 1263 clearBlocked(Blocked_NoTargets); 1264 noTargetMSHR = nullptr; 1265 } 1266 1267 // Initial target is used just for stats 1268 MSHR::Target *initial_tgt = mshr->getTarget(); 1269 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex(); 1270 Tick miss_latency = curTick() - initial_tgt->recvTime; 1271 1272 if (pkt->req->isUncacheable()) { 1273 assert(pkt->req->masterId() < system->maxMasters()); 1274 mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] += 1275 miss_latency; 1276 } else { 1277 assert(pkt->req->masterId() < system->maxMasters()); 1278 mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] += 1279 miss_latency; 1280 } 1281 1282 bool wasFull = mshrQueue.isFull(); 1283 1284 PacketList writebacks; 1285 1286 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 1287 1288 // upgrade deferred targets if the response has no sharers, and is 1289 // thus passing writable 1290 if (!pkt->hasSharers()) { 1291 mshr->promoteWritable(); 1292 } 1293 1294 bool is_fill = !mshr->isForward && 1295 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp); 1296 1297 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure()); 1298 1299 if (is_fill && !is_error) { 1300 DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n", 1301 pkt->getAddr()); 1302 1303 blk = 
handleFill(pkt, blk, writebacks, mshr->allocOnFill()); 1304 assert(blk != nullptr); 1305 } 1306 1307 // allow invalidation responses originating from write-line 1308 // requests to be discarded 1309 bool is_invalidate = pkt->isInvalidate(); 1310 1311 // First offset for critical word first calculations 1312 int initial_offset = initial_tgt->pkt->getOffset(blkSize); 1313 1314 bool from_cache = false; 1315 MSHR::TargetList targets = mshr->extractServiceableTargets(pkt); 1316 for (auto &target: targets) { 1317 Packet *tgt_pkt = target.pkt; 1318 switch (target.source) { 1319 case MSHR::Target::FromCPU: 1320 Tick completion_time; 1321 // Here we charge on completion_time the delay of the xbar if the 1322 // packet comes from it, charged on headerDelay. 1323 completion_time = pkt->headerDelay; 1324 1325 // Software prefetch handling for cache closest to core 1326 if (tgt_pkt->cmd.isSWPrefetch()) { 1327 // a software prefetch would have already been ack'd 1328 // immediately with dummy data so the core would be able to 1329 // retire it. This request completes right here, so we 1330 // deallocate it. 1331 delete tgt_pkt->req; 1332 delete tgt_pkt; 1333 break; // skip response 1334 } 1335 1336 // keep track of whether we have responded to another 1337 // cache 1338 from_cache = from_cache || tgt_pkt->fromCache(); 1339 1340 // unlike the other packet flows, where data is found in other 1341 // caches or memory and brought back, write-line requests always 1342 // have the data right away, so the above check for "is fill?" 1343 // cannot actually be determined until examining the stored MSHR 1344 // state. We "catch up" with that logic here, which is duplicated 1345 // from above. 1346 if (tgt_pkt->cmd == MemCmd::WriteLineReq) { 1347 assert(!is_error); 1348 // we got the block in a writable state, so promote 1349 // any deferred targets if possible 1350 mshr->promoteWritable(); 1351 // NB: we use the original packet here and not the response! 1352 blk = handleFill(tgt_pkt, blk, writebacks, 1353 targets.allocOnFill); 1354 assert(blk != nullptr); 1355 1356 // treat as a fill, and discard the invalidation 1357 // response 1358 is_fill = true; 1359 is_invalidate = false; 1360 } 1361 1362 if (is_fill) { 1363 satisfyRequest(tgt_pkt, blk, true, mshr->hasPostDowngrade()); 1364 1365 // How many bytes past the first request is this one 1366 int transfer_offset = 1367 tgt_pkt->getOffset(blkSize) - initial_offset; 1368 if (transfer_offset < 0) { 1369 transfer_offset += blkSize; 1370 } 1371 1372 // If not critical word (offset) return payloadDelay. 1373 // responseLatency is the latency of the return path 1374 // from lower level caches/memory to an upper level cache or 1375 // the core. 1376 completion_time += clockEdge(responseLatency) + 1377 (transfer_offset ? pkt->payloadDelay : 0); 1378 1379 assert(!tgt_pkt->req->isUncacheable()); 1380 1381 assert(tgt_pkt->req->masterId() < system->maxMasters()); 1382 missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] += 1383 completion_time - target.recvTime; 1384 } else if (pkt->cmd == MemCmd::UpgradeFailResp) { 1385 // failed StoreCond upgrade 1386 assert(tgt_pkt->cmd == MemCmd::StoreCondReq || 1387 tgt_pkt->cmd == MemCmd::StoreCondFailReq || 1388 tgt_pkt->cmd == MemCmd::SCUpgradeFailReq); 1389 // responseLatency is the latency of the return path 1390 // from lower level caches/memory to an upper level cache or 1391 // the core. 
1392 completion_time += clockEdge(responseLatency) + 1393 pkt->payloadDelay; 1394 tgt_pkt->req->setExtraData(0); 1395 } else { 1396 // not a cache fill, just forwarding response 1397 // responseLatency is the latency of the return path 1398 // from lower level cahces/memory to the core. 1399 completion_time += clockEdge(responseLatency) + 1400 pkt->payloadDelay; 1401 if (pkt->isRead() && !is_error) { 1402 // sanity check 1403 assert(pkt->getAddr() == tgt_pkt->getAddr()); 1404 assert(pkt->getSize() >= tgt_pkt->getSize()); 1405 1406 tgt_pkt->setData(pkt->getConstPtr<uint8_t>()); 1407 } 1408 } 1409 tgt_pkt->makeTimingResponse(); 1410 // if this packet is an error copy that to the new packet 1411 if (is_error) 1412 tgt_pkt->copyError(pkt); 1413 if (tgt_pkt->cmd == MemCmd::ReadResp && 1414 (is_invalidate || mshr->hasPostInvalidate())) { 1415 // If intermediate cache got ReadRespWithInvalidate, 1416 // propagate that. Response should not have 1417 // isInvalidate() set otherwise. 1418 tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate; 1419 DPRINTF(Cache, "%s: updated cmd to %s\n", __func__, 1420 tgt_pkt->print()); 1421 } 1422 // Reset the bus additional time as it is now accounted for 1423 tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0; 1424 cpuSidePort->schedTimingResp(tgt_pkt, completion_time, true); 1425 break; 1426 1427 case MSHR::Target::FromPrefetcher: 1428 assert(tgt_pkt->cmd == MemCmd::HardPFReq); 1429 if (blk) 1430 blk->status |= BlkHWPrefetched; 1431 delete tgt_pkt->req; 1432 delete tgt_pkt; 1433 break; 1434 1435 case MSHR::Target::FromSnoop: 1436 // I don't believe that a snoop can be in an error state 1437 assert(!is_error); 1438 // response to snoop request 1439 DPRINTF(Cache, "processing deferred snoop...\n"); 1440 assert(!(is_invalidate && !mshr->hasPostInvalidate())); 1441 handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate()); 1442 break; 1443 1444 default: 1445 panic("Illegal target->source enum %d\n", target.source); 1446 } 1447 } 1448 1449 maintainClusivity(from_cache, blk); 1450 1451 if (blk && blk->isValid()) { 1452 // an invalidate response stemming from a write line request 1453 // should not invalidate the block, so check if the 1454 // invalidation should be discarded 1455 if (is_invalidate || mshr->hasPostInvalidate()) { 1456 invalidateBlock(blk); 1457 } else if (mshr->hasPostDowngrade()) { 1458 blk->status &= ~BlkWritable; 1459 } 1460 } 1461 1462 if (mshr->promoteDeferredTargets()) { 1463 // avoid later read getting stale data while write miss is 1464 // outstanding.. 
see comment in timingAccess() 1465 if (blk) { 1466 blk->status &= ~BlkReadable; 1467 } 1468 mshrQueue.markPending(mshr); 1469 schedMemSideSendEvent(clockEdge() + pkt->payloadDelay); 1470 } else { 1471 mshrQueue.deallocate(mshr); 1472 if (wasFull && !mshrQueue.isFull()) { 1473 clearBlocked(Blocked_NoMSHRs); 1474 } 1475 1476 // Request the bus for a prefetch if this deallocation freed enough 1477 // MSHRs for a prefetch to take place 1478 if (prefetcher && mshrQueue.canPrefetch()) { 1479 Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(), 1480 clockEdge()); 1481 if (next_pf_time != MaxTick) 1482 schedMemSideSendEvent(next_pf_time); 1483 } 1484 } 1485 // reset the xbar additional timinig as it is now accounted for 1486 pkt->headerDelay = pkt->payloadDelay = 0; 1487 1488 // copy writebacks to write buffer 1489 doWritebacks(writebacks, forward_time); 1490 1491 // if we used temp block, check to see if its valid and then clear it out 1492 if (blk == tempBlock && tempBlock->isValid()) { 1493 // We use forwardLatency here because we are copying 1494 // Writebacks/CleanEvicts to write buffer. It specifies the latency to 1495 // allocate an internal buffer and to schedule an event to the 1496 // queued port. 1497 if (blk->isDirty() || writebackClean) { 1498 PacketPtr wbPkt = writebackBlk(blk); 1499 allocateWriteBuffer(wbPkt, forward_time); 1500 // Set BLOCK_CACHED flag if cached above. 1501 if (isCachedAbove(wbPkt)) 1502 wbPkt->setBlockCached(); 1503 } else { 1504 PacketPtr wcPkt = cleanEvictBlk(blk); 1505 // Check to see if block is cached above. If not allocate 1506 // write buffer 1507 if (isCachedAbove(wcPkt)) 1508 delete wcPkt; 1509 else 1510 allocateWriteBuffer(wcPkt, forward_time); 1511 } 1512 blk->invalidate(); 1513 } 1514 1515 DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print()); 1516 delete pkt; 1517} 1518 1519PacketPtr 1520Cache::writebackBlk(CacheBlk *blk) 1521{ 1522 chatty_assert(!isReadOnly || writebackClean, 1523 "Writeback from read-only cache"); 1524 assert(blk && blk->isValid() && (blk->isDirty() || writebackClean)); 1525 1526 writebacks[Request::wbMasterId]++; 1527 1528 Request *req = new Request(tags->regenerateBlkAddr(blk->tag, blk->set), 1529 blkSize, 0, Request::wbMasterId); 1530 if (blk->isSecure()) 1531 req->setFlags(Request::SECURE); 1532 1533 req->taskId(blk->task_id); 1534 blk->task_id= ContextSwitchTaskId::Unknown; 1535 blk->tickInserted = curTick(); 1536 1537 PacketPtr pkt = 1538 new Packet(req, blk->isDirty() ? 
1539 MemCmd::WritebackDirty : MemCmd::WritebackClean); 1540 1541 DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n", 1542 pkt->print(), blk->isWritable(), blk->isDirty()); 1543 1544 if (blk->isWritable()) { 1545 // not asserting shared means we pass the block in modified 1546 // state, mark our own block non-writeable 1547 blk->status &= ~BlkWritable; 1548 } else { 1549 // we are in the Owned state, tell the receiver 1550 pkt->setHasSharers(); 1551 } 1552 1553 // make sure the block is not marked dirty 1554 blk->status &= ~BlkDirty; 1555 1556 pkt->allocate(); 1557 std::memcpy(pkt->getPtr<uint8_t>(), blk->data, blkSize); 1558 1559 return pkt; 1560} 1561 1562PacketPtr 1563Cache::cleanEvictBlk(CacheBlk *blk) 1564{ 1565 assert(!writebackClean); 1566 assert(blk && blk->isValid() && !blk->isDirty()); 1567 // Creating a zero sized write, a message to the snoop filter 1568 Request *req = 1569 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0, 1570 Request::wbMasterId); 1571 if (blk->isSecure()) 1572 req->setFlags(Request::SECURE); 1573 1574 req->taskId(blk->task_id); 1575 blk->task_id = ContextSwitchTaskId::Unknown; 1576 blk->tickInserted = curTick(); 1577 1578 PacketPtr pkt = new Packet(req, MemCmd::CleanEvict); 1579 pkt->allocate(); 1580 DPRINTF(Cache, "Create CleanEvict %s\n", pkt->print()); 1581 1582 return pkt; 1583} 1584 1585void 1586Cache::memWriteback() 1587{ 1588 CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor); 1589 tags->forEachBlk(visitor); 1590} 1591 1592void 1593Cache::memInvalidate() 1594{ 1595 CacheBlkVisitorWrapper visitor(*this, &Cache::invalidateVisitor); 1596 tags->forEachBlk(visitor); 1597} 1598 1599bool 1600Cache::isDirty() const 1601{ 1602 CacheBlkIsDirtyVisitor visitor; 1603 tags->forEachBlk(visitor); 1604 1605 return visitor.isDirty(); 1606} 1607 1608bool 1609Cache::writebackVisitor(CacheBlk &blk) 1610{ 1611 if (blk.isDirty()) { 1612 assert(blk.isValid()); 1613 1614 Request request(tags->regenerateBlkAddr(blk.tag, blk.set), 1615 blkSize, 0, Request::funcMasterId); 1616 request.taskId(blk.task_id); 1617 1618 Packet packet(&request, MemCmd::WriteReq); 1619 packet.dataStatic(blk.data); 1620 1621 memSidePort->sendFunctional(&packet); 1622 1623 blk.status &= ~BlkDirty; 1624 } 1625 1626 return true; 1627} 1628 1629bool 1630Cache::invalidateVisitor(CacheBlk &blk) 1631{ 1632 1633 if (blk.isDirty()) 1634 warn_once("Invalidating dirty cache lines. Expect things to break.\n"); 1635 1636 if (blk.isValid()) { 1637 assert(!blk.isDirty()); 1638 tags->invalidate(&blk); 1639 blk.invalidate(); 1640 } 1641 1642 return true; 1643} 1644 1645CacheBlk* 1646Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks) 1647{ 1648 CacheBlk *blk = tags->findVictim(addr); 1649 1650 // It is valid to return nullptr if there is no victim 1651 if (!blk) 1652 return nullptr; 1653 1654 if (blk->isValid()) { 1655 Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set); 1656 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure()); 1657 if (repl_mshr) { 1658 // must be an outstanding upgrade request 1659 // on a block we're about to replace... 1660 assert(!blk->isWritable() || blk->isDirty()); 1661 assert(repl_mshr->needsWritable()); 1662 // too hard to replace block with transient state 1663 // allocation failed, block not inserted 1664 return nullptr; 1665 } else { 1666 DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx " 1667 "(%s): %s\n", repl_addr, blk->isSecure() ? "s" : "ns", 1668 addr, is_secure ? 
"s" : "ns", 1669 blk->isDirty() ? "writeback" : "clean"); 1670 1671 if (blk->wasPrefetched()) { 1672 unusedPrefetches++; 1673 } 1674 // Will send up Writeback/CleanEvict snoops via isCachedAbove 1675 // when pushing this writeback list into the write buffer. 1676 if (blk->isDirty() || writebackClean) { 1677 // Save writeback packet for handling by caller 1678 writebacks.push_back(writebackBlk(blk)); 1679 } else { 1680 writebacks.push_back(cleanEvictBlk(blk)); 1681 } 1682 } 1683 } 1684 1685 return blk; 1686} 1687 1688void 1689Cache::invalidateBlock(CacheBlk *blk) 1690{ 1691 if (blk != tempBlock) 1692 tags->invalidate(blk); 1693 blk->invalidate(); 1694} 1695 1696// Note that the reason we return a list of writebacks rather than 1697// inserting them directly in the write buffer is that this function 1698// is called by both atomic and timing-mode accesses, and in atomic 1699// mode we don't mess with the write buffer (we just perform the 1700// writebacks atomically once the original request is complete). 1701CacheBlk* 1702Cache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks, 1703 bool allocate) 1704{ 1705 assert(pkt->isResponse() || pkt->cmd == MemCmd::WriteLineReq); 1706 Addr addr = pkt->getAddr(); 1707 bool is_secure = pkt->isSecure(); 1708#if TRACING_ON 1709 CacheBlk::State old_state = blk ? blk->status : 0; 1710#endif 1711 1712 // When handling a fill, we should have no writes to this line. 1713 assert(addr == blockAlign(addr)); 1714 assert(!writeBuffer.findMatch(addr, is_secure)); 1715 1716 if (blk == nullptr) { 1717 // better have read new data... 1718 assert(pkt->hasData()); 1719 1720 // only read responses and write-line requests have data; 1721 // note that we don't write the data here for write-line - that 1722 // happens in the subsequent call to satisfyRequest 1723 assert(pkt->isRead() || pkt->cmd == MemCmd::WriteLineReq); 1724 1725 // need to do a replacement if allocating, otherwise we stick 1726 // with the temporary storage 1727 blk = allocate ? allocateBlock(addr, is_secure, writebacks) : nullptr; 1728 1729 if (blk == nullptr) { 1730 // No replaceable block or a mostly exclusive 1731 // cache... just use temporary storage to complete the 1732 // current request and then get rid of it 1733 assert(!tempBlock->isValid()); 1734 blk = tempBlock; 1735 tempBlock->set = tags->extractSet(addr); 1736 tempBlock->tag = tags->extractTag(addr); 1737 // @todo: set security state as well... 1738 DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr, 1739 is_secure ? "s" : "ns"); 1740 } else { 1741 tags->insertBlock(pkt, blk); 1742 } 1743 1744 // we should never be overwriting a valid block 1745 assert(!blk->isValid()); 1746 } else { 1747 // existing block... probably an upgrade 1748 assert(blk->tag == tags->extractTag(addr)); 1749 // either we're getting new data or the block should already be valid 1750 assert(pkt->hasData() || blk->isValid()); 1751 // don't clear block status... 
if block is already dirty we 1752 // don't want to lose that 1753 } 1754 1755 if (is_secure) 1756 blk->status |= BlkSecure; 1757 blk->status |= BlkValid | BlkReadable; 1758 1759 // sanity check for whole-line writes, which should always be 1760 // marked as writable as part of the fill, and then later marked 1761 // dirty as part of satisfyRequest 1762 if (pkt->cmd == MemCmd::WriteLineReq) { 1763 assert(!pkt->hasSharers()); 1764 // at the moment other caches do not respond to the 1765 // invalidation requests corresponding to a whole-line write 1766 assert(!pkt->cacheResponding()); 1767 } 1768 1769 // here we deal with setting the appropriate state of the line, 1770 // and we start by looking at the hasSharers flag, and ignore the 1771 // cacheResponding flag (normally signalling dirty data) if the 1772 // packet has sharers, thus the line is never allocated as Owned 1773 // (dirty but not writable), and always ends up being either 1774 // Shared, Exclusive or Modified, see Packet::setCacheResponding 1775 // for more details 1776 if (!pkt->hasSharers()) { 1777 // we could get a writable line from memory (rather than a 1778 // cache) even in a read-only cache, note that we set this bit 1779 // even for a read-only cache, possibly revisit this decision 1780 blk->status |= BlkWritable; 1781 1782 // check if we got this via cache-to-cache transfer (i.e., from a 1783 // cache that had the block in Modified or Owned state) 1784 if (pkt->cacheResponding()) { 1785 // we got the block in Modified state, and invalidated the 1786 // owners copy 1787 blk->status |= BlkDirty; 1788 1789 chatty_assert(!isReadOnly, "Should never see dirty snoop response " 1790 "in read-only cache %s\n", name()); 1791 } 1792 } 1793 1794 DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n", 1795 addr, is_secure ? "s" : "ns", old_state, blk->print()); 1796 1797 // if we got new data, copy it in (checking for a read response 1798 // and a response that has data is the same in the end) 1799 if (pkt->isRead()) { 1800 // sanity checks 1801 assert(pkt->hasData()); 1802 assert(pkt->getSize() == blkSize); 1803 1804 std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize); 1805 } 1806 // We pay for fillLatency here. 1807 blk->whenReady = clockEdge() + fillLatency * clockPeriod() + 1808 pkt->payloadDelay; 1809 1810 return blk; 1811} 1812 1813 1814///////////////////////////////////////////////////// 1815// 1816// Snoop path: requests coming in from the memory side 1817// 1818///////////////////////////////////////////////////// 1819 1820void 1821Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data, 1822 bool already_copied, bool pending_inval) 1823{ 1824 // sanity check 1825 assert(req_pkt->isRequest()); 1826 assert(req_pkt->needsResponse()); 1827 1828 DPRINTF(Cache, "%s: for %s\n", __func__, req_pkt->print()); 1829 // timing-mode snoop responses require a new packet, unless we 1830 // already made a copy... 
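    // For reference, both call sites in this file follow the same
    // pattern (usage sketch, arguments taken from the calls further
    // below): a snoop hit on a block supplies the block's data, while
    // a snoop hit on a queued writeback supplies the writeback's data:
    //   doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
    //   doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(), false, false);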
1831 PacketPtr pkt = req_pkt; 1832 if (!already_copied) 1833 // do not clear flags, and allocate space for data if the 1834 // packet needs it (the only packets that carry data are read 1835 // responses) 1836 pkt = new Packet(req_pkt, false, req_pkt->isRead()); 1837 1838 assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() || 1839 pkt->hasSharers()); 1840 pkt->makeTimingResponse(); 1841 if (pkt->isRead()) { 1842 pkt->setDataFromBlock(blk_data, blkSize); 1843 } 1844 if (pkt->cmd == MemCmd::ReadResp && pending_inval) { 1845 // Assume we defer a response to a read from a far-away cache 1846 // A, then later defer a ReadExcl from a cache B on the same 1847 // bus as us. We'll assert cacheResponding in both cases, but 1848 // in the latter case cacheResponding will keep the 1849 // invalidation from reaching cache A. This special response 1850 // tells cache A that it gets the block to satisfy its read, 1851 // but must immediately invalidate it. 1852 pkt->cmd = MemCmd::ReadRespWithInvalidate; 1853 } 1854 // Here we consider forward_time, paying for just forward latency and 1855 // also charging the delay provided by the xbar. 1856 // forward_time is used as send_time in next allocateWriteBuffer(). 1857 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 1858 // Here we reset the timing of the packet. 1859 pkt->headerDelay = pkt->payloadDelay = 0; 1860 DPRINTF(CacheVerbose, "%s: created response: %s tick: %lu\n", __func__, 1861 pkt->print(), forward_time); 1862 memSidePort->schedTimingSnoopResp(pkt, forward_time, true); 1863} 1864 1865uint32_t 1866Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing, 1867 bool is_deferred, bool pending_inval) 1868{ 1869 DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print()); 1870 // deferred snoops can only happen in timing mode 1871 assert(!(is_deferred && !is_timing)); 1872 // pending_inval only makes sense on deferred snoops 1873 assert(!(pending_inval && !is_deferred)); 1874 assert(pkt->isRequest()); 1875 1876 // the packet may get modified if we or a forwarded snooper 1877 // responds in atomic mode, so remember a few things about the 1878 // original packet up front 1879 bool invalidate = pkt->isInvalidate(); 1880 bool M5_VAR_USED needs_writable = pkt->needsWritable(); 1881 1882 // at the moment we could get an uncacheable write which does not 1883 // have the invalidate flag, and we need a suitable way of dealing 1884 // with this case 1885 panic_if(invalidate && pkt->req->isUncacheable(), 1886 "%s got an invalidating uncacheable snoop request %s", 1887 name(), pkt->print()); 1888 1889 uint32_t snoop_delay = 0; 1890 1891 if (forwardSnoops) { 1892 // first propagate snoop upward to see if anyone above us wants to 1893 // handle it. 
save & restore packet src since it will get 1894 // rewritten to be relative to cpu-side bus (if any) 1895 bool alreadyResponded = pkt->cacheResponding(); 1896 if (is_timing) { 1897 // copy the packet so that we can clear any flags before 1898 // forwarding it upwards, we also allocate data (passing 1899 // the pointer along in case of static data), in case 1900 // there is a snoop hit in upper levels 1901 Packet snoopPkt(pkt, true, true); 1902 snoopPkt.setExpressSnoop(); 1903 // the snoop packet does not need to wait any additional 1904 // time 1905 snoopPkt.headerDelay = snoopPkt.payloadDelay = 0; 1906 cpuSidePort->sendTimingSnoopReq(&snoopPkt); 1907 1908 // add the header delay (including crossbar and snoop 1909 // delays) of the upward snoop to the snoop delay for this 1910 // cache 1911 snoop_delay += snoopPkt.headerDelay; 1912 1913 if (snoopPkt.cacheResponding()) { 1914 // cache-to-cache response from some upper cache 1915 assert(!alreadyResponded); 1916 pkt->setCacheResponding(); 1917 } 1918 // upstream cache has the block, or has an outstanding 1919 // MSHR, pass the flag on 1920 if (snoopPkt.hasSharers()) { 1921 pkt->setHasSharers(); 1922 } 1923 // If this request is a prefetch or clean evict and an upper level 1924 // signals block present, make sure to propagate the block 1925 // presence to the requester. 1926 if (snoopPkt.isBlockCached()) { 1927 pkt->setBlockCached(); 1928 } 1929 } else { 1930 cpuSidePort->sendAtomicSnoop(pkt); 1931 if (!alreadyResponded && pkt->cacheResponding()) { 1932 // cache-to-cache response from some upper cache: 1933 // forward response to original requester 1934 assert(pkt->isResponse()); 1935 } 1936 } 1937 } 1938 1939 if (!blk || !blk->isValid()) { 1940 DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__, 1941 pkt->print()); 1942 if (is_deferred) { 1943 // we no longer have the block, and will not respond, but a 1944 // packet was allocated in MSHR::handleSnoop and we have 1945 // to delete it 1946 assert(pkt->needsResponse()); 1947 1948 // we have passed the block to a cache upstream, that 1949 // cache should be responding 1950 assert(pkt->cacheResponding()); 1951 1952 delete pkt; 1953 } 1954 return snoop_delay; 1955 } else { 1956 DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__, 1957 pkt->print(), blk->print()); 1958 } 1959 1960 chatty_assert(!(isReadOnly && blk->isDirty()), 1961 "Should never have a dirty block in a read-only cache %s\n", 1962 name()); 1963 1964 // We may end up modifying both the block state and the packet (if 1965 // we respond in atomic mode), so just figure out what to do now 1966 // and then do it later. If we find dirty data while snooping for 1967 // an invalidate, we don't need to send a response. The 1968 // invalidation itself is taken care of below. 1969 bool respond = blk->isDirty() && pkt->needsResponse() && 1970 pkt->cmd != MemCmd::InvalidateReq; 1971 bool have_writable = blk->isWritable(); 1972 1973 // Invalidate any prefetches from below that would strip write permissions 1974 // MemCmd::HardPFReq is only observed by upstream caches. After missing 1975 // above and in its own cache, a new MemCmd::ReadReq is created that 1976 // downstream caches observe.
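    // Such probe snoops are answered below simply by setting the
    // BLOCK_CACHED flag. For genuine coherence snoops the remainder of
    // this function roughly implements the following table (informal
    // sketch, M/O/E/S = Modified/Owned/Exclusive/Shared):
    //   read snoop, block M   -> supply data, drop writable: M -> O
    //   read snoop, block O   -> supply data, stay in O
    //   read snoop, block E   -> no data needed,            E -> S
    //   invalidating snoop    -> supply data if dirty (plain
    //                            InvalidateReq excepted), then
    //                            invalidate the block
    //   uncacheable read      -> supply data if dirty, block state
    //                            left unchanged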
1977 if (pkt->mustCheckAbove()) { 1978 DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s " 1979 "from lower cache\n", pkt->getAddr(), pkt->print()); 1980 pkt->setBlockCached(); 1981 return snoop_delay; 1982 } 1983 1984 if (pkt->isRead() && !invalidate) { 1985 // reading without requiring the line in a writable state 1986 assert(!needs_writable); 1987 pkt->setHasSharers(); 1988 1989 // if the requesting packet is uncacheable, retain the line in 1990 // the current state, otherwise unset the writable flag, 1991 // which means we go from Modified to Owned (and will respond 1992 // below), remain in Owned (and will respond below), from 1993 // Exclusive to Shared, or remain in Shared 1994 if (!pkt->req->isUncacheable()) 1995 blk->status &= ~BlkWritable; 1996 } 1997 1998 if (respond) { 1999 // prevent anyone else from responding, cache as well as 2000 // memory, and also prevent any memory from even seeing the 2001 // request 2002 pkt->setCacheResponding(); 2003 if (have_writable) { 2004 // inform the cache hierarchy that this cache had the line 2005 // in the Modified state so that we avoid unnecessary 2006 // invalidations (see Packet::setResponderHadWritable) 2007 pkt->setResponderHadWritable(); 2008 2009 // in the case of an uncacheable request there is no point 2010 // in setting the responderHadWritable flag, but since the 2011 // recipient does not care there is no harm in doing so 2012 } else { 2013 // if the packet has needsWritable set we invalidate our 2014 // copy below and all other copies will be invalidated 2015 // through express snoops, and if needsWritable is not set 2016 // we already called setHasSharers above 2017 } 2018 2019 // if we are returning a writable and dirty (Modified) line, 2020 // we should be invalidating the line 2021 panic_if(!invalidate && !pkt->hasSharers(), 2022 "%s is passing a Modified line through %s, " 2023 "but keeping the block", name(), pkt->print()); 2024 2025 if (is_timing) { 2026 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval); 2027 } else { 2028 pkt->makeAtomicResponse(); 2029 // packets such as upgrades do not actually have any data 2030 // payload 2031 if (pkt->hasData()) 2032 pkt->setDataFromBlock(blk->data, blkSize); 2033 } 2034 } 2035 2036 if (!respond && is_deferred) { 2037 assert(pkt->needsResponse()); 2038 2039 // if we copied the deferred packet with the intention to 2040 // respond, but are not responding, then a cache above us must 2041 // be, and we can use this as the indication of whether this 2042 // is a packet where we created a copy of the request or not 2043 if (!pkt->cacheResponding()) { 2044 delete pkt->req; 2045 } 2046 2047 delete pkt; 2048 } 2049 2050 // Do this last in case it deallocates block data or something 2051 // like that 2052 if (invalidate) { 2053 invalidateBlock(blk); 2054 } 2055 2056 DPRINTF(Cache, "new state is %s\n", blk->print()); 2057 2058 return snoop_delay; 2059} 2060 2061 2062void 2063Cache::recvTimingSnoopReq(PacketPtr pkt) 2064{ 2065 DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print()); 2066 2067 // Snoops shouldn't happen when bypassing caches 2068 assert(!system->bypassCaches()); 2069 2070 // no need to snoop requests that are not in range 2071 if (!inRange(pkt->getAddr())) { 2072 return; 2073 } 2074 2075 bool is_secure = pkt->isSecure(); 2076 CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure); 2077 2078 Addr blk_addr = blockAlign(pkt->getAddr()); 2079 MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure); 2080 2081 // Update the latency cost of
the snoop so that the crossbar can 2082 // account for it. Do not overwrite what other neighbouring caches 2083 // have already done, rather take the maximum. The update is 2084 // tentative, for cases where we return before an upward snoop 2085 // happens below. 2086 pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, 2087 lookupLatency * clockPeriod()); 2088 2089 // Inform request(Prefetch, CleanEvict or Writeback) from below of 2090 // MSHR hit, set setBlockCached. 2091 if (mshr && pkt->mustCheckAbove()) { 2092 DPRINTF(Cache, "Setting block cached for %s from lower cache on " 2093 "mshr hit\n", pkt->print()); 2094 pkt->setBlockCached(); 2095 return; 2096 } 2097 2098 // Let the MSHR itself track the snoop and decide whether we want 2099 // to go ahead and do the regular cache snoop 2100 if (mshr && mshr->handleSnoop(pkt, order++)) { 2101 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s)." 2102 "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns", 2103 mshr->print()); 2104 2105 if (mshr->getNumTargets() > numTarget) 2106 warn("allocating bonus target for snoop"); //handle later 2107 return; 2108 } 2109 2110 //We also need to check the writeback buffers and handle those 2111 WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure); 2112 if (wb_entry) { 2113 DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n", 2114 pkt->getAddr(), is_secure ? "s" : "ns"); 2115 // Expect to see only Writebacks and/or CleanEvicts here, both of 2116 // which should not be generated for uncacheable data. 2117 assert(!wb_entry->isUncacheable()); 2118 // There should only be a single request responsible for generating 2119 // Writebacks/CleanEvicts. 2120 assert(wb_entry->getNumTargets() == 1); 2121 PacketPtr wb_pkt = wb_entry->getTarget()->pkt; 2122 assert(wb_pkt->isEviction()); 2123 2124 if (pkt->isEviction()) { 2125 // if the block is found in the write queue, set the BLOCK_CACHED 2126 // flag for Writeback/CleanEvict snoop. On return the snoop will 2127 // propagate the BLOCK_CACHED flag in Writeback packets and prevent 2128 // any CleanEvicts from travelling down the memory hierarchy. 2129 pkt->setBlockCached(); 2130 DPRINTF(Cache, "%s: Squashing %s from lower cache on writequeue " 2131 "hit\n", __func__, pkt->print()); 2132 return; 2133 } 2134 2135 // conceptually writebacks are no different to other blocks in 2136 // this cache, so the behaviour is modelled after handleSnoop, 2137 // the difference being that instead of querying the block 2138 // state to determine if it is dirty and writable, we use the 2139 // command and fields of the writeback packet 2140 bool respond = wb_pkt->cmd == MemCmd::WritebackDirty && 2141 pkt->needsResponse() && pkt->cmd != MemCmd::InvalidateReq; 2142 bool have_writable = !wb_pkt->hasSharers(); 2143 bool invalidate = pkt->isInvalidate(); 2144 2145 if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) { 2146 assert(!pkt->needsWritable()); 2147 pkt->setHasSharers(); 2148 wb_pkt->setHasSharers(); 2149 } 2150 2151 if (respond) { 2152 pkt->setCacheResponding(); 2153 2154 if (have_writable) { 2155 pkt->setResponderHadWritable(); 2156 } 2157 2158 doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(), 2159 false, false); 2160 } 2161 2162 if (invalidate) { 2163 // Invalidation trumps our writeback... discard here 2164 // Note: markInService will remove entry from writeback buffer. 
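            // Note that for write-queue entries markInService() roughly
            // amounts to deallocation (no response is expected for an
            // eviction), so the queued Writeback/CleanEvict is dropped
            // here without ever being sent downstream; the invalidation
            // has made it obsolete.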
2165 markInService(wb_entry); 2166 delete wb_pkt; 2167 } 2168 } 2169 2170 // If this was a shared writeback, there may still be 2171 // other shared copies above that require invalidation. 2172 // We could be more selective and return here if the 2173 // request is non-exclusive or if the writeback is 2174 // exclusive. 2175 uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false); 2176 2177 // Override what we did when we first saw the snoop, as we now 2178 // also have the cost of the upwards snoops to account for 2179 pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay + 2180 lookupLatency * clockPeriod()); 2181} 2182 2183bool 2184Cache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt) 2185{ 2186 // Express snoop responses from master to slave, e.g., from L1 to L2 2187 cache->recvTimingSnoopResp(pkt); 2188 return true; 2189} 2190 2191Tick 2192Cache::recvAtomicSnoop(PacketPtr pkt) 2193{ 2194 // Snoops shouldn't happen when bypassing caches 2195 assert(!system->bypassCaches()); 2196 2197 // no need to snoop requests that are not in range. 2198 if (!inRange(pkt->getAddr())) { 2199 return 0; 2200 } 2201 2202 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure()); 2203 uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false); 2204 return snoop_delay + lookupLatency * clockPeriod(); 2205} 2206 2207 2208QueueEntry* 2209Cache::getNextQueueEntry() 2210{ 2211 // Check both MSHR queue and write buffer for potential requests, 2212 // note that null does not mean there is no request, it could 2213 // simply be that it is not ready 2214 MSHR *miss_mshr = mshrQueue.getNext(); 2215 WriteQueueEntry *wq_entry = writeBuffer.getNext(); 2216 2217 // If we got a write buffer request ready, first priority is a 2218 // full write buffer, otherwise we favour the miss requests 2219 if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) { 2220 // need to search MSHR queue for conflicting earlier miss. 2221 MSHR *conflict_mshr = 2222 mshrQueue.findPending(wq_entry->blkAddr, 2223 wq_entry->isSecure); 2224 2225 if (conflict_mshr && conflict_mshr->order < wq_entry->order) { 2226 // Service misses in order until conflict is cleared. 2227 return conflict_mshr; 2228 2229 // @todo Note that we ignore the ready time of the conflict here 2230 } 2231 2232 // No conflicts; issue write 2233 return wq_entry; 2234 } else if (miss_mshr) { 2235 // need to check for conflicting earlier writeback 2236 WriteQueueEntry *conflict_mshr = 2237 writeBuffer.findPending(miss_mshr->blkAddr, 2238 miss_mshr->isSecure); 2239 if (conflict_mshr) { 2240 // not sure why we don't check order here... it was in the 2241 // original code but commented out. 2242 2243 // The only way this happens is if we are 2244 // doing a write and we didn't have permissions 2245 // then subsequently saw a writeback (owned got evicted) 2246 // We need to make sure to perform the writeback first 2247 // To preserve the dirty data, then we can issue the write 2248 2249 // should we return wq_entry here instead? I.e. do we 2250 // have to flush writes in order? I don't think so... not 2251 // for Alpha anyway. Maybe for x86? 2252 return conflict_mshr; 2253 2254 // @todo Note that we ignore the ready time of the conflict here 2255 } 2256 2257 // No conflicts; issue read 2258 return miss_mshr; 2259 } 2260 2261 // fall through... no pending requests. Try a prefetch. 
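    // To summarise the arbitration above (informal sketch):
    //   1. a ready write-buffer entry is chosen if the write buffer is
    //      full or no MSHR is ready, unless an older conflicting MSHR
    //      has to be serviced first;
    //   2. otherwise a ready MSHR is chosen, unless a conflicting
    //      writeback exists, in which case the writeback drains first;
    //   3. if neither is ready, fall through and try a prefetch.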
2262 assert(!miss_mshr && !wq_entry); 2263 if (prefetcher && mshrQueue.canPrefetch()) { 2264 // If we have a miss queue slot, we can try a prefetch 2265 PacketPtr pkt = prefetcher->getPacket(); 2266 if (pkt) { 2267 Addr pf_addr = blockAlign(pkt->getAddr()); 2268 if (!tags->findBlock(pf_addr, pkt->isSecure()) && 2269 !mshrQueue.findMatch(pf_addr, pkt->isSecure()) && 2270 !writeBuffer.findMatch(pf_addr, pkt->isSecure())) { 2271 // Update statistic on number of prefetches issued 2272 // (hwpf_mshr_misses) 2273 assert(pkt->req->masterId() < system->maxMasters()); 2274 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++; 2275 2276 // allocate an MSHR and return it, note 2277 // that we send the packet straight away, so do not 2278 // schedule the send 2279 return allocateMissBuffer(pkt, curTick(), false); 2280 } else { 2281 // free the request and packet 2282 delete pkt->req; 2283 delete pkt; 2284 } 2285 } 2286 } 2287 2288 return nullptr; 2289} 2290 2291bool 2292Cache::isCachedAbove(PacketPtr pkt, bool is_timing) const 2293{ 2294 if (!forwardSnoops) 2295 return false; 2296 // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and 2297 // Writeback snoops into upper level caches to check for copies of the 2298 // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict 2299 // packet, the cache can inform the crossbar below of presence or absence 2300 // of the block. 2301 if (is_timing) { 2302 Packet snoop_pkt(pkt, true, false); 2303 snoop_pkt.setExpressSnoop(); 2304 // Assert that packet is either Writeback or CleanEvict and not a 2305 // prefetch request because prefetch requests need an MSHR and may 2306 // generate a snoop response. 2307 assert(pkt->isEviction()); 2308 snoop_pkt.senderState = nullptr; 2309 cpuSidePort->sendTimingSnoopReq(&snoop_pkt); 2310 // Writeback/CleanEvict snoops do not generate a snoop response. 2311 assert(!(snoop_pkt.cacheResponding())); 2312 return snoop_pkt.isBlockCached(); 2313 } else { 2314 cpuSidePort->sendAtomicSnoop(pkt); 2315 return pkt->isBlockCached(); 2316 } 2317} 2318 2319Tick 2320Cache::nextQueueReadyTime() const 2321{ 2322 Tick nextReady = std::min(mshrQueue.nextReadyTime(), 2323 writeBuffer.nextReadyTime()); 2324 2325 // Don't signal prefetch ready time if no MSHRs available 2326 // Will signal once enough MSHRs are deallocated 2327 if (prefetcher && mshrQueue.canPrefetch()) { 2328 nextReady = std::min(nextReady, 2329 prefetcher->nextPrefetchReadyTime()); 2330 } 2331 2332 return nextReady; 2333} 2334 2335bool 2336Cache::sendMSHRQueuePacket(MSHR* mshr) 2337{ 2338 assert(mshr); 2339 2340 // use request from 1st target 2341 PacketPtr tgt_pkt = mshr->getTarget()->pkt; 2342 2343 DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print()); 2344 2345 CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure); 2346 2347 if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) { 2348 // we should never have hardware prefetches to allocated 2349 // blocks 2350 assert(blk == nullptr); 2351 2352 // We need to check the caches above us to verify that 2353 // they don't have a copy of this block in the dirty state 2354 // at the moment. Without this check we could get a stale 2355 // copy from memory that might get used in place of the 2356 // dirty one.
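        // Possible outcomes of the express snoop sent below (all
        // handled underneath, informal summary):
        //   snoop_pkt.cacheResponding() -> an upper cache will send us
        //       the dirty data; mark the MSHR in service and return
        //   snoop_pkt.isBlockCached()   -> the prefetch is redundant;
        //       squash it and deallocate the MSHR target
        //   neither                     -> safe to fetch the line from
        //       below; fall through and build the miss packet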
2357 Packet snoop_pkt(tgt_pkt, true, false); 2358 snoop_pkt.setExpressSnoop(); 2359 // We are sending this packet upwards, but if it hits we will 2360 // get a snoop response that we end up treating just like a 2361 // normal response, hence it needs the MSHR as its sender 2362 // state 2363 snoop_pkt.senderState = mshr; 2364 cpuSidePort->sendTimingSnoopReq(&snoop_pkt); 2365 2366 // Check to see if the prefetch was squashed by an upper cache 2367 // (to prevent us from grabbing the line), or if a writeback 2368 // arrived between the time the prefetch was placed in the 2369 // MSHRs and when it was selected to be sent; in either case 2370 // the prefetch should not be sent downstream. 2371 2372 // It is important to check cacheResponding before 2373 // prefetchSquashed. If another cache has committed to 2374 // responding, it will be sending a dirty response which will 2375 // arrive at the MSHR allocated for this request. Checking the 2376 // prefetchSquash first may result in the MSHR being 2377 // prematurely deallocated. 2378 if (snoop_pkt.cacheResponding()) { 2379 auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req); 2380 assert(r.second); 2381 2382 // if we are getting a snoop response with no sharers it 2383 // will be allocated as Modified 2384 bool pending_modified_resp = !snoop_pkt.hasSharers(); 2385 markInService(mshr, pending_modified_resp); 2386 2387 DPRINTF(Cache, "Upward snoop of prefetch for addr" 2388 " %#x (%s) hit\n", 2389 tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns"); 2390 return false; 2391 } 2392 2393 if (snoop_pkt.isBlockCached()) { 2394 DPRINTF(Cache, "Block present, prefetch squashed by cache. " 2395 "Deallocating mshr target %#x.\n", 2396 mshr->blkAddr); 2397 2398 // Deallocate the mshr target 2399 if (mshrQueue.forceDeallocateTarget(mshr)) { 2400 // Clear blocked status if this deallocation freed an 2401 // mshr when all had previously been utilized 2402 clearBlocked(Blocked_NoMSHRs); 2403 } 2404 return false; 2405 } 2406 } 2407 2408 // either a prefetch that is not present upstream, or a normal 2409 // MSHR request, proceed to get the packet to send downstream 2410 PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable()); 2411 2412 mshr->isForward = (pkt == nullptr); 2413 2414 if (mshr->isForward) { 2415 // not a cache block request, but a response is expected 2416 // make copy of current packet to forward, keep current 2417 // copy for response handling 2418 pkt = new Packet(tgt_pkt, false, true); 2419 assert(!pkt->isWrite()); 2420 } 2421 2422 // play it safe and append (rather than set) the sender state, 2423 // as forwarded packets may already have existing state 2424 pkt->pushSenderState(mshr); 2425 2426 if (!memSidePort->sendTimingReq(pkt)) { 2427 // we are awaiting a retry, but we 2428 // delete the packet and will be creating a new packet 2429 // when we get the opportunity 2430 delete pkt; 2431 2432 // note that we have now masked any requestBus and 2433 // schedSendEvent (we will wait for a retry before 2434 // doing anything), and this is so even if we do not 2435 // care about this packet and might override it before 2436 // it gets retried 2437 return true; 2438 } else { 2439 // As part of the call to sendTimingReq the packet is 2440 // forwarded to all neighbouring caches (and any caches 2441 // above them) as a snoop.
Thus at this point we know if 2442 // any of the neighbouring caches are responding, and if 2443 // so, we know it is dirty, and we can determine if it is 2444 // being passed as Modified, making our MSHR the ordering 2445 // point 2446 bool pending_modified_resp = !pkt->hasSharers() && 2447 pkt->cacheResponding(); 2448 markInService(mshr, pending_modified_resp); 2449 return false; 2450 } 2451} 2452 2453bool 2454Cache::sendWriteQueuePacket(WriteQueueEntry* wq_entry) 2455{ 2456 assert(wq_entry); 2457 2458 // always a single target for write queue entries 2459 PacketPtr tgt_pkt = wq_entry->getTarget()->pkt; 2460 2461 DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print()); 2462 2463 // forward as is, both for evictions and uncacheable writes 2464 if (!memSidePort->sendTimingReq(tgt_pkt)) { 2465 // note that we have now masked any requestBus and 2466 // schedSendEvent (we will wait for a retry before 2467 // doing anything), and this is so even if we do not 2468 // care about this packet and might override it before 2469 // it gets retried 2470 return true; 2471 } else { 2472 markInService(wq_entry); 2473 return false; 2474 } 2475} 2476 2477void 2478Cache::serialize(CheckpointOut &cp) const 2479{ 2480 bool dirty(isDirty()); 2481 2482 if (dirty) { 2483 warn("*** The cache still contains dirty data. ***\n"); 2484 warn(" Make sure to drain the system using the correct flags.\n"); 2485 warn(" This checkpoint will not restore correctly and dirty data " 2486 " in the cache will be lost!\n"); 2487 } 2488 2489 // Since we don't checkpoint the data in the cache, any dirty data 2490 // will be lost when restoring from a checkpoint of a system that 2491 // wasn't drained properly. Flag the checkpoint as invalid if the 2492 // cache contains dirty data. 2493 bool bad_checkpoint(dirty); 2494 SERIALIZE_SCALAR(bad_checkpoint); 2495} 2496 2497void 2498Cache::unserialize(CheckpointIn &cp) 2499{ 2500 bool bad_checkpoint; 2501 UNSERIALIZE_SCALAR(bad_checkpoint); 2502 if (bad_checkpoint) { 2503 fatal("Restoring from checkpoints with dirty caches is not supported " 2504 "in the classic memory system. 
Please remove any caches or " 2505 " drain them properly before taking checkpoints.\n"); 2506 } 2507} 2508 2509/////////////// 2510// 2511// CpuSidePort 2512// 2513/////////////// 2514 2515AddrRangeList 2516Cache::CpuSidePort::getAddrRanges() const 2517{ 2518 return cache->getAddrRanges(); 2519} 2520 2521bool 2522Cache::CpuSidePort::recvTimingReq(PacketPtr pkt) 2523{ 2524 assert(!cache->system->bypassCaches()); 2525 2526 bool success = false; 2527 2528 // always let express snoop packets through even if blocked 2529 if (pkt->isExpressSnoop()) { 2530 // do not change the current retry state 2531 bool M5_VAR_USED bypass_success = cache->recvTimingReq(pkt); 2532 assert(bypass_success); 2533 return true; 2534 } else if (blocked || mustSendRetry) { 2535 // either already committed to send a retry, or blocked 2536 success = false; 2537 } else { 2538 // pass it on to the cache, and let the cache decide if we 2539 // have to retry or not 2540 success = cache->recvTimingReq(pkt); 2541 } 2542 2543 // remember if we have to retry 2544 mustSendRetry = !success; 2545 return success; 2546} 2547 2548Tick 2549Cache::CpuSidePort::recvAtomic(PacketPtr pkt) 2550{ 2551 return cache->recvAtomic(pkt); 2552} 2553 2554void 2555Cache::CpuSidePort::recvFunctional(PacketPtr pkt) 2556{ 2557 // functional request 2558 cache->functionalAccess(pkt, true); 2559} 2560 2561Cache:: 2562CpuSidePort::CpuSidePort(const std::string &_name, Cache *_cache, 2563 const std::string &_label) 2564 : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache) 2565{ 2566} 2567 2568Cache* 2569CacheParams::create() 2570{ 2571 assert(tags); 2572 2573 return new Cache(this); 2574} 2575/////////////// 2576// 2577// MemSidePort 2578// 2579/////////////// 2580 2581bool 2582Cache::MemSidePort::recvTimingResp(PacketPtr pkt) 2583{ 2584 cache->recvTimingResp(pkt); 2585 return true; 2586} 2587 2588// Express snooping requests to memside port 2589void 2590Cache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt) 2591{ 2592 // handle snooping requests 2593 cache->recvTimingSnoopReq(pkt); 2594} 2595 2596Tick 2597Cache::MemSidePort::recvAtomicSnoop(PacketPtr pkt) 2598{ 2599 return cache->recvAtomicSnoop(pkt); 2600} 2601 2602void 2603Cache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt) 2604{ 2605 // functional snoop (note that in contrast to atomic we don't have 2606 // a specific functionalSnoop method, as they have the same 2607 // behaviour regardless) 2608 cache->functionalAccess(pkt, false); 2609} 2610 2611void 2612Cache::CacheReqPacketQueue::sendDeferredPacket() 2613{ 2614 // sanity check 2615 assert(!waitingOnRetry); 2616 2617 // there should never be any deferred request packets in the 2618 // queue, instead we rely on the cache to provide the packets 2619 // from the MSHR queue or write queue 2620 assert(deferredPacketReadyTime() == MaxTick); 2621 2622 // check for request packets (requests & writebacks) 2623 QueueEntry* entry = cache.getNextQueueEntry(); 2624 2625 if (!entry) { 2626 // can happen if e.g. we attempt a writeback and fail, but 2627 // before the retry, the writeback is eliminated because 2628 // we snoop another cache's ReadEx.
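        // In that case nothing is sent now; the schedSendEvent() call
        // at the end of this function will re-arm the send event based
        // on nextQueueReadyTime() once something does become ready.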
2629 } else { 2630 // let our snoop responses go first if there are responses to 2631 // the same addresses 2632 if (checkConflictingSnoop(entry->blkAddr)) { 2633 return; 2634 } 2635 waitingOnRetry = entry->sendPacket(cache); 2636 } 2637 2638 // if we succeeded and are not waiting for a retry, schedule the 2639 // next send considering when the next queue is ready, note that 2640 // snoop responses have their own packet queue and thus schedule 2641 // their own events 2642 if (!waitingOnRetry) { 2643 schedSendEvent(cache.nextQueueReadyTime()); 2644 } 2645} 2646 2647Cache:: 2648MemSidePort::MemSidePort(const std::string &_name, Cache *_cache, 2649 const std::string &_label) 2650 : BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue), 2651 _reqQueue(*_cache, *this, _snoopRespQueue, _label), 2652 _snoopRespQueue(*_cache, *this, _label), cache(_cache) 2653{ 2654} 2655