cache.cc revision 11054:00bddca96da6
1/* 2 * Copyright (c) 2010-2015 ARM Limited 3 * All rights reserved. 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2002-2005 The Regents of The University of Michigan 15 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc. 16 * All rights reserved. 17 * 18 * Redistribution and use in source and binary forms, with or without 19 * modification, are permitted provided that the following conditions are 20 * met: redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer; 22 * redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution; 25 * neither the name of the copyright holders nor the names of its 26 * contributors may be used to endorse or promote products derived from 27 * this software without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 40 * 41 * Authors: Erik Hallnor 42 * Dave Greene 43 * Nathan Binkert 44 * Steve Reinhardt 45 * Ron Dreslinski 46 * Andreas Sandberg 47 */ 48 49/** 50 * @file 51 * Cache definitions. 52 */ 53 54#include "mem/cache/cache.hh" 55 56#include "base/misc.hh" 57#include "base/types.hh" 58#include "debug/Cache.hh" 59#include "debug/CachePort.hh" 60#include "debug/CacheTags.hh" 61#include "mem/cache/blk.hh" 62#include "mem/cache/mshr.hh" 63#include "mem/cache/prefetch/base.hh" 64#include "sim/sim_exit.hh" 65 66Cache::Cache(const CacheParams *p) 67 : BaseCache(p, p->system->cacheLineSize()), 68 tags(p->tags), 69 prefetcher(p->prefetcher), 70 doFastWrites(true), 71 prefetchOnAccess(p->prefetch_on_access) 72{ 73 tempBlock = new CacheBlk(); 74 tempBlock->data = new uint8_t[blkSize]; 75 76 cpuSidePort = new CpuSidePort(p->name + ".cpu_side", this, 77 "CpuSidePort"); 78 memSidePort = new MemSidePort(p->name + ".mem_side", this, 79 "MemSidePort"); 80 81 tags->setCache(this); 82 if (prefetcher) 83 prefetcher->setCache(this); 84} 85 86Cache::~Cache() 87{ 88 delete [] tempBlock->data; 89 delete tempBlock; 90 91 delete cpuSidePort; 92 delete memSidePort; 93} 94 95void 96Cache::regStats() 97{ 98 BaseCache::regStats(); 99} 100 101void 102Cache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt) 103{ 104 assert(pkt->isRequest()); 105 106 uint64_t overwrite_val; 107 bool overwrite_mem; 108 uint64_t condition_val64; 109 uint32_t condition_val32; 110 111 int offset = tags->extractBlkOffset(pkt->getAddr()); 112 uint8_t *blk_data = blk->data + offset; 113 114 assert(sizeof(uint64_t) >= pkt->getSize()); 115 116 overwrite_mem = true; 117 // keep a copy of our possible write value, and copy what is at the 118 // memory address into the packet 119 pkt->writeData((uint8_t *)&overwrite_val); 120 pkt->setData(blk_data); 121 122 if (pkt->req->isCondSwap()) { 123 if (pkt->getSize() == sizeof(uint64_t)) { 124 condition_val64 = pkt->req->getExtraData(); 125 overwrite_mem = !std::memcmp(&condition_val64, blk_data, 126 sizeof(uint64_t)); 127 } else if (pkt->getSize() == sizeof(uint32_t)) { 128 condition_val32 = (uint32_t)pkt->req->getExtraData(); 129 overwrite_mem = !std::memcmp(&condition_val32, blk_data, 130 sizeof(uint32_t)); 131 } else 132 panic("Invalid size for conditional read/write\n"); 133 } 134 135 if (overwrite_mem) { 136 std::memcpy(blk_data, &overwrite_val, pkt->getSize()); 137 blk->status |= BlkDirty; 138 } 139} 140 141 142void 143Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk, 144 bool deferred_response, bool pending_downgrade) 145{ 146 assert(pkt->isRequest()); 147 148 assert(blk && blk->isValid()); 149 // Occasionally this is not true... if we are a lower-level cache 150 // satisfying a string of Read and ReadEx requests from 151 // upper-level caches, a Read will mark the block as shared but we 152 // can satisfy a following ReadEx anyway since we can rely on the 153 // Read requester(s) to have buffered the ReadEx snoop and to 154 // invalidate their blocks after receiving them. 155 // assert(!pkt->needsExclusive() || blk->isWritable()); 156 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize); 157 158 // Check RMW operations first since both isRead() and 159 // isWrite() will be true for them 160 if (pkt->cmd == MemCmd::SwapReq) { 161 cmpAndSwap(blk, pkt); 162 } else if (pkt->isWrite()) { 163 assert(blk->isWritable()); 164 // Write or WriteLine at the first cache with block in Exclusive 165 if (blk->checkWrite(pkt)) { 166 pkt->writeDataToBlock(blk->data, blkSize); 167 } 168 // Always mark the line as dirty even if we are a failed 169 // StoreCond so we supply data to any snoops that have 170 // appended themselves to this cache before knowing the store 171 // will fail. 172 blk->status |= BlkDirty; 173 DPRINTF(Cache, "%s for %s addr %#llx size %d (write)\n", __func__, 174 pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 175 } else if (pkt->isRead()) { 176 if (pkt->isLLSC()) { 177 blk->trackLoadLocked(pkt); 178 } 179 pkt->setDataFromBlock(blk->data, blkSize); 180 // determine if this read is from a (coherent) cache, or not 181 // by looking at the command type; we could potentially add a 182 // packet attribute such as 'FromCache' to make this check a 183 // bit cleaner 184 if (pkt->cmd == MemCmd::ReadExReq || 185 pkt->cmd == MemCmd::ReadSharedReq || 186 pkt->cmd == MemCmd::ReadCleanReq || 187 pkt->cmd == MemCmd::SCUpgradeFailReq) { 188 assert(pkt->getSize() == blkSize); 189 // special handling for coherent block requests from 190 // upper-level caches 191 if (pkt->needsExclusive()) { 192 // sanity check 193 assert(pkt->cmd == MemCmd::ReadExReq || 194 pkt->cmd == MemCmd::SCUpgradeFailReq); 195 196 // if we have a dirty copy, make sure the recipient 197 // keeps it marked dirty 198 if (blk->isDirty()) { 199 pkt->assertMemInhibit(); 200 } 201 // on ReadExReq we give up our copy unconditionally 202 if (blk != tempBlock) 203 tags->invalidate(blk); 204 blk->invalidate(); 205 } else if (blk->isWritable() && !pending_downgrade && 206 !pkt->sharedAsserted() && 207 pkt->cmd != MemCmd::ReadCleanReq) { 208 // we can give the requester an exclusive copy (by not 209 // asserting shared line) on a read request if: 210 // - we have an exclusive copy at this level (& below) 211 // - we don't have a pending snoop from below 212 // signaling another read request 213 // - no other cache above has a copy (otherwise it 214 // would have asseretd shared line on request) 215 // - we are not satisfying an instruction fetch (this 216 // prevents dirty data in the i-cache) 217 218 if (blk->isDirty()) { 219 // special considerations if we're owner: 220 if (!deferred_response) { 221 // if we are responding immediately and can 222 // signal that we're transferring ownership 223 // along with exclusivity, do so 224 pkt->assertMemInhibit(); 225 blk->status &= ~BlkDirty; 226 } else { 227 // if we're responding after our own miss, 228 // there's a window where the recipient didn't 229 // know it was getting ownership and may not 230 // have responded to snoops correctly, so we 231 // can't pass off ownership *or* exclusivity 232 pkt->assertShared(); 233 } 234 } 235 } else { 236 // otherwise only respond with a shared copy 237 pkt->assertShared(); 238 } 239 } 240 } else { 241 // Upgrade or Invalidate, since we have it Exclusively (E or 242 // M), we ack then invalidate. 243 assert(pkt->isUpgrade() || pkt->isInvalidate()); 244 assert(blk != tempBlock); 245 tags->invalidate(blk); 246 blk->invalidate(); 247 DPRINTF(Cache, "%s for %s addr %#llx size %d (invalidation)\n", 248 __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 249 } 250} 251 252 253///////////////////////////////////////////////////// 254// 255// MSHR helper functions 256// 257///////////////////////////////////////////////////// 258 259 260void 261Cache::markInService(MSHR *mshr, bool pending_dirty_resp) 262{ 263 markInServiceInternal(mshr, pending_dirty_resp); 264} 265 266///////////////////////////////////////////////////// 267// 268// Access path: requests coming in from the CPU side 269// 270///////////////////////////////////////////////////// 271 272bool 273Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, 274 PacketList &writebacks) 275{ 276 // sanity check 277 assert(pkt->isRequest()); 278 279 chatty_assert(!(isReadOnly && pkt->isWrite()), 280 "Should never see a write in a read-only cache %s\n", 281 name()); 282 283 DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__, 284 pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 285 286 if (pkt->req->isUncacheable()) { 287 DPRINTF(Cache, "%s%s addr %#llx uncacheable\n", pkt->cmdString(), 288 pkt->req->isInstFetch() ? " (ifetch)" : "", 289 pkt->getAddr()); 290 291 if (pkt->req->isClearLL()) 292 tags->clearLocks(); 293 294 // flush and invalidate any existing block 295 CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure())); 296 if (old_blk && old_blk->isValid()) { 297 if (old_blk->isDirty()) 298 writebacks.push_back(writebackBlk(old_blk)); 299 else 300 writebacks.push_back(cleanEvictBlk(old_blk)); 301 tags->invalidate(old_blk); 302 old_blk->invalidate(); 303 } 304 305 blk = NULL; 306 // lookupLatency is the latency in case the request is uncacheable. 307 lat = lookupLatency; 308 return false; 309 } 310 311 ContextID id = pkt->req->hasContextId() ? 312 pkt->req->contextId() : InvalidContextID; 313 // Here lat is the value passed as parameter to accessBlock() function 314 // that can modify its value. 315 blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat, id); 316 317 DPRINTF(Cache, "%s%s addr %#llx size %d (%s) %s\n", pkt->cmdString(), 318 pkt->req->isInstFetch() ? " (ifetch)" : "", 319 pkt->getAddr(), pkt->getSize(), pkt->isSecure() ? "s" : "ns", 320 blk ? "hit " + blk->print() : "miss"); 321 322 323 if (pkt->evictingBlock()) { 324 // We check for presence of block in above caches before issuing 325 // Writeback or CleanEvict to write buffer. Therefore the only 326 // possible cases can be of a CleanEvict packet coming from above 327 // encountering a Writeback generated in this cache peer cache and 328 // waiting in the write buffer. Cases of upper level peer caches 329 // generating CleanEvict and Writeback or simply CleanEvict and 330 // CleanEvict almost simultaneously will be caught by snoops sent out 331 // by crossbar. 332 std::vector<MSHR *> outgoing; 333 if (writeBuffer.findMatches(pkt->getAddr(), pkt->isSecure(), 334 outgoing)) { 335 assert(outgoing.size() == 1); 336 PacketPtr wbPkt = outgoing[0]->getTarget()->pkt; 337 assert(pkt->cmd == MemCmd::CleanEvict && 338 wbPkt->cmd == MemCmd::Writeback); 339 // As the CleanEvict is coming from above, it would have snooped 340 // into other peer caches of the same level while traversing the 341 // crossbar. If a copy of the block had been found, the CleanEvict 342 // would have been deleted in the crossbar. Now that the 343 // CleanEvict is here we can be sure none of the other upper level 344 // caches connected to this cache have the block, so we can clear 345 // the BLOCK_CACHED flag in the Writeback if set and discard the 346 // CleanEvict by returning true. 347 wbPkt->clearBlockCached(); 348 return true; 349 } 350 } 351 352 // Writeback handling is special case. We can write the block into 353 // the cache without having a writeable copy (or any copy at all). 354 if (pkt->cmd == MemCmd::Writeback) { 355 assert(blkSize == pkt->getSize()); 356 if (blk == NULL) { 357 // need to do a replacement 358 blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks); 359 if (blk == NULL) { 360 // no replaceable block available: give up, fwd to next level. 361 incMissCount(pkt); 362 return false; 363 } 364 tags->insertBlock(pkt, blk); 365 366 blk->status = (BlkValid | BlkReadable); 367 if (pkt->isSecure()) { 368 blk->status |= BlkSecure; 369 } 370 } 371 blk->status |= BlkDirty; 372 // if shared is not asserted we got the writeback in modified 373 // state, if it is asserted we are in the owned state 374 if (!pkt->sharedAsserted()) { 375 blk->status |= BlkWritable; 376 } 377 // nothing else to do; writeback doesn't expect response 378 assert(!pkt->needsResponse()); 379 std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize); 380 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print()); 381 incHitCount(pkt); 382 return true; 383 } else if (pkt->cmd == MemCmd::CleanEvict) { 384 if (blk != NULL) { 385 // Found the block in the tags, need to stop CleanEvict from 386 // propagating further down the hierarchy. Returning true will 387 // treat the CleanEvict like a satisfied write request and delete 388 // it. 389 return true; 390 } 391 // We didn't find the block here, propagate the CleanEvict further 392 // down the memory hierarchy. Returning false will treat the CleanEvict 393 // like a Writeback which could not find a replaceable block so has to 394 // go to next level. 395 return false; 396 } else if ((blk != NULL) && 397 (pkt->needsExclusive() ? blk->isWritable() 398 : blk->isReadable())) { 399 // OK to satisfy access 400 incHitCount(pkt); 401 satisfyCpuSideRequest(pkt, blk); 402 return true; 403 } 404 405 // Can't satisfy access normally... either no block (blk == NULL) 406 // or have block but need exclusive & only have shared. 407 408 incMissCount(pkt); 409 410 if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) { 411 // complete miss on store conditional... just give up now 412 pkt->req->setExtraData(0); 413 return true; 414 } 415 416 return false; 417} 418 419 420class ForwardResponseRecord : public Packet::SenderState 421{ 422 public: 423 424 ForwardResponseRecord() {} 425}; 426 427void 428Cache::doWritebacks(PacketList& writebacks, Tick forward_time) 429{ 430 while (!writebacks.empty()) { 431 PacketPtr wbPkt = writebacks.front(); 432 // We use forwardLatency here because we are copying writebacks to 433 // write buffer. Call isCachedAbove for both Writebacks and 434 // CleanEvicts. If isCachedAbove returns true we set BLOCK_CACHED flag 435 // in Writebacks and discard CleanEvicts. 436 if (isCachedAbove(wbPkt)) { 437 if (wbPkt->cmd == MemCmd::CleanEvict) { 438 // Delete CleanEvict because cached copies exist above. The 439 // packet destructor will delete the request object because 440 // this is a non-snoop request packet which does not require a 441 // response. 442 delete wbPkt; 443 } else { 444 // Set BLOCK_CACHED flag in Writeback and send below, so that 445 // the Writeback does not reset the bit corresponding to this 446 // address in the snoop filter below. 447 wbPkt->setBlockCached(); 448 allocateWriteBuffer(wbPkt, forward_time); 449 } 450 } else { 451 // If the block is not cached above, send packet below. Both 452 // CleanEvict and Writeback with BLOCK_CACHED flag cleared will 453 // reset the bit corresponding to this address in the snoop filter 454 // below. 455 allocateWriteBuffer(wbPkt, forward_time); 456 } 457 writebacks.pop_front(); 458 } 459} 460 461 462void 463Cache::recvTimingSnoopResp(PacketPtr pkt) 464{ 465 DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__, 466 pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 467 468 assert(pkt->isResponse()); 469 470 // must be cache-to-cache response from upper to lower level 471 ForwardResponseRecord *rec = 472 dynamic_cast<ForwardResponseRecord *>(pkt->senderState); 473 assert(!system->bypassCaches()); 474 475 if (rec == NULL) { 476 // @todo What guarantee do we have that this HardPFResp is 477 // actually for this cache, and not a cache closer to the 478 // memory? 479 assert(pkt->cmd == MemCmd::HardPFResp); 480 // Check if it's a prefetch response and handle it. We shouldn't 481 // get any other kinds of responses without FRRs. 482 DPRINTF(Cache, "Got prefetch response from above for addr %#llx (%s)\n", 483 pkt->getAddr(), pkt->isSecure() ? "s" : "ns"); 484 recvTimingResp(pkt); 485 return; 486 } 487 488 pkt->popSenderState(); 489 delete rec; 490 // forwardLatency is set here because there is a response from an 491 // upper level cache. 492 // To pay the delay that occurs if the packet comes from the bus, 493 // we charge also headerDelay. 494 Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay; 495 // Reset the timing of the packet. 496 pkt->headerDelay = pkt->payloadDelay = 0; 497 memSidePort->schedTimingSnoopResp(pkt, snoop_resp_time); 498} 499 500void 501Cache::promoteWholeLineWrites(PacketPtr pkt) 502{ 503 // Cache line clearing instructions 504 if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) && 505 (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) { 506 pkt->cmd = MemCmd::WriteLineReq; 507 DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n"); 508 } 509} 510 511bool 512Cache::recvTimingReq(PacketPtr pkt) 513{ 514 DPRINTF(CacheTags, "%s tags: %s\n", __func__, tags->print()); 515//@todo Add back in MemDebug Calls 516// MemDebug::cacheAccess(pkt); 517 518 519 /// @todo temporary hack to deal with memory corruption issue until 520 /// 4-phase transactions are complete 521 for (int x = 0; x < pendingDelete.size(); x++) 522 delete pendingDelete[x]; 523 pendingDelete.clear(); 524 525 assert(pkt->isRequest()); 526 527 // Just forward the packet if caches are disabled. 528 if (system->bypassCaches()) { 529 // @todo This should really enqueue the packet rather 530 bool M5_VAR_USED success = memSidePort->sendTimingReq(pkt); 531 assert(success); 532 return true; 533 } 534 535 promoteWholeLineWrites(pkt); 536 537 if (pkt->memInhibitAsserted()) { 538 // a cache above us (but not where the packet came from) is 539 // responding to the request 540 DPRINTF(Cache, "mem inhibited on addr %#llx (%s): not responding\n", 541 pkt->getAddr(), pkt->isSecure() ? "s" : "ns"); 542 543 // if the packet needs exclusive, and the cache that has 544 // promised to respond (setting the inhibit flag) is not 545 // providing exclusive (it is in O vs M state), we know that 546 // there may be other shared copies in the system; go out and 547 // invalidate them all 548 if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) { 549 // create a downstream express snoop with cleared packet 550 // flags, there is no need to allocate any data as the 551 // packet is merely used to co-ordinate state transitions 552 Packet *snoop_pkt = new Packet(pkt, true, false); 553 554 // also reset the bus time that the original packet has 555 // not yet paid for 556 snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0; 557 558 // make this an instantaneous express snoop, and let the 559 // other caches in the system know that the packet is 560 // inhibited, because we have found the authorative copy 561 // (O) that will supply the right data 562 snoop_pkt->setExpressSnoop(); 563 snoop_pkt->assertMemInhibit(); 564 565 // this express snoop travels towards the memory, and at 566 // every crossbar it is snooped upwards thus reaching 567 // every cache in the system 568 bool M5_VAR_USED success = memSidePort->sendTimingReq(snoop_pkt); 569 // express snoops always succeed 570 assert(success); 571 572 // main memory will delete the packet 573 } 574 575 /// @todo nominally we should just delete the packet here, 576 /// however, until 4-phase stuff we can't because sending 577 /// cache is still relying on it. 578 pendingDelete.push_back(pkt); 579 580 // no need to take any action in this particular cache as the 581 // caches along the path to memory are allowed to keep lines 582 // in a shared state, and a cache above us already committed 583 // to responding 584 return true; 585 } 586 587 // anything that is merely forwarded pays for the forward latency and 588 // the delay provided by the crossbar 589 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 590 591 // We use lookupLatency here because it is used to specify the latency 592 // to access. 593 Cycles lat = lookupLatency; 594 CacheBlk *blk = NULL; 595 bool satisfied = false; 596 { 597 PacketList writebacks; 598 // Note that lat is passed by reference here. The function 599 // access() calls accessBlock() which can modify lat value. 600 satisfied = access(pkt, blk, lat, writebacks); 601 602 // copy writebacks to write buffer here to ensure they logically 603 // proceed anything happening below 604 doWritebacks(writebacks, forward_time); 605 } 606 607 // Here we charge the headerDelay that takes into account the latencies 608 // of the bus, if the packet comes from it. 609 // The latency charged it is just lat that is the value of lookupLatency 610 // modified by access() function, or if not just lookupLatency. 611 // In case of a hit we are neglecting response latency. 612 // In case of a miss we are neglecting forward latency. 613 Tick request_time = clockEdge(lat) + pkt->headerDelay; 614 // Here we reset the timing of the packet. 615 pkt->headerDelay = pkt->payloadDelay = 0; 616 617 // track time of availability of next prefetch, if any 618 Tick next_pf_time = MaxTick; 619 620 bool needsResponse = pkt->needsResponse(); 621 622 if (satisfied) { 623 // should never be satisfying an uncacheable access as we 624 // flush and invalidate any existing block as part of the 625 // lookup 626 assert(!pkt->req->isUncacheable()); 627 628 // hit (for all other request types) 629 630 if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) { 631 if (blk) 632 blk->status &= ~BlkHWPrefetched; 633 634 // Don't notify on SWPrefetch 635 if (!pkt->cmd.isSWPrefetch()) 636 next_pf_time = prefetcher->notify(pkt); 637 } 638 639 if (needsResponse) { 640 pkt->makeTimingResponse(); 641 // @todo: Make someone pay for this 642 pkt->headerDelay = pkt->payloadDelay = 0; 643 644 // In this case we are considering request_time that takes 645 // into account the delay of the xbar, if any, and just 646 // lat, neglecting responseLatency, modelling hit latency 647 // just as lookupLatency or or the value of lat overriden 648 // by access(), that calls accessBlock() function. 649 cpuSidePort->schedTimingResp(pkt, request_time); 650 } else { 651 /// @todo nominally we should just delete the packet here, 652 /// however, until 4-phase stuff we can't because sending cache is 653 /// still relying on it. If the block is found in access(), 654 /// CleanEvict and Writeback messages will be deleted here as 655 /// well. 656 pendingDelete.push_back(pkt); 657 } 658 } else { 659 // miss 660 661 Addr blk_addr = blockAlign(pkt->getAddr()); 662 663 // ignore any existing MSHR if we are dealing with an 664 // uncacheable request 665 MSHR *mshr = pkt->req->isUncacheable() ? nullptr : 666 mshrQueue.findMatch(blk_addr, pkt->isSecure()); 667 668 // Software prefetch handling: 669 // To keep the core from waiting on data it won't look at 670 // anyway, send back a response with dummy data. Miss handling 671 // will continue asynchronously. Unfortunately, the core will 672 // insist upon freeing original Packet/Request, so we have to 673 // create a new pair with a different lifecycle. Note that this 674 // processing happens before any MSHR munging on the behalf of 675 // this request because this new Request will be the one stored 676 // into the MSHRs, not the original. 677 if (pkt->cmd.isSWPrefetch()) { 678 assert(needsResponse); 679 assert(pkt->req->hasPaddr()); 680 assert(!pkt->req->isUncacheable()); 681 682 // There's no reason to add a prefetch as an additional target 683 // to an existing MSHR. If an outstanding request is already 684 // in progress, there is nothing for the prefetch to do. 685 // If this is the case, we don't even create a request at all. 686 PacketPtr pf = nullptr; 687 688 if (!mshr) { 689 // copy the request and create a new SoftPFReq packet 690 RequestPtr req = new Request(pkt->req->getPaddr(), 691 pkt->req->getSize(), 692 pkt->req->getFlags(), 693 pkt->req->masterId()); 694 pf = new Packet(req, pkt->cmd); 695 pf->allocate(); 696 assert(pf->getAddr() == pkt->getAddr()); 697 assert(pf->getSize() == pkt->getSize()); 698 } 699 700 pkt->makeTimingResponse(); 701 // for debugging, set all the bits in the response data 702 // (also keeps valgrind from complaining when debugging settings 703 // print out instruction results) 704 std::memset(pkt->getPtr<uint8_t>(), 0xFF, pkt->getSize()); 705 // request_time is used here, taking into account lat and the delay 706 // charged if the packet comes from the xbar. 707 cpuSidePort->schedTimingResp(pkt, request_time); 708 709 // If an outstanding request is in progress (we found an 710 // MSHR) this is set to null 711 pkt = pf; 712 } 713 714 if (mshr) { 715 /// MSHR hit 716 /// @note writebacks will be checked in getNextMSHR() 717 /// for any conflicting requests to the same block 718 719 //@todo remove hw_pf here 720 721 // Coalesce unless it was a software prefetch (see above). 722 if (pkt) { 723 assert(pkt->cmd != MemCmd::Writeback); 724 // CleanEvicts corresponding to blocks which have outstanding 725 // requests in MSHRs can be deleted here. 726 if (pkt->cmd == MemCmd::CleanEvict) { 727 pendingDelete.push_back(pkt); 728 } else { 729 DPRINTF(Cache, "%s coalescing MSHR for %s addr %#llx size %d\n", 730 __func__, pkt->cmdString(), pkt->getAddr(), 731 pkt->getSize()); 732 733 assert(pkt->req->masterId() < system->maxMasters()); 734 mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++; 735 if (mshr->threadNum != 0/*pkt->req->threadId()*/) { 736 mshr->threadNum = -1; 737 } 738 // We use forward_time here because it is the same 739 // considering new targets. We have multiple 740 // requests for the same address here. It 741 // specifies the latency to allocate an internal 742 // buffer and to schedule an event to the queued 743 // port and also takes into account the additional 744 // delay of the xbar. 745 mshr->allocateTarget(pkt, forward_time, order++); 746 if (mshr->getNumTargets() == numTarget) { 747 noTargetMSHR = mshr; 748 setBlocked(Blocked_NoTargets); 749 // need to be careful with this... if this mshr isn't 750 // ready yet (i.e. time > curTick()), we don't want to 751 // move it ahead of mshrs that are ready 752 // mshrQueue.moveToFront(mshr); 753 } 754 } 755 // We should call the prefetcher reguardless if the request is 756 // satisfied or not, reguardless if the request is in the MSHR or 757 // not. The request could be a ReadReq hit, but still not 758 // satisfied (potentially because of a prior write to the same 759 // cache line. So, even when not satisfied, tehre is an MSHR 760 // already allocated for this, we need to let the prefetcher know 761 // about the request 762 if (prefetcher) { 763 // Don't notify on SWPrefetch 764 if (!pkt->cmd.isSWPrefetch()) 765 next_pf_time = prefetcher->notify(pkt); 766 } 767 } 768 } else { 769 // no MSHR 770 assert(pkt->req->masterId() < system->maxMasters()); 771 if (pkt->req->isUncacheable()) { 772 mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++; 773 } else { 774 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++; 775 } 776 777 if (pkt->evictingBlock() || 778 (pkt->req->isUncacheable() && pkt->isWrite())) { 779 // We use forward_time here because there is an 780 // uncached memory write, forwarded to WriteBuffer. 781 allocateWriteBuffer(pkt, forward_time); 782 } else { 783 if (blk && blk->isValid()) { 784 // should have flushed and have no valid block 785 assert(!pkt->req->isUncacheable()); 786 787 // If we have a write miss to a valid block, we 788 // need to mark the block non-readable. Otherwise 789 // if we allow reads while there's an outstanding 790 // write miss, the read could return stale data 791 // out of the cache block... a more aggressive 792 // system could detect the overlap (if any) and 793 // forward data out of the MSHRs, but we don't do 794 // that yet. Note that we do need to leave the 795 // block valid so that it stays in the cache, in 796 // case we get an upgrade response (and hence no 797 // new data) when the write miss completes. 798 // As long as CPUs do proper store/load forwarding 799 // internally, and have a sufficiently weak memory 800 // model, this is probably unnecessary, but at some 801 // point it must have seemed like we needed it... 802 assert(pkt->needsExclusive()); 803 assert(!blk->isWritable()); 804 blk->status &= ~BlkReadable; 805 } 806 // Here we are using forward_time, modelling the latency of 807 // a miss (outbound) just as forwardLatency, neglecting the 808 // lookupLatency component. 809 allocateMissBuffer(pkt, forward_time); 810 } 811 812 if (prefetcher) { 813 // Don't notify on SWPrefetch 814 if (!pkt->cmd.isSWPrefetch()) 815 next_pf_time = prefetcher->notify(pkt); 816 } 817 } 818 } 819 820 if (next_pf_time != MaxTick) 821 schedMemSideSendEvent(next_pf_time); 822 823 return true; 824} 825 826 827// See comment in cache.hh. 828PacketPtr 829Cache::getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk, 830 bool needsExclusive) const 831{ 832 bool blkValid = blk && blk->isValid(); 833 834 if (cpu_pkt->req->isUncacheable()) { 835 // note that at the point we see the uncacheable request we 836 // flush any block, but there could be an outstanding MSHR, 837 // and the cache could have filled again before we actually 838 // send out the forwarded uncacheable request (blk could thus 839 // be non-null) 840 return NULL; 841 } 842 843 if (!blkValid && 844 (cpu_pkt->isUpgrade() || 845 cpu_pkt->evictingBlock())) { 846 // Writebacks that weren't allocated in access() and upgrades 847 // from upper-level caches that missed completely just go 848 // through. 849 return NULL; 850 } 851 852 assert(cpu_pkt->needsResponse()); 853 854 MemCmd cmd; 855 // @TODO make useUpgrades a parameter. 856 // Note that ownership protocols require upgrade, otherwise a 857 // write miss on a shared owned block will generate a ReadExcl, 858 // which will clobber the owned copy. 859 const bool useUpgrades = true; 860 if (blkValid && useUpgrades) { 861 // only reason to be here is that blk is shared 862 // (read-only) and we need exclusive 863 assert(needsExclusive); 864 assert(!blk->isWritable()); 865 cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq; 866 } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq || 867 cpu_pkt->cmd == MemCmd::StoreCondFailReq) { 868 // Even though this SC will fail, we still need to send out the 869 // request and get the data to supply it to other snoopers in the case 870 // where the determination the StoreCond fails is delayed due to 871 // all caches not being on the same local bus. 872 cmd = MemCmd::SCUpgradeFailReq; 873 } else if (cpu_pkt->cmd == MemCmd::WriteLineReq) { 874 // forward as invalidate to all other caches, this gives us 875 // the line in exclusive state, and invalidates all other 876 // copies 877 cmd = MemCmd::InvalidateReq; 878 } else { 879 // block is invalid 880 cmd = needsExclusive ? MemCmd::ReadExReq : 881 (isReadOnly ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq); 882 } 883 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize); 884 885 // if there are sharers in the upper levels, pass that info downstream 886 if (cpu_pkt->sharedAsserted()) { 887 // note that cpu_pkt may have spent a considerable time in the 888 // MSHR queue and that the information could possibly be out 889 // of date, however, there is no harm in conservatively 890 // assuming the block is shared 891 pkt->assertShared(); 892 DPRINTF(Cache, "%s passing shared from %s to %s addr %#llx size %d\n", 893 __func__, cpu_pkt->cmdString(), pkt->cmdString(), 894 pkt->getAddr(), pkt->getSize()); 895 } 896 897 // the packet should be block aligned 898 assert(pkt->getAddr() == blockAlign(pkt->getAddr())); 899 900 pkt->allocate(); 901 DPRINTF(Cache, "%s created %s from %s for addr %#llx size %d\n", 902 __func__, pkt->cmdString(), cpu_pkt->cmdString(), pkt->getAddr(), 903 pkt->getSize()); 904 return pkt; 905} 906 907 908Tick 909Cache::recvAtomic(PacketPtr pkt) 910{ 911 // We are in atomic mode so we pay just for lookupLatency here. 912 Cycles lat = lookupLatency; 913 // @TODO: make this a parameter 914 bool last_level_cache = false; 915 916 // Forward the request if the system is in cache bypass mode. 917 if (system->bypassCaches()) 918 return ticksToCycles(memSidePort->sendAtomic(pkt)); 919 920 promoteWholeLineWrites(pkt); 921 922 if (pkt->memInhibitAsserted()) { 923 // have to invalidate ourselves and any lower caches even if 924 // upper cache will be responding 925 if (pkt->isInvalidate()) { 926 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure()); 927 if (blk && blk->isValid()) { 928 tags->invalidate(blk); 929 blk->invalidate(); 930 DPRINTF(Cache, "rcvd mem-inhibited %s on %#llx (%s):" 931 " invalidating\n", 932 pkt->cmdString(), pkt->getAddr(), 933 pkt->isSecure() ? "s" : "ns"); 934 } 935 if (!last_level_cache) { 936 DPRINTF(Cache, "forwarding mem-inhibited %s on %#llx (%s)\n", 937 pkt->cmdString(), pkt->getAddr(), 938 pkt->isSecure() ? "s" : "ns"); 939 lat += ticksToCycles(memSidePort->sendAtomic(pkt)); 940 } 941 } else { 942 DPRINTF(Cache, "rcvd mem-inhibited %s on %#llx: not responding\n", 943 pkt->cmdString(), pkt->getAddr()); 944 } 945 946 return lat * clockPeriod(); 947 } 948 949 // should assert here that there are no outstanding MSHRs or 950 // writebacks... that would mean that someone used an atomic 951 // access in timing mode 952 953 CacheBlk *blk = NULL; 954 PacketList writebacks; 955 bool satisfied = access(pkt, blk, lat, writebacks); 956 957 // handle writebacks resulting from the access here to ensure they 958 // logically proceed anything happening below 959 while (!writebacks.empty()){ 960 PacketPtr wbPkt = writebacks.front(); 961 memSidePort->sendAtomic(wbPkt); 962 writebacks.pop_front(); 963 delete wbPkt; 964 } 965 966 if (!satisfied) { 967 // MISS 968 969 PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive()); 970 971 bool is_forward = (bus_pkt == NULL); 972 973 if (is_forward) { 974 // just forwarding the same request to the next level 975 // no local cache operation involved 976 bus_pkt = pkt; 977 } 978 979 DPRINTF(Cache, "Sending an atomic %s for %#llx (%s)\n", 980 bus_pkt->cmdString(), bus_pkt->getAddr(), 981 bus_pkt->isSecure() ? "s" : "ns"); 982 983#if TRACING_ON 984 CacheBlk::State old_state = blk ? blk->status : 0; 985#endif 986 987 lat += ticksToCycles(memSidePort->sendAtomic(bus_pkt)); 988 989 // We are now dealing with the response handling 990 DPRINTF(Cache, "Receive response: %s for addr %#llx (%s) in state %i\n", 991 bus_pkt->cmdString(), bus_pkt->getAddr(), 992 bus_pkt->isSecure() ? "s" : "ns", 993 old_state); 994 995 // If packet was a forward, the response (if any) is already 996 // in place in the bus_pkt == pkt structure, so we don't need 997 // to do anything. Otherwise, use the separate bus_pkt to 998 // generate response to pkt and then delete it. 999 if (!is_forward) { 1000 if (pkt->needsResponse()) { 1001 assert(bus_pkt->isResponse()); 1002 if (bus_pkt->isError()) { 1003 pkt->makeAtomicResponse(); 1004 pkt->copyError(bus_pkt); 1005 } else if (pkt->cmd == MemCmd::InvalidateReq) { 1006 if (blk) { 1007 // invalidate response to a cache that received 1008 // an invalidate request 1009 satisfyCpuSideRequest(pkt, blk); 1010 } 1011 } else if (pkt->cmd == MemCmd::WriteLineReq) { 1012 // note the use of pkt, not bus_pkt here. 1013 1014 // write-line request to the cache that promoted 1015 // the write to a whole line 1016 blk = handleFill(pkt, blk, writebacks); 1017 satisfyCpuSideRequest(pkt, blk); 1018 } else if (bus_pkt->isRead() || 1019 bus_pkt->cmd == MemCmd::UpgradeResp) { 1020 // we're updating cache state to allow us to 1021 // satisfy the upstream request from the cache 1022 blk = handleFill(bus_pkt, blk, writebacks); 1023 satisfyCpuSideRequest(pkt, blk); 1024 } else { 1025 // we're satisfying the upstream request without 1026 // modifying cache state, e.g., a write-through 1027 pkt->makeAtomicResponse(); 1028 } 1029 } 1030 delete bus_pkt; 1031 } 1032 } 1033 1034 // Note that we don't invoke the prefetcher at all in atomic mode. 1035 // It's not clear how to do it properly, particularly for 1036 // prefetchers that aggressively generate prefetch candidates and 1037 // rely on bandwidth contention to throttle them; these will tend 1038 // to pollute the cache in atomic mode since there is no bandwidth 1039 // contention. If we ever do want to enable prefetching in atomic 1040 // mode, though, this is the place to do it... see timingAccess() 1041 // for an example (though we'd want to issue the prefetch(es) 1042 // immediately rather than calling requestMemSideBus() as we do 1043 // there). 1044 1045 // Handle writebacks (from the response handling) if needed 1046 while (!writebacks.empty()){ 1047 PacketPtr wbPkt = writebacks.front(); 1048 memSidePort->sendAtomic(wbPkt); 1049 writebacks.pop_front(); 1050 delete wbPkt; 1051 } 1052 1053 if (pkt->needsResponse()) { 1054 pkt->makeAtomicResponse(); 1055 } 1056 1057 return lat * clockPeriod(); 1058} 1059 1060 1061void 1062Cache::functionalAccess(PacketPtr pkt, bool fromCpuSide) 1063{ 1064 if (system->bypassCaches()) { 1065 // Packets from the memory side are snoop request and 1066 // shouldn't happen in bypass mode. 1067 assert(fromCpuSide); 1068 1069 // The cache should be flushed if we are in cache bypass mode, 1070 // so we don't need to check if we need to update anything. 1071 memSidePort->sendFunctional(pkt); 1072 return; 1073 } 1074 1075 Addr blk_addr = blockAlign(pkt->getAddr()); 1076 bool is_secure = pkt->isSecure(); 1077 CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure); 1078 MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure); 1079 1080 pkt->pushLabel(name()); 1081 1082 CacheBlkPrintWrapper cbpw(blk); 1083 1084 // Note that just because an L2/L3 has valid data doesn't mean an 1085 // L1 doesn't have a more up-to-date modified copy that still 1086 // needs to be found. As a result we always update the request if 1087 // we have it, but only declare it satisfied if we are the owner. 1088 1089 // see if we have data at all (owned or otherwise) 1090 bool have_data = blk && blk->isValid() 1091 && pkt->checkFunctional(&cbpw, blk_addr, is_secure, blkSize, 1092 blk->data); 1093 1094 // data we have is dirty if marked as such or if valid & ownership 1095 // pending due to outstanding UpgradeReq 1096 bool have_dirty = 1097 have_data && (blk->isDirty() || 1098 (mshr && mshr->inService && mshr->isPendingDirty())); 1099 1100 bool done = have_dirty 1101 || cpuSidePort->checkFunctional(pkt) 1102 || mshrQueue.checkFunctional(pkt, blk_addr) 1103 || writeBuffer.checkFunctional(pkt, blk_addr) 1104 || memSidePort->checkFunctional(pkt); 1105 1106 DPRINTF(Cache, "functional %s %#llx (%s) %s%s%s\n", 1107 pkt->cmdString(), pkt->getAddr(), is_secure ? "s" : "ns", 1108 (blk && blk->isValid()) ? "valid " : "", 1109 have_data ? "data " : "", done ? "done " : ""); 1110 1111 // We're leaving the cache, so pop cache->name() label 1112 pkt->popLabel(); 1113 1114 if (done) { 1115 pkt->makeResponse(); 1116 } else { 1117 // if it came as a request from the CPU side then make sure it 1118 // continues towards the memory side 1119 if (fromCpuSide) { 1120 memSidePort->sendFunctional(pkt); 1121 } else if (forwardSnoops && cpuSidePort->isSnooping()) { 1122 // if it came from the memory side, it must be a snoop request 1123 // and we should only forward it if we are forwarding snoops 1124 cpuSidePort->sendFunctionalSnoop(pkt); 1125 } 1126 } 1127} 1128 1129 1130///////////////////////////////////////////////////// 1131// 1132// Response handling: responses from the memory side 1133// 1134///////////////////////////////////////////////////// 1135 1136 1137void 1138Cache::recvTimingResp(PacketPtr pkt) 1139{ 1140 assert(pkt->isResponse()); 1141 1142 // all header delay should be paid for by the crossbar, unless 1143 // this is a prefetch response from above 1144 panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp, 1145 "%s saw a non-zero packet delay\n", name()); 1146 1147 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState); 1148 bool is_error = pkt->isError(); 1149 1150 assert(mshr); 1151 1152 if (is_error) { 1153 DPRINTF(Cache, "Cache received packet with error for addr %#llx (%s), " 1154 "cmd: %s\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns", 1155 pkt->cmdString()); 1156 } 1157 1158 DPRINTF(Cache, "Handling response %s for addr %#llx size %d (%s)\n", 1159 pkt->cmdString(), pkt->getAddr(), pkt->getSize(), 1160 pkt->isSecure() ? "s" : "ns"); 1161 1162 MSHRQueue *mq = mshr->queue; 1163 bool wasFull = mq->isFull(); 1164 1165 if (mshr == noTargetMSHR) { 1166 // we always clear at least one target 1167 clearBlocked(Blocked_NoTargets); 1168 noTargetMSHR = NULL; 1169 } 1170 1171 // Initial target is used just for stats 1172 MSHR::Target *initial_tgt = mshr->getTarget(); 1173 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure()); 1174 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex(); 1175 Tick miss_latency = curTick() - initial_tgt->recvTime; 1176 PacketList writebacks; 1177 // We need forward_time here because we have a call of 1178 // allocateWriteBuffer() that need this parameter to specify the 1179 // time to request the bus. In this case we use forward latency 1180 // because there is a writeback. We pay also here for headerDelay 1181 // that is charged of bus latencies if the packet comes from the 1182 // bus. 1183 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 1184 1185 if (pkt->req->isUncacheable()) { 1186 assert(pkt->req->masterId() < system->maxMasters()); 1187 mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] += 1188 miss_latency; 1189 } else { 1190 assert(pkt->req->masterId() < system->maxMasters()); 1191 mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] += 1192 miss_latency; 1193 } 1194 1195 bool is_fill = !mshr->isForward && 1196 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp); 1197 1198 if (is_fill && !is_error) { 1199 DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n", 1200 pkt->getAddr()); 1201 1202 // give mshr a chance to do some dirty work 1203 mshr->handleFill(pkt, blk); 1204 1205 blk = handleFill(pkt, blk, writebacks); 1206 assert(blk != NULL); 1207 } 1208 1209 // allow invalidation responses originating from write-line 1210 // requests to be discarded 1211 bool discard_invalidate = false; 1212 1213 // First offset for critical word first calculations 1214 int initial_offset = initial_tgt->pkt->getOffset(blkSize); 1215 1216 while (mshr->hasTargets()) { 1217 MSHR::Target *target = mshr->getTarget(); 1218 Packet *tgt_pkt = target->pkt; 1219 1220 switch (target->source) { 1221 case MSHR::Target::FromCPU: 1222 Tick completion_time; 1223 // Here we charge on completion_time the delay of the xbar if the 1224 // packet comes from it, charged on headerDelay. 1225 completion_time = pkt->headerDelay; 1226 1227 // Software prefetch handling for cache closest to core 1228 if (tgt_pkt->cmd.isSWPrefetch()) { 1229 // a software prefetch would have already been ack'd immediately 1230 // with dummy data so the core would be able to retire it. 1231 // this request completes right here, so we deallocate it. 1232 delete tgt_pkt->req; 1233 delete tgt_pkt; 1234 break; // skip response 1235 } 1236 1237 // unlike the other packet flows, where data is found in other 1238 // caches or memory and brought back, write-line requests always 1239 // have the data right away, so the above check for "is fill?" 1240 // cannot actually be determined until examining the stored MSHR 1241 // state. We "catch up" with that logic here, which is duplicated 1242 // from above. 1243 if (tgt_pkt->cmd == MemCmd::WriteLineReq) { 1244 assert(!is_error); 1245 1246 // NB: we use the original packet here and not the response! 1247 mshr->handleFill(tgt_pkt, blk); 1248 blk = handleFill(tgt_pkt, blk, writebacks); 1249 assert(blk != NULL); 1250 1251 // treat as a fill, and discard the invalidation 1252 // response 1253 is_fill = true; 1254 discard_invalidate = true; 1255 } 1256 1257 if (is_fill) { 1258 satisfyCpuSideRequest(tgt_pkt, blk, 1259 true, mshr->hasPostDowngrade()); 1260 1261 // How many bytes past the first request is this one 1262 int transfer_offset = 1263 tgt_pkt->getOffset(blkSize) - initial_offset; 1264 if (transfer_offset < 0) { 1265 transfer_offset += blkSize; 1266 } 1267 1268 // If not critical word (offset) return payloadDelay. 1269 // responseLatency is the latency of the return path 1270 // from lower level caches/memory to an upper level cache or 1271 // the core. 1272 completion_time += clockEdge(responseLatency) + 1273 (transfer_offset ? pkt->payloadDelay : 0); 1274 1275 assert(!tgt_pkt->req->isUncacheable()); 1276 1277 assert(tgt_pkt->req->masterId() < system->maxMasters()); 1278 missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] += 1279 completion_time - target->recvTime; 1280 } else if (pkt->cmd == MemCmd::UpgradeFailResp) { 1281 // failed StoreCond upgrade 1282 assert(tgt_pkt->cmd == MemCmd::StoreCondReq || 1283 tgt_pkt->cmd == MemCmd::StoreCondFailReq || 1284 tgt_pkt->cmd == MemCmd::SCUpgradeFailReq); 1285 // responseLatency is the latency of the return path 1286 // from lower level caches/memory to an upper level cache or 1287 // the core. 1288 completion_time += clockEdge(responseLatency) + 1289 pkt->payloadDelay; 1290 tgt_pkt->req->setExtraData(0); 1291 } else { 1292 // not a cache fill, just forwarding response 1293 // responseLatency is the latency of the return path 1294 // from lower level cahces/memory to the core. 1295 completion_time += clockEdge(responseLatency) + 1296 pkt->payloadDelay; 1297 if (pkt->isRead() && !is_error) { 1298 // sanity check 1299 assert(pkt->getAddr() == tgt_pkt->getAddr()); 1300 assert(pkt->getSize() >= tgt_pkt->getSize()); 1301 1302 tgt_pkt->setData(pkt->getConstPtr<uint8_t>()); 1303 } 1304 } 1305 tgt_pkt->makeTimingResponse(); 1306 // if this packet is an error copy that to the new packet 1307 if (is_error) 1308 tgt_pkt->copyError(pkt); 1309 if (tgt_pkt->cmd == MemCmd::ReadResp && 1310 (pkt->isInvalidate() || mshr->hasPostInvalidate())) { 1311 // If intermediate cache got ReadRespWithInvalidate, 1312 // propagate that. Response should not have 1313 // isInvalidate() set otherwise. 1314 tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate; 1315 DPRINTF(Cache, "%s updated cmd to %s for addr %#llx\n", 1316 __func__, tgt_pkt->cmdString(), tgt_pkt->getAddr()); 1317 } 1318 // Reset the bus additional time as it is now accounted for 1319 tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0; 1320 cpuSidePort->schedTimingResp(tgt_pkt, completion_time); 1321 break; 1322 1323 case MSHR::Target::FromPrefetcher: 1324 assert(tgt_pkt->cmd == MemCmd::HardPFReq); 1325 if (blk) 1326 blk->status |= BlkHWPrefetched; 1327 delete tgt_pkt->req; 1328 delete tgt_pkt; 1329 break; 1330 1331 case MSHR::Target::FromSnoop: 1332 // I don't believe that a snoop can be in an error state 1333 assert(!is_error); 1334 // response to snoop request 1335 DPRINTF(Cache, "processing deferred snoop...\n"); 1336 assert(!(pkt->isInvalidate() && !mshr->hasPostInvalidate())); 1337 handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate()); 1338 break; 1339 1340 default: 1341 panic("Illegal target->source enum %d\n", target->source); 1342 } 1343 1344 mshr->popTarget(); 1345 } 1346 1347 if (blk && blk->isValid()) { 1348 // an invalidate response stemming from a write line request 1349 // should not invalidate the block, so check if the 1350 // invalidation should be discarded 1351 if ((pkt->isInvalidate() || mshr->hasPostInvalidate()) && 1352 !discard_invalidate) { 1353 assert(blk != tempBlock); 1354 tags->invalidate(blk); 1355 blk->invalidate(); 1356 } else if (mshr->hasPostDowngrade()) { 1357 blk->status &= ~BlkWritable; 1358 } 1359 } 1360 1361 if (mshr->promoteDeferredTargets()) { 1362 // avoid later read getting stale data while write miss is 1363 // outstanding.. see comment in timingAccess() 1364 if (blk) { 1365 blk->status &= ~BlkReadable; 1366 } 1367 mq = mshr->queue; 1368 mq->markPending(mshr); 1369 schedMemSideSendEvent(clockEdge() + pkt->payloadDelay); 1370 } else { 1371 mq->deallocate(mshr); 1372 if (wasFull && !mq->isFull()) { 1373 clearBlocked((BlockedCause)mq->index); 1374 } 1375 1376 // Request the bus for a prefetch if this deallocation freed enough 1377 // MSHRs for a prefetch to take place 1378 if (prefetcher && mq == &mshrQueue && mshrQueue.canPrefetch()) { 1379 Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(), 1380 clockEdge()); 1381 if (next_pf_time != MaxTick) 1382 schedMemSideSendEvent(next_pf_time); 1383 } 1384 } 1385 // reset the xbar additional timinig as it is now accounted for 1386 pkt->headerDelay = pkt->payloadDelay = 0; 1387 1388 // copy writebacks to write buffer 1389 doWritebacks(writebacks, forward_time); 1390 1391 // if we used temp block, check to see if its valid and then clear it out 1392 if (blk == tempBlock && tempBlock->isValid()) { 1393 // We use forwardLatency here because we are copying 1394 // Writebacks/CleanEvicts to write buffer. It specifies the latency to 1395 // allocate an internal buffer and to schedule an event to the 1396 // queued port. 1397 if (blk->isDirty()) { 1398 PacketPtr wbPkt = writebackBlk(blk); 1399 allocateWriteBuffer(wbPkt, forward_time); 1400 // Set BLOCK_CACHED flag if cached above. 1401 if (isCachedAbove(wbPkt)) 1402 wbPkt->setBlockCached(); 1403 } else { 1404 PacketPtr wcPkt = cleanEvictBlk(blk); 1405 // Check to see if block is cached above. If not allocate 1406 // write buffer 1407 if (isCachedAbove(wcPkt)) 1408 delete wcPkt; 1409 else 1410 allocateWriteBuffer(wcPkt, forward_time); 1411 } 1412 blk->invalidate(); 1413 } 1414 1415 DPRINTF(Cache, "Leaving %s with %s for addr %#llx\n", __func__, 1416 pkt->cmdString(), pkt->getAddr()); 1417 delete pkt; 1418} 1419 1420PacketPtr 1421Cache::writebackBlk(CacheBlk *blk) 1422{ 1423 chatty_assert(!isReadOnly, "Writeback from read-only cache"); 1424 assert(blk && blk->isValid() && blk->isDirty()); 1425 1426 writebacks[Request::wbMasterId]++; 1427 1428 Request *writebackReq = 1429 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0, 1430 Request::wbMasterId); 1431 if (blk->isSecure()) 1432 writebackReq->setFlags(Request::SECURE); 1433 1434 writebackReq->taskId(blk->task_id); 1435 blk->task_id= ContextSwitchTaskId::Unknown; 1436 blk->tickInserted = curTick(); 1437 1438 PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback); 1439 if (blk->isWritable()) { 1440 // not asserting shared means we pass the block in modified 1441 // state, mark our own block non-writeable 1442 blk->status &= ~BlkWritable; 1443 } else { 1444 // we are in the owned state, tell the receiver 1445 writeback->assertShared(); 1446 } 1447 1448 writeback->allocate(); 1449 std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize); 1450 1451 blk->status &= ~BlkDirty; 1452 return writeback; 1453} 1454 1455PacketPtr 1456Cache::cleanEvictBlk(CacheBlk *blk) 1457{ 1458 assert(blk && blk->isValid() && !blk->isDirty()); 1459 // Creating a zero sized write, a message to the snoop filter 1460 Request *req = 1461 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0, 1462 Request::wbMasterId); 1463 if (blk->isSecure()) 1464 req->setFlags(Request::SECURE); 1465 1466 req->taskId(blk->task_id); 1467 blk->task_id = ContextSwitchTaskId::Unknown; 1468 blk->tickInserted = curTick(); 1469 1470 PacketPtr pkt = new Packet(req, MemCmd::CleanEvict); 1471 pkt->allocate(); 1472 DPRINTF(Cache, "%s%s %x Create CleanEvict\n", pkt->cmdString(), 1473 pkt->req->isInstFetch() ? " (ifetch)" : "", 1474 pkt->getAddr()); 1475 1476 return pkt; 1477} 1478 1479void 1480Cache::memWriteback() 1481{ 1482 CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor); 1483 tags->forEachBlk(visitor); 1484} 1485 1486void 1487Cache::memInvalidate() 1488{ 1489 CacheBlkVisitorWrapper visitor(*this, &Cache::invalidateVisitor); 1490 tags->forEachBlk(visitor); 1491} 1492 1493bool 1494Cache::isDirty() const 1495{ 1496 CacheBlkIsDirtyVisitor visitor; 1497 tags->forEachBlk(visitor); 1498 1499 return visitor.isDirty(); 1500} 1501 1502bool 1503Cache::writebackVisitor(CacheBlk &blk) 1504{ 1505 if (blk.isDirty()) { 1506 assert(blk.isValid()); 1507 1508 Request request(tags->regenerateBlkAddr(blk.tag, blk.set), 1509 blkSize, 0, Request::funcMasterId); 1510 request.taskId(blk.task_id); 1511 1512 Packet packet(&request, MemCmd::WriteReq); 1513 packet.dataStatic(blk.data); 1514 1515 memSidePort->sendFunctional(&packet); 1516 1517 blk.status &= ~BlkDirty; 1518 } 1519 1520 return true; 1521} 1522 1523bool 1524Cache::invalidateVisitor(CacheBlk &blk) 1525{ 1526 1527 if (blk.isDirty()) 1528 warn_once("Invalidating dirty cache lines. Expect things to break.\n"); 1529 1530 if (blk.isValid()) { 1531 assert(!blk.isDirty()); 1532 tags->invalidate(&blk); 1533 blk.invalidate(); 1534 } 1535 1536 return true; 1537} 1538 1539CacheBlk* 1540Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks) 1541{ 1542 CacheBlk *blk = tags->findVictim(addr); 1543 1544 // It is valid to return NULL if there is no victim 1545 if (!blk) 1546 return nullptr; 1547 1548 if (blk->isValid()) { 1549 Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set); 1550 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure()); 1551 if (repl_mshr) { 1552 // must be an outstanding upgrade request 1553 // on a block we're about to replace... 1554 assert(!blk->isWritable() || blk->isDirty()); 1555 assert(repl_mshr->needsExclusive()); 1556 // too hard to replace block with transient state 1557 // allocation failed, block not inserted 1558 return NULL; 1559 } else { 1560 DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx (%s): %s\n", 1561 repl_addr, blk->isSecure() ? "s" : "ns", 1562 addr, is_secure ? "s" : "ns", 1563 blk->isDirty() ? "writeback" : "clean"); 1564 1565 // Will send up Writeback/CleanEvict snoops via isCachedAbove 1566 // when pushing this writeback list into the write buffer. 1567 if (blk->isDirty()) { 1568 // Save writeback packet for handling by caller 1569 writebacks.push_back(writebackBlk(blk)); 1570 } else { 1571 writebacks.push_back(cleanEvictBlk(blk)); 1572 } 1573 } 1574 } 1575 1576 return blk; 1577} 1578 1579 1580// Note that the reason we return a list of writebacks rather than 1581// inserting them directly in the write buffer is that this function 1582// is called by both atomic and timing-mode accesses, and in atomic 1583// mode we don't mess with the write buffer (we just perform the 1584// writebacks atomically once the original request is complete). 1585CacheBlk* 1586Cache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks) 1587{ 1588 assert(pkt->isResponse() || pkt->cmd == MemCmd::WriteLineReq); 1589 Addr addr = pkt->getAddr(); 1590 bool is_secure = pkt->isSecure(); 1591#if TRACING_ON 1592 CacheBlk::State old_state = blk ? blk->status : 0; 1593#endif 1594 1595 // When handling a fill, discard any CleanEvicts for the 1596 // same address in write buffer. 1597 Addr M5_VAR_USED blk_addr = blockAlign(pkt->getAddr()); 1598 std::vector<MSHR *> M5_VAR_USED wbs; 1599 assert (!writeBuffer.findMatches(blk_addr, is_secure, wbs)); 1600 1601 if (blk == NULL) { 1602 // better have read new data... 1603 assert(pkt->hasData()); 1604 1605 // only read responses and write-line requests have data; 1606 // note that we don't write the data here for write-line - that 1607 // happens in the subsequent satisfyCpuSideRequest. 1608 assert(pkt->isRead() || pkt->cmd == MemCmd::WriteLineReq); 1609 1610 // need to do a replacement 1611 blk = allocateBlock(addr, is_secure, writebacks); 1612 if (blk == NULL) { 1613 // No replaceable block... just use temporary storage to 1614 // complete the current request and then get rid of it 1615 assert(!tempBlock->isValid()); 1616 blk = tempBlock; 1617 tempBlock->set = tags->extractSet(addr); 1618 tempBlock->tag = tags->extractTag(addr); 1619 // @todo: set security state as well... 1620 DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr, 1621 is_secure ? "s" : "ns"); 1622 } else { 1623 tags->insertBlock(pkt, blk); 1624 } 1625 1626 // we should never be overwriting a valid block 1627 assert(!blk->isValid()); 1628 } else { 1629 // existing block... probably an upgrade 1630 assert(blk->tag == tags->extractTag(addr)); 1631 // either we're getting new data or the block should already be valid 1632 assert(pkt->hasData() || blk->isValid()); 1633 // don't clear block status... if block is already dirty we 1634 // don't want to lose that 1635 } 1636 1637 if (is_secure) 1638 blk->status |= BlkSecure; 1639 blk->status |= BlkValid | BlkReadable; 1640 1641 if (!pkt->sharedAsserted()) { 1642 // we could get non-shared responses from memory (rather than 1643 // a cache) even in a read-only cache, note that we set this 1644 // bit even for a read-only cache as we use it to represent 1645 // the exclusive state 1646 blk->status |= BlkWritable; 1647 1648 // If we got this via cache-to-cache transfer (i.e., from a 1649 // cache that was an owner) and took away that owner's copy, 1650 // then we need to write it back. Normally this happens 1651 // anyway as a side effect of getting a copy to write it, but 1652 // there are cases (such as failed store conditionals or 1653 // compare-and-swaps) where we'll demand an exclusive copy but 1654 // end up not writing it. 1655 if (pkt->memInhibitAsserted()) { 1656 blk->status |= BlkDirty; 1657 1658 chatty_assert(!isReadOnly, "Should never see dirty snoop response " 1659 "in read-only cache %s\n", name()); 1660 } 1661 } 1662 1663 DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n", 1664 addr, is_secure ? "s" : "ns", old_state, blk->print()); 1665 1666 // if we got new data, copy it in (checking for a read response 1667 // and a response that has data is the same in the end) 1668 if (pkt->isRead()) { 1669 // sanity checks 1670 assert(pkt->hasData()); 1671 assert(pkt->getSize() == blkSize); 1672 1673 std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize); 1674 } 1675 // We pay for fillLatency here. 1676 blk->whenReady = clockEdge() + fillLatency * clockPeriod() + 1677 pkt->payloadDelay; 1678 1679 return blk; 1680} 1681 1682 1683///////////////////////////////////////////////////// 1684// 1685// Snoop path: requests coming in from the memory side 1686// 1687///////////////////////////////////////////////////// 1688 1689void 1690Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data, 1691 bool already_copied, bool pending_inval) 1692{ 1693 // sanity check 1694 assert(req_pkt->isRequest()); 1695 assert(req_pkt->needsResponse()); 1696 1697 DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__, 1698 req_pkt->cmdString(), req_pkt->getAddr(), req_pkt->getSize()); 1699 // timing-mode snoop responses require a new packet, unless we 1700 // already made a copy... 1701 PacketPtr pkt = req_pkt; 1702 if (!already_copied) 1703 // do not clear flags, and allocate space for data if the 1704 // packet needs it (the only packets that carry data are read 1705 // responses) 1706 pkt = new Packet(req_pkt, false, req_pkt->isRead()); 1707 1708 assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() || 1709 pkt->sharedAsserted()); 1710 pkt->makeTimingResponse(); 1711 if (pkt->isRead()) { 1712 pkt->setDataFromBlock(blk_data, blkSize); 1713 } 1714 if (pkt->cmd == MemCmd::ReadResp && pending_inval) { 1715 // Assume we defer a response to a read from a far-away cache 1716 // A, then later defer a ReadExcl from a cache B on the same 1717 // bus as us. We'll assert MemInhibit in both cases, but in 1718 // the latter case MemInhibit will keep the invalidation from 1719 // reaching cache A. This special response tells cache A that 1720 // it gets the block to satisfy its read, but must immediately 1721 // invalidate it. 1722 pkt->cmd = MemCmd::ReadRespWithInvalidate; 1723 } 1724 // Here we consider forward_time, paying for just forward latency and 1725 // also charging the delay provided by the xbar. 1726 // forward_time is used as send_time in next allocateWriteBuffer(). 1727 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 1728 // Here we reset the timing of the packet. 1729 pkt->headerDelay = pkt->payloadDelay = 0; 1730 DPRINTF(Cache, "%s created response: %s addr %#llx size %d tick: %lu\n", 1731 __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize(), 1732 forward_time); 1733 memSidePort->schedTimingSnoopResp(pkt, forward_time, true); 1734} 1735 1736void 1737Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing, 1738 bool is_deferred, bool pending_inval) 1739{ 1740 DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__, 1741 pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 1742 // deferred snoops can only happen in timing mode 1743 assert(!(is_deferred && !is_timing)); 1744 // pending_inval only makes sense on deferred snoops 1745 assert(!(pending_inval && !is_deferred)); 1746 assert(pkt->isRequest()); 1747 1748 // the packet may get modified if we or a forwarded snooper 1749 // responds in atomic mode, so remember a few things about the 1750 // original packet up front 1751 bool invalidate = pkt->isInvalidate(); 1752 bool M5_VAR_USED needs_exclusive = pkt->needsExclusive(); 1753 1754 if (forwardSnoops) { 1755 // first propagate snoop upward to see if anyone above us wants to 1756 // handle it. save & restore packet src since it will get 1757 // rewritten to be relative to cpu-side bus (if any) 1758 bool alreadyResponded = pkt->memInhibitAsserted(); 1759 if (is_timing) { 1760 // copy the packet so that we can clear any flags before 1761 // forwarding it upwards, we also allocate data (passing 1762 // the pointer along in case of static data), in case 1763 // there is a snoop hit in upper levels 1764 Packet snoopPkt(pkt, true, true); 1765 snoopPkt.setExpressSnoop(); 1766 snoopPkt.pushSenderState(new ForwardResponseRecord()); 1767 // the snoop packet does not need to wait any additional 1768 // time 1769 snoopPkt.headerDelay = snoopPkt.payloadDelay = 0; 1770 cpuSidePort->sendTimingSnoopReq(&snoopPkt); 1771 if (snoopPkt.memInhibitAsserted()) { 1772 // cache-to-cache response from some upper cache 1773 assert(!alreadyResponded); 1774 pkt->assertMemInhibit(); 1775 } else { 1776 // no cache (or anyone else for that matter) will 1777 // respond, so delete the ForwardResponseRecord here 1778 delete snoopPkt.popSenderState(); 1779 } 1780 if (snoopPkt.sharedAsserted()) { 1781 pkt->assertShared(); 1782 } 1783 // If this request is a prefetch or clean evict and an upper level 1784 // signals block present, make sure to propagate the block 1785 // presence to the requester. 1786 if (snoopPkt.isBlockCached()) { 1787 pkt->setBlockCached(); 1788 } 1789 } else { 1790 cpuSidePort->sendAtomicSnoop(pkt); 1791 if (!alreadyResponded && pkt->memInhibitAsserted()) { 1792 // cache-to-cache response from some upper cache: 1793 // forward response to original requester 1794 assert(pkt->isResponse()); 1795 } 1796 } 1797 } 1798 1799 if (!blk || !blk->isValid()) { 1800 DPRINTF(Cache, "%s snoop miss for %s addr %#llx size %d\n", 1801 __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 1802 return; 1803 } else { 1804 DPRINTF(Cache, "%s snoop hit for %s for addr %#llx size %d, " 1805 "old state is %s\n", __func__, pkt->cmdString(), 1806 pkt->getAddr(), pkt->getSize(), blk->print()); 1807 } 1808 1809 chatty_assert(!(isReadOnly && blk->isDirty()), 1810 "Should never have a dirty block in a read-only cache %s\n", 1811 name()); 1812 1813 // We may end up modifying both the block state and the packet (if 1814 // we respond in atomic mode), so just figure out what to do now 1815 // and then do it later. If we find dirty data while snooping for 1816 // an invalidate, we don't need to send a response. The 1817 // invalidation itself is taken care of below. 1818 bool respond = blk->isDirty() && pkt->needsResponse() && 1819 pkt->cmd != MemCmd::InvalidateReq; 1820 bool have_exclusive = blk->isWritable(); 1821 1822 // Invalidate any prefetch's from below that would strip write permissions 1823 // MemCmd::HardPFReq is only observed by upstream caches. After missing 1824 // above and in it's own cache, a new MemCmd::ReadReq is created that 1825 // downstream caches observe. 1826 if (pkt->mustCheckAbove()) { 1827 DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s from" 1828 " lower cache\n", pkt->getAddr(), pkt->cmdString()); 1829 pkt->setBlockCached(); 1830 return; 1831 } 1832 1833 if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) { 1834 assert(!needs_exclusive); 1835 pkt->assertShared(); 1836 int bits_to_clear = BlkWritable; 1837 const bool haveOwnershipState = true; // for now 1838 if (!haveOwnershipState) { 1839 // if we don't support pure ownership (dirty && !writable), 1840 // have to clear dirty bit here, assume memory snarfs data 1841 // on cache-to-cache xfer 1842 bits_to_clear |= BlkDirty; 1843 } 1844 blk->status &= ~bits_to_clear; 1845 } 1846 1847 if (respond) { 1848 // prevent anyone else from responding, cache as well as 1849 // memory, and also prevent any memory from even seeing the 1850 // request (with current inhibited semantics), note that this 1851 // applies both to reads and writes and that for writes it 1852 // works thanks to the fact that we still have dirty data and 1853 // will write it back at a later point 1854 pkt->assertMemInhibit(); 1855 if (have_exclusive) { 1856 // in the case of an uncacheable request there is no need 1857 // to set the exclusive flag, but since the recipient does 1858 // not care there is no harm in doing so 1859 pkt->setSupplyExclusive(); 1860 } 1861 if (is_timing) { 1862 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval); 1863 } else { 1864 pkt->makeAtomicResponse(); 1865 pkt->setDataFromBlock(blk->data, blkSize); 1866 } 1867 } 1868 1869 if (!respond && is_timing && is_deferred) { 1870 // if it's a deferred timing snoop then we've made a copy of 1871 // both the request and the packet, and so if we're not using 1872 // those copies to respond and delete them here 1873 DPRINTF(Cache, "Deleting pkt %p and request %p for cmd %s addr: %p\n", 1874 pkt, pkt->req, pkt->cmdString(), pkt->getAddr()); 1875 1876 // the packets needs a response (just not from us), so we also 1877 // need to delete the request and not rely on the packet 1878 // destructor 1879 assert(pkt->needsResponse()); 1880 delete pkt->req; 1881 delete pkt; 1882 } 1883 1884 // Do this last in case it deallocates block data or something 1885 // like that 1886 if (invalidate) { 1887 if (blk != tempBlock) 1888 tags->invalidate(blk); 1889 blk->invalidate(); 1890 } 1891 1892 DPRINTF(Cache, "new state is %s\n", blk->print()); 1893} 1894 1895 1896void 1897Cache::recvTimingSnoopReq(PacketPtr pkt) 1898{ 1899 DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__, 1900 pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 1901 1902 // Snoops shouldn't happen when bypassing caches 1903 assert(!system->bypassCaches()); 1904 1905 // no need to snoop writebacks or requests that are not in range 1906 if (!inRange(pkt->getAddr())) { 1907 return; 1908 } 1909 1910 bool is_secure = pkt->isSecure(); 1911 CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure); 1912 1913 Addr blk_addr = blockAlign(pkt->getAddr()); 1914 MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure); 1915 1916 // Inform request(Prefetch, CleanEvict or Writeback) from below of 1917 // MSHR hit, set setBlockCached. 1918 if (mshr && pkt->mustCheckAbove()) { 1919 DPRINTF(Cache, "Setting block cached for %s from" 1920 "lower cache on mshr hit %#x\n", 1921 pkt->cmdString(), pkt->getAddr()); 1922 pkt->setBlockCached(); 1923 return; 1924 } 1925 1926 // Let the MSHR itself track the snoop and decide whether we want 1927 // to go ahead and do the regular cache snoop 1928 if (mshr && mshr->handleSnoop(pkt, order++)) { 1929 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s)." 1930 "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns", 1931 mshr->print()); 1932 1933 if (mshr->getNumTargets() > numTarget) 1934 warn("allocating bonus target for snoop"); //handle later 1935 return; 1936 } 1937 1938 //We also need to check the writeback buffers and handle those 1939 std::vector<MSHR *> writebacks; 1940 if (writeBuffer.findMatches(blk_addr, is_secure, writebacks)) { 1941 DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n", 1942 pkt->getAddr(), is_secure ? "s" : "ns"); 1943 1944 // Look through writebacks for any cachable writes. 1945 // We should only ever find a single match 1946 assert(writebacks.size() == 1); 1947 MSHR *wb_entry = writebacks[0]; 1948 // Expect to see only Writebacks and/or CleanEvicts here, both of 1949 // which should not be generated for uncacheable data. 1950 assert(!wb_entry->isUncacheable()); 1951 // There should only be a single request responsible for generating 1952 // Writebacks/CleanEvicts. 1953 assert(wb_entry->getNumTargets() == 1); 1954 PacketPtr wb_pkt = wb_entry->getTarget()->pkt; 1955 assert(wb_pkt->evictingBlock()); 1956 1957 if (pkt->evictingBlock()) { 1958 // if the block is found in the write queue, set the BLOCK_CACHED 1959 // flag for Writeback/CleanEvict snoop. On return the snoop will 1960 // propagate the BLOCK_CACHED flag in Writeback packets and prevent 1961 // any CleanEvicts from travelling down the memory hierarchy. 1962 pkt->setBlockCached(); 1963 DPRINTF(Cache, "Squashing %s from lower cache on writequeue hit" 1964 " %#x\n", pkt->cmdString(), pkt->getAddr()); 1965 return; 1966 } 1967 1968 if (wb_pkt->cmd == MemCmd::Writeback) { 1969 assert(!pkt->memInhibitAsserted()); 1970 pkt->assertMemInhibit(); 1971 if (!pkt->needsExclusive()) { 1972 pkt->assertShared(); 1973 // the writeback is no longer passing exclusivity (the 1974 // receiving cache should consider the block owned 1975 // rather than modified) 1976 wb_pkt->assertShared(); 1977 } else { 1978 // if we're not asserting the shared line, we need to 1979 // invalidate our copy. we'll do that below as long as 1980 // the packet's invalidate flag is set... 1981 assert(pkt->isInvalidate()); 1982 } 1983 doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(), 1984 false, false); 1985 } else { 1986 assert(wb_pkt->cmd == MemCmd::CleanEvict); 1987 // The cache technically holds the block until the 1988 // corresponding CleanEvict message reaches the crossbar 1989 // below. Therefore when a snoop encounters a CleanEvict 1990 // message we must set assertShared (just like when it 1991 // encounters a Writeback) to avoid the snoop filter 1992 // prematurely clearing the holder bit in the crossbar 1993 // below 1994 if (!pkt->needsExclusive()) 1995 pkt->assertShared(); 1996 else 1997 assert(pkt->isInvalidate()); 1998 } 1999 2000 if (pkt->isInvalidate()) { 2001 // Invalidation trumps our writeback... discard here 2002 // Note: markInService will remove entry from writeback buffer. 2003 markInService(wb_entry, false); 2004 delete wb_pkt; 2005 } 2006 } 2007 2008 // If this was a shared writeback, there may still be 2009 // other shared copies above that require invalidation. 2010 // We could be more selective and return here if the 2011 // request is non-exclusive or if the writeback is 2012 // exclusive. 2013 handleSnoop(pkt, blk, true, false, false); 2014} 2015 2016bool 2017Cache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt) 2018{ 2019 // Express snoop responses from master to slave, e.g., from L1 to L2 2020 cache->recvTimingSnoopResp(pkt); 2021 return true; 2022} 2023 2024Tick 2025Cache::recvAtomicSnoop(PacketPtr pkt) 2026{ 2027 // Snoops shouldn't happen when bypassing caches 2028 assert(!system->bypassCaches()); 2029 2030 // no need to snoop writebacks or requests that are not in range. In 2031 // atomic we have no Writebacks/CleanEvicts queued and no prefetches, 2032 // hence there is no need to snoop upwards and determine if they are 2033 // present above. 2034 if (pkt->evictingBlock() || !inRange(pkt->getAddr())) { 2035 return 0; 2036 } 2037 2038 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure()); 2039 handleSnoop(pkt, blk, false, false, false); 2040 // We consider forwardLatency here because a snoop occurs in atomic mode 2041 return forwardLatency * clockPeriod(); 2042} 2043 2044 2045MSHR * 2046Cache::getNextMSHR() 2047{ 2048 // Check both MSHR queue and write buffer for potential requests, 2049 // note that null does not mean there is no request, it could 2050 // simply be that it is not ready 2051 MSHR *miss_mshr = mshrQueue.getNextMSHR(); 2052 MSHR *write_mshr = writeBuffer.getNextMSHR(); 2053 2054 // If we got a write buffer request ready, first priority is a 2055 // full write buffer, otherwhise we favour the miss requests 2056 if (write_mshr && 2057 ((writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) || 2058 !miss_mshr)) { 2059 // need to search MSHR queue for conflicting earlier miss. 2060 MSHR *conflict_mshr = 2061 mshrQueue.findPending(write_mshr->blkAddr, 2062 write_mshr->isSecure); 2063 2064 if (conflict_mshr && conflict_mshr->order < write_mshr->order) { 2065 // Service misses in order until conflict is cleared. 2066 return conflict_mshr; 2067 2068 // @todo Note that we ignore the ready time of the conflict here 2069 } 2070 2071 // No conflicts; issue write 2072 return write_mshr; 2073 } else if (miss_mshr) { 2074 // need to check for conflicting earlier writeback 2075 MSHR *conflict_mshr = 2076 writeBuffer.findPending(miss_mshr->blkAddr, 2077 miss_mshr->isSecure); 2078 if (conflict_mshr) { 2079 // not sure why we don't check order here... it was in the 2080 // original code but commented out. 2081 2082 // The only way this happens is if we are 2083 // doing a write and we didn't have permissions 2084 // then subsequently saw a writeback (owned got evicted) 2085 // We need to make sure to perform the writeback first 2086 // To preserve the dirty data, then we can issue the write 2087 2088 // should we return write_mshr here instead? I.e. do we 2089 // have to flush writes in order? I don't think so... not 2090 // for Alpha anyway. Maybe for x86? 2091 return conflict_mshr; 2092 2093 // @todo Note that we ignore the ready time of the conflict here 2094 } 2095 2096 // No conflicts; issue read 2097 return miss_mshr; 2098 } 2099 2100 // fall through... no pending requests. Try a prefetch. 2101 assert(!miss_mshr && !write_mshr); 2102 if (prefetcher && mshrQueue.canPrefetch()) { 2103 // If we have a miss queue slot, we can try a prefetch 2104 PacketPtr pkt = prefetcher->getPacket(); 2105 if (pkt) { 2106 Addr pf_addr = blockAlign(pkt->getAddr()); 2107 if (!tags->findBlock(pf_addr, pkt->isSecure()) && 2108 !mshrQueue.findMatch(pf_addr, pkt->isSecure()) && 2109 !writeBuffer.findMatch(pf_addr, pkt->isSecure())) { 2110 // Update statistic on number of prefetches issued 2111 // (hwpf_mshr_misses) 2112 assert(pkt->req->masterId() < system->maxMasters()); 2113 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++; 2114 2115 // allocate an MSHR and return it, note 2116 // that we send the packet straight away, so do not 2117 // schedule the send 2118 return allocateMissBuffer(pkt, curTick(), false); 2119 } else { 2120 // free the request and packet 2121 delete pkt->req; 2122 delete pkt; 2123 } 2124 } 2125 } 2126 2127 return NULL; 2128} 2129 2130bool 2131Cache::isCachedAbove(const PacketPtr pkt) const 2132{ 2133 if (!forwardSnoops) 2134 return false; 2135 // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and 2136 // Writeback snoops into upper level caches to check for copies of the 2137 // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict 2138 // packet, the cache can inform the crossbar below of presence or absence 2139 // of the block. 2140 2141 Packet snoop_pkt(pkt, true, false); 2142 snoop_pkt.setExpressSnoop(); 2143 // Assert that packet is either Writeback or CleanEvict and not a prefetch 2144 // request because prefetch requests need an MSHR and may generate a snoop 2145 // response. 2146 assert(pkt->evictingBlock()); 2147 snoop_pkt.senderState = NULL; 2148 cpuSidePort->sendTimingSnoopReq(&snoop_pkt); 2149 // Writeback/CleanEvict snoops do not generate a separate snoop response. 2150 assert(!(snoop_pkt.memInhibitAsserted())); 2151 return snoop_pkt.isBlockCached(); 2152} 2153 2154PacketPtr 2155Cache::getTimingPacket() 2156{ 2157 MSHR *mshr = getNextMSHR(); 2158 2159 if (mshr == NULL) { 2160 return NULL; 2161 } 2162 2163 // use request from 1st target 2164 PacketPtr tgt_pkt = mshr->getTarget()->pkt; 2165 PacketPtr pkt = NULL; 2166 2167 DPRINTF(CachePort, "%s %s for addr %#llx size %d\n", __func__, 2168 tgt_pkt->cmdString(), tgt_pkt->getAddr(), tgt_pkt->getSize()); 2169 2170 CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure); 2171 2172 if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) { 2173 // We need to check the caches above us to verify that 2174 // they don't have a copy of this block in the dirty state 2175 // at the moment. Without this check we could get a stale 2176 // copy from memory that might get used in place of the 2177 // dirty one. 2178 Packet snoop_pkt(tgt_pkt, true, false); 2179 snoop_pkt.setExpressSnoop(); 2180 snoop_pkt.senderState = mshr; 2181 cpuSidePort->sendTimingSnoopReq(&snoop_pkt); 2182 2183 // Check to see if the prefetch was squashed by an upper cache (to 2184 // prevent us from grabbing the line) or if a Check to see if a 2185 // writeback arrived between the time the prefetch was placed in 2186 // the MSHRs and when it was selected to be sent or if the 2187 // prefetch was squashed by an upper cache. 2188 2189 // It is important to check memInhibitAsserted before 2190 // prefetchSquashed. If another cache has asserted MEM_INGIBIT, it 2191 // will be sending a response which will arrive at the MSHR 2192 // allocated ofr this request. Checking the prefetchSquash first 2193 // may result in the MSHR being prematurely deallocated. 2194 2195 if (snoop_pkt.memInhibitAsserted()) { 2196 // If we are getting a non-shared response it is dirty 2197 bool pending_dirty_resp = !snoop_pkt.sharedAsserted(); 2198 markInService(mshr, pending_dirty_resp); 2199 DPRINTF(Cache, "Upward snoop of prefetch for addr" 2200 " %#x (%s) hit\n", 2201 tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns"); 2202 return NULL; 2203 } 2204 2205 if (snoop_pkt.isBlockCached() || blk != NULL) { 2206 DPRINTF(Cache, "Block present, prefetch squashed by cache. " 2207 "Deallocating mshr target %#x.\n", 2208 mshr->blkAddr); 2209 2210 // Deallocate the mshr target 2211 if (tgt_pkt->cmd != MemCmd::Writeback) { 2212 if (mshr->queue->forceDeallocateTarget(mshr)) { 2213 // Clear block if this deallocation resulted freed an 2214 // mshr when all had previously been utilized 2215 clearBlocked((BlockedCause)(mshr->queue->index)); 2216 } 2217 return NULL; 2218 } else { 2219 // If this is a Writeback, and the snoops indicate that the blk 2220 // is cached above, set the BLOCK_CACHED flag in the Writeback 2221 // packet, so that it does not reset the bits corresponding to 2222 // this block in the snoop filter below. 2223 tgt_pkt->setBlockCached(); 2224 } 2225 } 2226 } 2227 2228 if (mshr->isForwardNoResponse()) { 2229 // no response expected, just forward packet as it is 2230 assert(tags->findBlock(mshr->blkAddr, mshr->isSecure) == NULL); 2231 pkt = tgt_pkt; 2232 } else { 2233 pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive()); 2234 2235 mshr->isForward = (pkt == NULL); 2236 2237 if (mshr->isForward) { 2238 // not a cache block request, but a response is expected 2239 // make copy of current packet to forward, keep current 2240 // copy for response handling 2241 pkt = new Packet(tgt_pkt, false, true); 2242 if (pkt->isWrite()) { 2243 pkt->setData(tgt_pkt->getConstPtr<uint8_t>()); 2244 } 2245 } 2246 } 2247 2248 assert(pkt != NULL); 2249 pkt->senderState = mshr; 2250 return pkt; 2251} 2252 2253 2254Tick 2255Cache::nextMSHRReadyTime() const 2256{ 2257 Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(), 2258 writeBuffer.nextMSHRReadyTime()); 2259 2260 // Don't signal prefetch ready time if no MSHRs available 2261 // Will signal once enoguh MSHRs are deallocated 2262 if (prefetcher && mshrQueue.canPrefetch()) { 2263 nextReady = std::min(nextReady, 2264 prefetcher->nextPrefetchReadyTime()); 2265 } 2266 2267 return nextReady; 2268} 2269 2270void 2271Cache::serialize(CheckpointOut &cp) const 2272{ 2273 bool dirty(isDirty()); 2274 2275 if (dirty) { 2276 warn("*** The cache still contains dirty data. ***\n"); 2277 warn(" Make sure to drain the system using the correct flags.\n"); 2278 warn(" This checkpoint will not restore correctly and dirty data in " 2279 "the cache will be lost!\n"); 2280 } 2281 2282 // Since we don't checkpoint the data in the cache, any dirty data 2283 // will be lost when restoring from a checkpoint of a system that 2284 // wasn't drained properly. Flag the checkpoint as invalid if the 2285 // cache contains dirty data. 2286 bool bad_checkpoint(dirty); 2287 SERIALIZE_SCALAR(bad_checkpoint); 2288} 2289 2290void 2291Cache::unserialize(CheckpointIn &cp) 2292{ 2293 bool bad_checkpoint; 2294 UNSERIALIZE_SCALAR(bad_checkpoint); 2295 if (bad_checkpoint) { 2296 fatal("Restoring from checkpoints with dirty caches is not supported " 2297 "in the classic memory system. Please remove any caches or " 2298 " drain them properly before taking checkpoints.\n"); 2299 } 2300} 2301 2302/////////////// 2303// 2304// CpuSidePort 2305// 2306/////////////// 2307 2308AddrRangeList 2309Cache::CpuSidePort::getAddrRanges() const 2310{ 2311 return cache->getAddrRanges(); 2312} 2313 2314bool 2315Cache::CpuSidePort::recvTimingReq(PacketPtr pkt) 2316{ 2317 assert(!cache->system->bypassCaches()); 2318 2319 bool success = false; 2320 2321 // always let inhibited requests through, even if blocked, 2322 // ultimately we should check if this is an express snoop, but at 2323 // the moment that flag is only set in the cache itself 2324 if (pkt->memInhibitAsserted()) { 2325 // do not change the current retry state 2326 bool M5_VAR_USED bypass_success = cache->recvTimingReq(pkt); 2327 assert(bypass_success); 2328 return true; 2329 } else if (blocked || mustSendRetry) { 2330 // either already committed to send a retry, or blocked 2331 success = false; 2332 } else { 2333 // pass it on to the cache, and let the cache decide if we 2334 // have to retry or not 2335 success = cache->recvTimingReq(pkt); 2336 } 2337 2338 // remember if we have to retry 2339 mustSendRetry = !success; 2340 return success; 2341} 2342 2343Tick 2344Cache::CpuSidePort::recvAtomic(PacketPtr pkt) 2345{ 2346 return cache->recvAtomic(pkt); 2347} 2348 2349void 2350Cache::CpuSidePort::recvFunctional(PacketPtr pkt) 2351{ 2352 // functional request 2353 cache->functionalAccess(pkt, true); 2354} 2355 2356Cache:: 2357CpuSidePort::CpuSidePort(const std::string &_name, Cache *_cache, 2358 const std::string &_label) 2359 : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache) 2360{ 2361} 2362 2363Cache* 2364CacheParams::create() 2365{ 2366 assert(tags); 2367 2368 return new Cache(this); 2369} 2370/////////////// 2371// 2372// MemSidePort 2373// 2374/////////////// 2375 2376bool 2377Cache::MemSidePort::recvTimingResp(PacketPtr pkt) 2378{ 2379 cache->recvTimingResp(pkt); 2380 return true; 2381} 2382 2383// Express snooping requests to memside port 2384void 2385Cache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt) 2386{ 2387 // handle snooping requests 2388 cache->recvTimingSnoopReq(pkt); 2389} 2390 2391Tick 2392Cache::MemSidePort::recvAtomicSnoop(PacketPtr pkt) 2393{ 2394 return cache->recvAtomicSnoop(pkt); 2395} 2396 2397void 2398Cache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt) 2399{ 2400 // functional snoop (note that in contrast to atomic we don't have 2401 // a specific functionalSnoop method, as they have the same 2402 // behaviour regardless) 2403 cache->functionalAccess(pkt, false); 2404} 2405 2406void 2407Cache::CacheReqPacketQueue::sendDeferredPacket() 2408{ 2409 // sanity check 2410 assert(!waitingOnRetry); 2411 2412 // there should never be any deferred request packets in the 2413 // queue, instead we resly on the cache to provide the packets 2414 // from the MSHR queue or write queue 2415 assert(deferredPacketReadyTime() == MaxTick); 2416 2417 // check for request packets (requests & writebacks) 2418 PacketPtr pkt = cache.getTimingPacket(); 2419 if (pkt == NULL) { 2420 // can happen if e.g. we attempt a writeback and fail, but 2421 // before the retry, the writeback is eliminated because 2422 // we snoop another cache's ReadEx. 2423 } else { 2424 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState); 2425 // in most cases getTimingPacket allocates a new packet, and 2426 // we must delete it unless it is successfully sent 2427 bool delete_pkt = !mshr->isForwardNoResponse(); 2428 2429 // let our snoop responses go first if there are responses to 2430 // the same addresses we are about to writeback, note that 2431 // this creates a dependency between requests and snoop 2432 // responses, but that should not be a problem since there is 2433 // a chain already and the key is that the snoop responses can 2434 // sink unconditionally 2435 if (snoopRespQueue.hasAddr(pkt->getAddr())) { 2436 DPRINTF(CachePort, "Waiting for snoop response to be sent\n"); 2437 Tick when = snoopRespQueue.deferredPacketReadyTime(); 2438 schedSendEvent(when); 2439 2440 if (delete_pkt) 2441 delete pkt; 2442 2443 return; 2444 } 2445 2446 2447 waitingOnRetry = !masterPort.sendTimingReq(pkt); 2448 2449 if (waitingOnRetry) { 2450 DPRINTF(CachePort, "now waiting on a retry\n"); 2451 if (delete_pkt) { 2452 // we are awaiting a retry, but we 2453 // delete the packet and will be creating a new packet 2454 // when we get the opportunity 2455 delete pkt; 2456 } 2457 // note that we have now masked any requestBus and 2458 // schedSendEvent (we will wait for a retry before 2459 // doing anything), and this is so even if we do not 2460 // care about this packet and might override it before 2461 // it gets retried 2462 } else { 2463 // As part of the call to sendTimingReq the packet is 2464 // forwarded to all neighbouring caches (and any 2465 // caches above them) as a snoop. The packet is also 2466 // sent to any potential cache below as the 2467 // interconnect is not allowed to buffer the 2468 // packet. Thus at this point we know if any of the 2469 // neighbouring, or the downstream cache is 2470 // responding, and if so, if it is with a dirty line 2471 // or not. 2472 bool pending_dirty_resp = !pkt->sharedAsserted() && 2473 pkt->memInhibitAsserted(); 2474 2475 cache.markInService(mshr, pending_dirty_resp); 2476 } 2477 } 2478 2479 // if we succeeded and are not waiting for a retry, schedule the 2480 // next send considering when the next MSHR is ready, note that 2481 // snoop responses have their own packet queue and thus schedule 2482 // their own events 2483 if (!waitingOnRetry) { 2484 schedSendEvent(cache.nextMSHRReadyTime()); 2485 } 2486} 2487 2488Cache:: 2489MemSidePort::MemSidePort(const std::string &_name, Cache *_cache, 2490 const std::string &_label) 2491 : BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue), 2492 _reqQueue(*_cache, *this, _snoopRespQueue, _label), 2493 _snoopRespQueue(*_cache, *this, _label), cache(_cache) 2494{ 2495} 2496