cache.cc revision 11190
1/* 2 * Copyright (c) 2010-2015 ARM Limited 3 * All rights reserved. 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2002-2005 The Regents of The University of Michigan 15 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc. 16 * All rights reserved. 17 * 18 * Redistribution and use in source and binary forms, with or without 19 * modification, are permitted provided that the following conditions are 20 * met: redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer; 22 * redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution; 25 * neither the name of the copyright holders nor the names of its 26 * contributors may be used to endorse or promote products derived from 27 * this software without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 40 * 41 * Authors: Erik Hallnor 42 * Dave Greene 43 * Nathan Binkert 44 * Steve Reinhardt 45 * Ron Dreslinski 46 * Andreas Sandberg 47 */ 48 49/** 50 * @file 51 * Cache definitions. 52 */ 53 54#include "mem/cache/cache.hh" 55 56#include "base/misc.hh" 57#include "base/types.hh" 58#include "debug/Cache.hh" 59#include "debug/CachePort.hh" 60#include "debug/CacheTags.hh" 61#include "mem/cache/blk.hh" 62#include "mem/cache/mshr.hh" 63#include "mem/cache/prefetch/base.hh" 64#include "sim/sim_exit.hh" 65 66Cache::Cache(const CacheParams *p) 67 : BaseCache(p, p->system->cacheLineSize()), 68 tags(p->tags), 69 prefetcher(p->prefetcher), 70 doFastWrites(true), 71 prefetchOnAccess(p->prefetch_on_access) 72{ 73 tempBlock = new CacheBlk(); 74 tempBlock->data = new uint8_t[blkSize]; 75 76 cpuSidePort = new CpuSidePort(p->name + ".cpu_side", this, 77 "CpuSidePort"); 78 memSidePort = new MemSidePort(p->name + ".mem_side", this, 79 "MemSidePort"); 80 81 tags->setCache(this); 82 if (prefetcher) 83 prefetcher->setCache(this); 84} 85 86Cache::~Cache() 87{ 88 delete [] tempBlock->data; 89 delete tempBlock; 90 91 delete cpuSidePort; 92 delete memSidePort; 93} 94 95void 96Cache::regStats() 97{ 98 BaseCache::regStats(); 99} 100 101void 102Cache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt) 103{ 104 assert(pkt->isRequest()); 105 106 uint64_t overwrite_val; 107 bool overwrite_mem; 108 uint64_t condition_val64; 109 uint32_t condition_val32; 110 111 int offset = tags->extractBlkOffset(pkt->getAddr()); 112 uint8_t *blk_data = blk->data + offset; 113 114 assert(sizeof(uint64_t) >= pkt->getSize()); 115 116 overwrite_mem = true; 117 // keep a copy of our possible write value, and copy what is at the 118 // memory address into the packet 119 pkt->writeData((uint8_t *)&overwrite_val); 120 pkt->setData(blk_data); 121 122 if (pkt->req->isCondSwap()) { 123 if (pkt->getSize() == sizeof(uint64_t)) { 124 condition_val64 = pkt->req->getExtraData(); 125 overwrite_mem = !std::memcmp(&condition_val64, blk_data, 126 sizeof(uint64_t)); 127 } else if (pkt->getSize() == sizeof(uint32_t)) { 128 condition_val32 = (uint32_t)pkt->req->getExtraData(); 129 overwrite_mem = !std::memcmp(&condition_val32, blk_data, 130 sizeof(uint32_t)); 131 } else 132 panic("Invalid size for conditional read/write\n"); 133 } 134 135 if (overwrite_mem) { 136 std::memcpy(blk_data, &overwrite_val, pkt->getSize()); 137 blk->status |= BlkDirty; 138 } 139} 140 141 142void 143Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk, 144 bool deferred_response, bool pending_downgrade) 145{ 146 assert(pkt->isRequest()); 147 148 assert(blk && blk->isValid()); 149 // Occasionally this is not true... if we are a lower-level cache 150 // satisfying a string of Read and ReadEx requests from 151 // upper-level caches, a Read will mark the block as shared but we 152 // can satisfy a following ReadEx anyway since we can rely on the 153 // Read requester(s) to have buffered the ReadEx snoop and to 154 // invalidate their blocks after receiving them. 155 // assert(!pkt->needsExclusive() || blk->isWritable()); 156 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize); 157 158 // Check RMW operations first since both isRead() and 159 // isWrite() will be true for them 160 if (pkt->cmd == MemCmd::SwapReq) { 161 cmpAndSwap(blk, pkt); 162 } else if (pkt->isWrite()) { 163 assert(blk->isWritable()); 164 // Write or WriteLine at the first cache with block in Exclusive 165 if (blk->checkWrite(pkt)) { 166 pkt->writeDataToBlock(blk->data, blkSize); 167 } 168 // Always mark the line as dirty even if we are a failed 169 // StoreCond so we supply data to any snoops that have 170 // appended themselves to this cache before knowing the store 171 // will fail. 172 blk->status |= BlkDirty; 173 DPRINTF(Cache, "%s for %s addr %#llx size %d (write)\n", __func__, 174 pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 175 } else if (pkt->isRead()) { 176 if (pkt->isLLSC()) { 177 blk->trackLoadLocked(pkt); 178 } 179 pkt->setDataFromBlock(blk->data, blkSize); 180 // determine if this read is from a (coherent) cache, or not 181 // by looking at the command type; we could potentially add a 182 // packet attribute such as 'FromCache' to make this check a 183 // bit cleaner 184 if (pkt->cmd == MemCmd::ReadExReq || 185 pkt->cmd == MemCmd::ReadSharedReq || 186 pkt->cmd == MemCmd::ReadCleanReq || 187 pkt->cmd == MemCmd::SCUpgradeFailReq) { 188 assert(pkt->getSize() == blkSize); 189 // special handling for coherent block requests from 190 // upper-level caches 191 if (pkt->needsExclusive()) { 192 // sanity check 193 assert(pkt->cmd == MemCmd::ReadExReq || 194 pkt->cmd == MemCmd::SCUpgradeFailReq); 195 196 // if we have a dirty copy, make sure the recipient 197 // keeps it marked dirty 198 if (blk->isDirty()) { 199 pkt->assertMemInhibit(); 200 } 201 // on ReadExReq we give up our copy unconditionally 202 if (blk != tempBlock) 203 tags->invalidate(blk); 204 blk->invalidate(); 205 } else if (blk->isWritable() && !pending_downgrade && 206 !pkt->sharedAsserted() && 207 pkt->cmd != MemCmd::ReadCleanReq) { 208 // we can give the requester an exclusive copy (by not 209 // asserting shared line) on a read request if: 210 // - we have an exclusive copy at this level (& below) 211 // - we don't have a pending snoop from below 212 // signaling another read request 213 // - no other cache above has a copy (otherwise it 214 // would have asseretd shared line on request) 215 // - we are not satisfying an instruction fetch (this 216 // prevents dirty data in the i-cache) 217 218 if (blk->isDirty()) { 219 // special considerations if we're owner: 220 if (!deferred_response) { 221 // if we are responding immediately and can 222 // signal that we're transferring ownership 223 // along with exclusivity, do so 224 pkt->assertMemInhibit(); 225 blk->status &= ~BlkDirty; 226 } else { 227 // if we're responding after our own miss, 228 // there's a window where the recipient didn't 229 // know it was getting ownership and may not 230 // have responded to snoops correctly, so we 231 // can't pass off ownership *or* exclusivity 232 pkt->assertShared(); 233 } 234 } 235 } else { 236 // otherwise only respond with a shared copy 237 pkt->assertShared(); 238 } 239 } 240 } else { 241 // Upgrade or Invalidate, since we have it Exclusively (E or 242 // M), we ack then invalidate. 243 assert(pkt->isUpgrade() || pkt->isInvalidate()); 244 assert(blk != tempBlock); 245 tags->invalidate(blk); 246 blk->invalidate(); 247 DPRINTF(Cache, "%s for %s addr %#llx size %d (invalidation)\n", 248 __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 249 } 250} 251 252 253///////////////////////////////////////////////////// 254// 255// MSHR helper functions 256// 257///////////////////////////////////////////////////// 258 259 260void 261Cache::markInService(MSHR *mshr, bool pending_dirty_resp) 262{ 263 markInServiceInternal(mshr, pending_dirty_resp); 264} 265 266///////////////////////////////////////////////////// 267// 268// Access path: requests coming in from the CPU side 269// 270///////////////////////////////////////////////////// 271 272bool 273Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, 274 PacketList &writebacks) 275{ 276 // sanity check 277 assert(pkt->isRequest()); 278 279 chatty_assert(!(isReadOnly && pkt->isWrite()), 280 "Should never see a write in a read-only cache %s\n", 281 name()); 282 283 DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__, 284 pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 285 286 if (pkt->req->isUncacheable()) { 287 DPRINTF(Cache, "%s%s addr %#llx uncacheable\n", pkt->cmdString(), 288 pkt->req->isInstFetch() ? " (ifetch)" : "", 289 pkt->getAddr()); 290 291 // flush and invalidate any existing block 292 CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure())); 293 if (old_blk && old_blk->isValid()) { 294 if (old_blk->isDirty()) 295 writebacks.push_back(writebackBlk(old_blk)); 296 else 297 writebacks.push_back(cleanEvictBlk(old_blk)); 298 tags->invalidate(old_blk); 299 old_blk->invalidate(); 300 } 301 302 blk = NULL; 303 // lookupLatency is the latency in case the request is uncacheable. 304 lat = lookupLatency; 305 return false; 306 } 307 308 ContextID id = pkt->req->hasContextId() ? 309 pkt->req->contextId() : InvalidContextID; 310 // Here lat is the value passed as parameter to accessBlock() function 311 // that can modify its value. 312 blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat, id); 313 314 DPRINTF(Cache, "%s%s addr %#llx size %d (%s) %s\n", pkt->cmdString(), 315 pkt->req->isInstFetch() ? " (ifetch)" : "", 316 pkt->getAddr(), pkt->getSize(), pkt->isSecure() ? "s" : "ns", 317 blk ? "hit " + blk->print() : "miss"); 318 319 320 if (pkt->evictingBlock()) { 321 // We check for presence of block in above caches before issuing 322 // Writeback or CleanEvict to write buffer. Therefore the only 323 // possible cases can be of a CleanEvict packet coming from above 324 // encountering a Writeback generated in this cache peer cache and 325 // waiting in the write buffer. Cases of upper level peer caches 326 // generating CleanEvict and Writeback or simply CleanEvict and 327 // CleanEvict almost simultaneously will be caught by snoops sent out 328 // by crossbar. 329 std::vector<MSHR *> outgoing; 330 if (writeBuffer.findMatches(pkt->getAddr(), pkt->isSecure(), 331 outgoing)) { 332 assert(outgoing.size() == 1); 333 PacketPtr wbPkt = outgoing[0]->getTarget()->pkt; 334 assert(pkt->cmd == MemCmd::CleanEvict && 335 wbPkt->cmd == MemCmd::Writeback); 336 // As the CleanEvict is coming from above, it would have snooped 337 // into other peer caches of the same level while traversing the 338 // crossbar. If a copy of the block had been found, the CleanEvict 339 // would have been deleted in the crossbar. Now that the 340 // CleanEvict is here we can be sure none of the other upper level 341 // caches connected to this cache have the block, so we can clear 342 // the BLOCK_CACHED flag in the Writeback if set and discard the 343 // CleanEvict by returning true. 344 wbPkt->clearBlockCached(); 345 return true; 346 } 347 } 348 349 // Writeback handling is special case. We can write the block into 350 // the cache without having a writeable copy (or any copy at all). 351 if (pkt->cmd == MemCmd::Writeback) { 352 assert(blkSize == pkt->getSize()); 353 if (blk == NULL) { 354 // need to do a replacement 355 blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks); 356 if (blk == NULL) { 357 // no replaceable block available: give up, fwd to next level. 358 incMissCount(pkt); 359 return false; 360 } 361 tags->insertBlock(pkt, blk); 362 363 blk->status = (BlkValid | BlkReadable); 364 if (pkt->isSecure()) { 365 blk->status |= BlkSecure; 366 } 367 } 368 blk->status |= BlkDirty; 369 // if shared is not asserted we got the writeback in modified 370 // state, if it is asserted we are in the owned state 371 if (!pkt->sharedAsserted()) { 372 blk->status |= BlkWritable; 373 } 374 // nothing else to do; writeback doesn't expect response 375 assert(!pkt->needsResponse()); 376 std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize); 377 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print()); 378 incHitCount(pkt); 379 return true; 380 } else if (pkt->cmd == MemCmd::CleanEvict) { 381 if (blk != NULL) { 382 // Found the block in the tags, need to stop CleanEvict from 383 // propagating further down the hierarchy. Returning true will 384 // treat the CleanEvict like a satisfied write request and delete 385 // it. 386 return true; 387 } 388 // We didn't find the block here, propagate the CleanEvict further 389 // down the memory hierarchy. Returning false will treat the CleanEvict 390 // like a Writeback which could not find a replaceable block so has to 391 // go to next level. 392 return false; 393 } else if ((blk != NULL) && 394 (pkt->needsExclusive() ? blk->isWritable() 395 : blk->isReadable())) { 396 // OK to satisfy access 397 incHitCount(pkt); 398 satisfyCpuSideRequest(pkt, blk); 399 return true; 400 } 401 402 // Can't satisfy access normally... either no block (blk == NULL) 403 // or have block but need exclusive & only have shared. 404 405 incMissCount(pkt); 406 407 if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) { 408 // complete miss on store conditional... just give up now 409 pkt->req->setExtraData(0); 410 return true; 411 } 412 413 return false; 414} 415 416 417class ForwardResponseRecord : public Packet::SenderState 418{ 419 public: 420 421 ForwardResponseRecord() {} 422}; 423 424void 425Cache::doWritebacks(PacketList& writebacks, Tick forward_time) 426{ 427 while (!writebacks.empty()) { 428 PacketPtr wbPkt = writebacks.front(); 429 // We use forwardLatency here because we are copying writebacks to 430 // write buffer. Call isCachedAbove for both Writebacks and 431 // CleanEvicts. If isCachedAbove returns true we set BLOCK_CACHED flag 432 // in Writebacks and discard CleanEvicts. 433 if (isCachedAbove(wbPkt)) { 434 if (wbPkt->cmd == MemCmd::CleanEvict) { 435 // Delete CleanEvict because cached copies exist above. The 436 // packet destructor will delete the request object because 437 // this is a non-snoop request packet which does not require a 438 // response. 439 delete wbPkt; 440 } else { 441 // Set BLOCK_CACHED flag in Writeback and send below, so that 442 // the Writeback does not reset the bit corresponding to this 443 // address in the snoop filter below. 444 wbPkt->setBlockCached(); 445 allocateWriteBuffer(wbPkt, forward_time); 446 } 447 } else { 448 // If the block is not cached above, send packet below. Both 449 // CleanEvict and Writeback with BLOCK_CACHED flag cleared will 450 // reset the bit corresponding to this address in the snoop filter 451 // below. 452 allocateWriteBuffer(wbPkt, forward_time); 453 } 454 writebacks.pop_front(); 455 } 456} 457 458void 459Cache::doWritebacksAtomic(PacketList& writebacks) 460{ 461 while (!writebacks.empty()) { 462 PacketPtr wbPkt = writebacks.front(); 463 // Call isCachedAbove for both Writebacks and CleanEvicts. If 464 // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks 465 // and discard CleanEvicts. 466 if (isCachedAbove(wbPkt, false)) { 467 if (wbPkt->cmd == MemCmd::Writeback) { 468 // Set BLOCK_CACHED flag in Writeback and send below, 469 // so that the Writeback does not reset the bit 470 // corresponding to this address in the snoop filter 471 // below. We can discard CleanEvicts because cached 472 // copies exist above. Atomic mode isCachedAbove 473 // modifies packet to set BLOCK_CACHED flag 474 memSidePort->sendAtomic(wbPkt); 475 } 476 } else { 477 // If the block is not cached above, send packet below. Both 478 // CleanEvict and Writeback with BLOCK_CACHED flag cleared will 479 // reset the bit corresponding to this address in the snoop filter 480 // below. 481 memSidePort->sendAtomic(wbPkt); 482 } 483 writebacks.pop_front(); 484 // In case of CleanEvicts, the packet destructor will delete the 485 // request object because this is a non-snoop request packet which 486 // does not require a response. 487 delete wbPkt; 488 } 489} 490 491 492void 493Cache::recvTimingSnoopResp(PacketPtr pkt) 494{ 495 DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__, 496 pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 497 498 assert(pkt->isResponse()); 499 500 // must be cache-to-cache response from upper to lower level 501 ForwardResponseRecord *rec = 502 dynamic_cast<ForwardResponseRecord *>(pkt->senderState); 503 assert(!system->bypassCaches()); 504 505 if (rec == NULL) { 506 // @todo What guarantee do we have that this HardPFResp is 507 // actually for this cache, and not a cache closer to the 508 // memory? 509 assert(pkt->cmd == MemCmd::HardPFResp); 510 // Check if it's a prefetch response and handle it. We shouldn't 511 // get any other kinds of responses without FRRs. 512 DPRINTF(Cache, "Got prefetch response from above for addr %#llx (%s)\n", 513 pkt->getAddr(), pkt->isSecure() ? "s" : "ns"); 514 recvTimingResp(pkt); 515 return; 516 } 517 518 pkt->popSenderState(); 519 delete rec; 520 // forwardLatency is set here because there is a response from an 521 // upper level cache. 522 // To pay the delay that occurs if the packet comes from the bus, 523 // we charge also headerDelay. 524 Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay; 525 // Reset the timing of the packet. 526 pkt->headerDelay = pkt->payloadDelay = 0; 527 memSidePort->schedTimingSnoopResp(pkt, snoop_resp_time); 528} 529 530void 531Cache::promoteWholeLineWrites(PacketPtr pkt) 532{ 533 // Cache line clearing instructions 534 if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) && 535 (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) { 536 pkt->cmd = MemCmd::WriteLineReq; 537 DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n"); 538 } 539} 540 541bool 542Cache::recvTimingReq(PacketPtr pkt) 543{ 544 DPRINTF(CacheTags, "%s tags: %s\n", __func__, tags->print()); 545 546 assert(pkt->isRequest()); 547 548 // Just forward the packet if caches are disabled. 549 if (system->bypassCaches()) { 550 // @todo This should really enqueue the packet rather 551 bool M5_VAR_USED success = memSidePort->sendTimingReq(pkt); 552 assert(success); 553 return true; 554 } 555 556 promoteWholeLineWrites(pkt); 557 558 if (pkt->memInhibitAsserted()) { 559 // a cache above us (but not where the packet came from) is 560 // responding to the request 561 DPRINTF(Cache, "mem inhibited on addr %#llx (%s): not responding\n", 562 pkt->getAddr(), pkt->isSecure() ? "s" : "ns"); 563 564 // if the packet needs exclusive, and the cache that has 565 // promised to respond (setting the inhibit flag) is not 566 // providing exclusive (it is in O vs M state), we know that 567 // there may be other shared copies in the system; go out and 568 // invalidate them all 569 if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) { 570 // create a downstream express snoop with cleared packet 571 // flags, there is no need to allocate any data as the 572 // packet is merely used to co-ordinate state transitions 573 Packet *snoop_pkt = new Packet(pkt, true, false); 574 575 // also reset the bus time that the original packet has 576 // not yet paid for 577 snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0; 578 579 // make this an instantaneous express snoop, and let the 580 // other caches in the system know that the packet is 581 // inhibited, because we have found the authorative copy 582 // (O) that will supply the right data 583 snoop_pkt->setExpressSnoop(); 584 snoop_pkt->assertMemInhibit(); 585 586 // this express snoop travels towards the memory, and at 587 // every crossbar it is snooped upwards thus reaching 588 // every cache in the system 589 bool M5_VAR_USED success = memSidePort->sendTimingReq(snoop_pkt); 590 // express snoops always succeed 591 assert(success); 592 593 // main memory will delete the packet 594 } 595 596 // queue for deletion, as the sending cache is still relying 597 // on the packet 598 pendingDelete.reset(pkt); 599 600 // no need to take any action in this particular cache as the 601 // caches along the path to memory are allowed to keep lines 602 // in a shared state, and a cache above us already committed 603 // to responding 604 return true; 605 } 606 607 // anything that is merely forwarded pays for the forward latency and 608 // the delay provided by the crossbar 609 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 610 611 // We use lookupLatency here because it is used to specify the latency 612 // to access. 613 Cycles lat = lookupLatency; 614 CacheBlk *blk = NULL; 615 bool satisfied = false; 616 { 617 PacketList writebacks; 618 // Note that lat is passed by reference here. The function 619 // access() calls accessBlock() which can modify lat value. 620 satisfied = access(pkt, blk, lat, writebacks); 621 622 // copy writebacks to write buffer here to ensure they logically 623 // proceed anything happening below 624 doWritebacks(writebacks, forward_time); 625 } 626 627 // Here we charge the headerDelay that takes into account the latencies 628 // of the bus, if the packet comes from it. 629 // The latency charged it is just lat that is the value of lookupLatency 630 // modified by access() function, or if not just lookupLatency. 631 // In case of a hit we are neglecting response latency. 632 // In case of a miss we are neglecting forward latency. 633 Tick request_time = clockEdge(lat) + pkt->headerDelay; 634 // Here we reset the timing of the packet. 635 pkt->headerDelay = pkt->payloadDelay = 0; 636 637 // track time of availability of next prefetch, if any 638 Tick next_pf_time = MaxTick; 639 640 bool needsResponse = pkt->needsResponse(); 641 642 if (satisfied) { 643 // should never be satisfying an uncacheable access as we 644 // flush and invalidate any existing block as part of the 645 // lookup 646 assert(!pkt->req->isUncacheable()); 647 648 // hit (for all other request types) 649 650 if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) { 651 if (blk) 652 blk->status &= ~BlkHWPrefetched; 653 654 // Don't notify on SWPrefetch 655 if (!pkt->cmd.isSWPrefetch()) 656 next_pf_time = prefetcher->notify(pkt); 657 } 658 659 if (needsResponse) { 660 pkt->makeTimingResponse(); 661 // @todo: Make someone pay for this 662 pkt->headerDelay = pkt->payloadDelay = 0; 663 664 // In this case we are considering request_time that takes 665 // into account the delay of the xbar, if any, and just 666 // lat, neglecting responseLatency, modelling hit latency 667 // just as lookupLatency or or the value of lat overriden 668 // by access(), that calls accessBlock() function. 669 cpuSidePort->schedTimingResp(pkt, request_time); 670 } else { 671 // queue the packet for deletion, as the sending cache is 672 // still relying on it; if the block is found in access(), 673 // CleanEvict and Writeback messages will be deleted 674 // here as well 675 pendingDelete.reset(pkt); 676 } 677 } else { 678 // miss 679 680 Addr blk_addr = blockAlign(pkt->getAddr()); 681 682 // ignore any existing MSHR if we are dealing with an 683 // uncacheable request 684 MSHR *mshr = pkt->req->isUncacheable() ? nullptr : 685 mshrQueue.findMatch(blk_addr, pkt->isSecure()); 686 687 // Software prefetch handling: 688 // To keep the core from waiting on data it won't look at 689 // anyway, send back a response with dummy data. Miss handling 690 // will continue asynchronously. Unfortunately, the core will 691 // insist upon freeing original Packet/Request, so we have to 692 // create a new pair with a different lifecycle. Note that this 693 // processing happens before any MSHR munging on the behalf of 694 // this request because this new Request will be the one stored 695 // into the MSHRs, not the original. 696 if (pkt->cmd.isSWPrefetch()) { 697 assert(needsResponse); 698 assert(pkt->req->hasPaddr()); 699 assert(!pkt->req->isUncacheable()); 700 701 // There's no reason to add a prefetch as an additional target 702 // to an existing MSHR. If an outstanding request is already 703 // in progress, there is nothing for the prefetch to do. 704 // If this is the case, we don't even create a request at all. 705 PacketPtr pf = nullptr; 706 707 if (!mshr) { 708 // copy the request and create a new SoftPFReq packet 709 RequestPtr req = new Request(pkt->req->getPaddr(), 710 pkt->req->getSize(), 711 pkt->req->getFlags(), 712 pkt->req->masterId()); 713 pf = new Packet(req, pkt->cmd); 714 pf->allocate(); 715 assert(pf->getAddr() == pkt->getAddr()); 716 assert(pf->getSize() == pkt->getSize()); 717 } 718 719 pkt->makeTimingResponse(); 720 // for debugging, set all the bits in the response data 721 // (also keeps valgrind from complaining when debugging settings 722 // print out instruction results) 723 std::memset(pkt->getPtr<uint8_t>(), 0xFF, pkt->getSize()); 724 // request_time is used here, taking into account lat and the delay 725 // charged if the packet comes from the xbar. 726 cpuSidePort->schedTimingResp(pkt, request_time); 727 728 // If an outstanding request is in progress (we found an 729 // MSHR) this is set to null 730 pkt = pf; 731 } 732 733 if (mshr) { 734 /// MSHR hit 735 /// @note writebacks will be checked in getNextMSHR() 736 /// for any conflicting requests to the same block 737 738 //@todo remove hw_pf here 739 740 // Coalesce unless it was a software prefetch (see above). 741 if (pkt) { 742 assert(pkt->cmd != MemCmd::Writeback); 743 // CleanEvicts corresponding to blocks which have outstanding 744 // requests in MSHRs can be deleted here. 745 if (pkt->cmd == MemCmd::CleanEvict) { 746 pendingDelete.reset(pkt); 747 } else { 748 DPRINTF(Cache, "%s coalescing MSHR for %s addr %#llx size %d\n", 749 __func__, pkt->cmdString(), pkt->getAddr(), 750 pkt->getSize()); 751 752 assert(pkt->req->masterId() < system->maxMasters()); 753 mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++; 754 if (mshr->threadNum != 0/*pkt->req->threadId()*/) { 755 mshr->threadNum = -1; 756 } 757 // We use forward_time here because it is the same 758 // considering new targets. We have multiple 759 // requests for the same address here. It 760 // specifies the latency to allocate an internal 761 // buffer and to schedule an event to the queued 762 // port and also takes into account the additional 763 // delay of the xbar. 764 mshr->allocateTarget(pkt, forward_time, order++); 765 if (mshr->getNumTargets() == numTarget) { 766 noTargetMSHR = mshr; 767 setBlocked(Blocked_NoTargets); 768 // need to be careful with this... if this mshr isn't 769 // ready yet (i.e. time > curTick()), we don't want to 770 // move it ahead of mshrs that are ready 771 // mshrQueue.moveToFront(mshr); 772 } 773 } 774 // We should call the prefetcher reguardless if the request is 775 // satisfied or not, reguardless if the request is in the MSHR or 776 // not. The request could be a ReadReq hit, but still not 777 // satisfied (potentially because of a prior write to the same 778 // cache line. So, even when not satisfied, tehre is an MSHR 779 // already allocated for this, we need to let the prefetcher know 780 // about the request 781 if (prefetcher) { 782 // Don't notify on SWPrefetch 783 if (!pkt->cmd.isSWPrefetch()) 784 next_pf_time = prefetcher->notify(pkt); 785 } 786 } 787 } else { 788 // no MSHR 789 assert(pkt->req->masterId() < system->maxMasters()); 790 if (pkt->req->isUncacheable()) { 791 mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++; 792 } else { 793 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++; 794 } 795 796 if (pkt->evictingBlock() || 797 (pkt->req->isUncacheable() && pkt->isWrite())) { 798 // We use forward_time here because there is an 799 // uncached memory write, forwarded to WriteBuffer. 800 allocateWriteBuffer(pkt, forward_time); 801 } else { 802 if (blk && blk->isValid()) { 803 // should have flushed and have no valid block 804 assert(!pkt->req->isUncacheable()); 805 806 // If we have a write miss to a valid block, we 807 // need to mark the block non-readable. Otherwise 808 // if we allow reads while there's an outstanding 809 // write miss, the read could return stale data 810 // out of the cache block... a more aggressive 811 // system could detect the overlap (if any) and 812 // forward data out of the MSHRs, but we don't do 813 // that yet. Note that we do need to leave the 814 // block valid so that it stays in the cache, in 815 // case we get an upgrade response (and hence no 816 // new data) when the write miss completes. 817 // As long as CPUs do proper store/load forwarding 818 // internally, and have a sufficiently weak memory 819 // model, this is probably unnecessary, but at some 820 // point it must have seemed like we needed it... 821 assert(pkt->needsExclusive()); 822 assert(!blk->isWritable()); 823 blk->status &= ~BlkReadable; 824 } 825 // Here we are using forward_time, modelling the latency of 826 // a miss (outbound) just as forwardLatency, neglecting the 827 // lookupLatency component. 828 allocateMissBuffer(pkt, forward_time); 829 } 830 831 if (prefetcher) { 832 // Don't notify on SWPrefetch 833 if (!pkt->cmd.isSWPrefetch()) 834 next_pf_time = prefetcher->notify(pkt); 835 } 836 } 837 } 838 839 if (next_pf_time != MaxTick) 840 schedMemSideSendEvent(next_pf_time); 841 842 return true; 843} 844 845 846// See comment in cache.hh. 847PacketPtr 848Cache::getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk, 849 bool needsExclusive) const 850{ 851 bool blkValid = blk && blk->isValid(); 852 853 if (cpu_pkt->req->isUncacheable()) { 854 // note that at the point we see the uncacheable request we 855 // flush any block, but there could be an outstanding MSHR, 856 // and the cache could have filled again before we actually 857 // send out the forwarded uncacheable request (blk could thus 858 // be non-null) 859 return NULL; 860 } 861 862 if (!blkValid && 863 (cpu_pkt->isUpgrade() || 864 cpu_pkt->evictingBlock())) { 865 // Writebacks that weren't allocated in access() and upgrades 866 // from upper-level caches that missed completely just go 867 // through. 868 return NULL; 869 } 870 871 assert(cpu_pkt->needsResponse()); 872 873 MemCmd cmd; 874 // @TODO make useUpgrades a parameter. 875 // Note that ownership protocols require upgrade, otherwise a 876 // write miss on a shared owned block will generate a ReadExcl, 877 // which will clobber the owned copy. 878 const bool useUpgrades = true; 879 if (blkValid && useUpgrades) { 880 // only reason to be here is that blk is shared 881 // (read-only) and we need exclusive 882 assert(needsExclusive); 883 assert(!blk->isWritable()); 884 cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq; 885 } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq || 886 cpu_pkt->cmd == MemCmd::StoreCondFailReq) { 887 // Even though this SC will fail, we still need to send out the 888 // request and get the data to supply it to other snoopers in the case 889 // where the determination the StoreCond fails is delayed due to 890 // all caches not being on the same local bus. 891 cmd = MemCmd::SCUpgradeFailReq; 892 } else if (cpu_pkt->cmd == MemCmd::WriteLineReq) { 893 // forward as invalidate to all other caches, this gives us 894 // the line in exclusive state, and invalidates all other 895 // copies 896 cmd = MemCmd::InvalidateReq; 897 } else { 898 // block is invalid 899 cmd = needsExclusive ? MemCmd::ReadExReq : 900 (isReadOnly ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq); 901 } 902 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize); 903 904 // if there are sharers in the upper levels, pass that info downstream 905 if (cpu_pkt->sharedAsserted()) { 906 // note that cpu_pkt may have spent a considerable time in the 907 // MSHR queue and that the information could possibly be out 908 // of date, however, there is no harm in conservatively 909 // assuming the block is shared 910 pkt->assertShared(); 911 DPRINTF(Cache, "%s passing shared from %s to %s addr %#llx size %d\n", 912 __func__, cpu_pkt->cmdString(), pkt->cmdString(), 913 pkt->getAddr(), pkt->getSize()); 914 } 915 916 // the packet should be block aligned 917 assert(pkt->getAddr() == blockAlign(pkt->getAddr())); 918 919 pkt->allocate(); 920 DPRINTF(Cache, "%s created %s from %s for addr %#llx size %d\n", 921 __func__, pkt->cmdString(), cpu_pkt->cmdString(), pkt->getAddr(), 922 pkt->getSize()); 923 return pkt; 924} 925 926 927Tick 928Cache::recvAtomic(PacketPtr pkt) 929{ 930 // We are in atomic mode so we pay just for lookupLatency here. 931 Cycles lat = lookupLatency; 932 // @TODO: make this a parameter 933 bool last_level_cache = false; 934 935 // Forward the request if the system is in cache bypass mode. 936 if (system->bypassCaches()) 937 return ticksToCycles(memSidePort->sendAtomic(pkt)); 938 939 promoteWholeLineWrites(pkt); 940 941 if (pkt->memInhibitAsserted()) { 942 // have to invalidate ourselves and any lower caches even if 943 // upper cache will be responding 944 if (pkt->isInvalidate()) { 945 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure()); 946 if (blk && blk->isValid()) { 947 tags->invalidate(blk); 948 blk->invalidate(); 949 DPRINTF(Cache, "rcvd mem-inhibited %s on %#llx (%s):" 950 " invalidating\n", 951 pkt->cmdString(), pkt->getAddr(), 952 pkt->isSecure() ? "s" : "ns"); 953 } 954 if (!last_level_cache) { 955 DPRINTF(Cache, "forwarding mem-inhibited %s on %#llx (%s)\n", 956 pkt->cmdString(), pkt->getAddr(), 957 pkt->isSecure() ? "s" : "ns"); 958 lat += ticksToCycles(memSidePort->sendAtomic(pkt)); 959 } 960 } else { 961 DPRINTF(Cache, "rcvd mem-inhibited %s on %#llx: not responding\n", 962 pkt->cmdString(), pkt->getAddr()); 963 } 964 965 return lat * clockPeriod(); 966 } 967 968 // should assert here that there are no outstanding MSHRs or 969 // writebacks... that would mean that someone used an atomic 970 // access in timing mode 971 972 CacheBlk *blk = NULL; 973 PacketList writebacks; 974 bool satisfied = access(pkt, blk, lat, writebacks); 975 976 // handle writebacks resulting from the access here to ensure they 977 // logically proceed anything happening below 978 doWritebacksAtomic(writebacks); 979 980 if (!satisfied) { 981 // MISS 982 983 PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive()); 984 985 bool is_forward = (bus_pkt == NULL); 986 987 if (is_forward) { 988 // just forwarding the same request to the next level 989 // no local cache operation involved 990 bus_pkt = pkt; 991 } 992 993 DPRINTF(Cache, "Sending an atomic %s for %#llx (%s)\n", 994 bus_pkt->cmdString(), bus_pkt->getAddr(), 995 bus_pkt->isSecure() ? "s" : "ns"); 996 997#if TRACING_ON 998 CacheBlk::State old_state = blk ? blk->status : 0; 999#endif 1000 1001 lat += ticksToCycles(memSidePort->sendAtomic(bus_pkt)); 1002 1003 // We are now dealing with the response handling 1004 DPRINTF(Cache, "Receive response: %s for addr %#llx (%s) in state %i\n", 1005 bus_pkt->cmdString(), bus_pkt->getAddr(), 1006 bus_pkt->isSecure() ? "s" : "ns", 1007 old_state); 1008 1009 // If packet was a forward, the response (if any) is already 1010 // in place in the bus_pkt == pkt structure, so we don't need 1011 // to do anything. Otherwise, use the separate bus_pkt to 1012 // generate response to pkt and then delete it. 1013 if (!is_forward) { 1014 if (pkt->needsResponse()) { 1015 assert(bus_pkt->isResponse()); 1016 if (bus_pkt->isError()) { 1017 pkt->makeAtomicResponse(); 1018 pkt->copyError(bus_pkt); 1019 } else if (pkt->cmd == MemCmd::InvalidateReq) { 1020 if (blk) { 1021 // invalidate response to a cache that received 1022 // an invalidate request 1023 satisfyCpuSideRequest(pkt, blk); 1024 } 1025 } else if (pkt->cmd == MemCmd::WriteLineReq) { 1026 // note the use of pkt, not bus_pkt here. 1027 1028 // write-line request to the cache that promoted 1029 // the write to a whole line 1030 blk = handleFill(pkt, blk, writebacks); 1031 satisfyCpuSideRequest(pkt, blk); 1032 } else if (bus_pkt->isRead() || 1033 bus_pkt->cmd == MemCmd::UpgradeResp) { 1034 // we're updating cache state to allow us to 1035 // satisfy the upstream request from the cache 1036 blk = handleFill(bus_pkt, blk, writebacks); 1037 satisfyCpuSideRequest(pkt, blk); 1038 } else { 1039 // we're satisfying the upstream request without 1040 // modifying cache state, e.g., a write-through 1041 pkt->makeAtomicResponse(); 1042 } 1043 } 1044 delete bus_pkt; 1045 } 1046 } 1047 1048 // Note that we don't invoke the prefetcher at all in atomic mode. 1049 // It's not clear how to do it properly, particularly for 1050 // prefetchers that aggressively generate prefetch candidates and 1051 // rely on bandwidth contention to throttle them; these will tend 1052 // to pollute the cache in atomic mode since there is no bandwidth 1053 // contention. If we ever do want to enable prefetching in atomic 1054 // mode, though, this is the place to do it... see timingAccess() 1055 // for an example (though we'd want to issue the prefetch(es) 1056 // immediately rather than calling requestMemSideBus() as we do 1057 // there). 1058 1059 // Handle writebacks (from the response handling) if needed 1060 doWritebacksAtomic(writebacks); 1061 1062 if (pkt->needsResponse()) { 1063 pkt->makeAtomicResponse(); 1064 } 1065 1066 return lat * clockPeriod(); 1067} 1068 1069 1070void 1071Cache::functionalAccess(PacketPtr pkt, bool fromCpuSide) 1072{ 1073 if (system->bypassCaches()) { 1074 // Packets from the memory side are snoop request and 1075 // shouldn't happen in bypass mode. 1076 assert(fromCpuSide); 1077 1078 // The cache should be flushed if we are in cache bypass mode, 1079 // so we don't need to check if we need to update anything. 1080 memSidePort->sendFunctional(pkt); 1081 return; 1082 } 1083 1084 Addr blk_addr = blockAlign(pkt->getAddr()); 1085 bool is_secure = pkt->isSecure(); 1086 CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure); 1087 MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure); 1088 1089 pkt->pushLabel(name()); 1090 1091 CacheBlkPrintWrapper cbpw(blk); 1092 1093 // Note that just because an L2/L3 has valid data doesn't mean an 1094 // L1 doesn't have a more up-to-date modified copy that still 1095 // needs to be found. As a result we always update the request if 1096 // we have it, but only declare it satisfied if we are the owner. 1097 1098 // see if we have data at all (owned or otherwise) 1099 bool have_data = blk && blk->isValid() 1100 && pkt->checkFunctional(&cbpw, blk_addr, is_secure, blkSize, 1101 blk->data); 1102 1103 // data we have is dirty if marked as such or if valid & ownership 1104 // pending due to outstanding UpgradeReq 1105 bool have_dirty = 1106 have_data && (blk->isDirty() || 1107 (mshr && mshr->inService && mshr->isPendingDirty())); 1108 1109 bool done = have_dirty 1110 || cpuSidePort->checkFunctional(pkt) 1111 || mshrQueue.checkFunctional(pkt, blk_addr) 1112 || writeBuffer.checkFunctional(pkt, blk_addr) 1113 || memSidePort->checkFunctional(pkt); 1114 1115 DPRINTF(Cache, "functional %s %#llx (%s) %s%s%s\n", 1116 pkt->cmdString(), pkt->getAddr(), is_secure ? "s" : "ns", 1117 (blk && blk->isValid()) ? "valid " : "", 1118 have_data ? "data " : "", done ? "done " : ""); 1119 1120 // We're leaving the cache, so pop cache->name() label 1121 pkt->popLabel(); 1122 1123 if (done) { 1124 pkt->makeResponse(); 1125 } else { 1126 // if it came as a request from the CPU side then make sure it 1127 // continues towards the memory side 1128 if (fromCpuSide) { 1129 memSidePort->sendFunctional(pkt); 1130 } else if (forwardSnoops && cpuSidePort->isSnooping()) { 1131 // if it came from the memory side, it must be a snoop request 1132 // and we should only forward it if we are forwarding snoops 1133 cpuSidePort->sendFunctionalSnoop(pkt); 1134 } 1135 } 1136} 1137 1138 1139///////////////////////////////////////////////////// 1140// 1141// Response handling: responses from the memory side 1142// 1143///////////////////////////////////////////////////// 1144 1145 1146void 1147Cache::recvTimingResp(PacketPtr pkt) 1148{ 1149 assert(pkt->isResponse()); 1150 1151 // all header delay should be paid for by the crossbar, unless 1152 // this is a prefetch response from above 1153 panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp, 1154 "%s saw a non-zero packet delay\n", name()); 1155 1156 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState); 1157 bool is_error = pkt->isError(); 1158 1159 assert(mshr); 1160 1161 if (is_error) { 1162 DPRINTF(Cache, "Cache received packet with error for addr %#llx (%s), " 1163 "cmd: %s\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns", 1164 pkt->cmdString()); 1165 } 1166 1167 DPRINTF(Cache, "Handling response %s for addr %#llx size %d (%s)\n", 1168 pkt->cmdString(), pkt->getAddr(), pkt->getSize(), 1169 pkt->isSecure() ? "s" : "ns"); 1170 1171 MSHRQueue *mq = mshr->queue; 1172 bool wasFull = mq->isFull(); 1173 1174 if (mshr == noTargetMSHR) { 1175 // we always clear at least one target 1176 clearBlocked(Blocked_NoTargets); 1177 noTargetMSHR = NULL; 1178 } 1179 1180 // Initial target is used just for stats 1181 MSHR::Target *initial_tgt = mshr->getTarget(); 1182 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex(); 1183 Tick miss_latency = curTick() - initial_tgt->recvTime; 1184 PacketList writebacks; 1185 // We need forward_time here because we have a call of 1186 // allocateWriteBuffer() that need this parameter to specify the 1187 // time to request the bus. In this case we use forward latency 1188 // because there is a writeback. We pay also here for headerDelay 1189 // that is charged of bus latencies if the packet comes from the 1190 // bus. 1191 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 1192 1193 if (pkt->req->isUncacheable()) { 1194 assert(pkt->req->masterId() < system->maxMasters()); 1195 mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] += 1196 miss_latency; 1197 } else { 1198 assert(pkt->req->masterId() < system->maxMasters()); 1199 mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] += 1200 miss_latency; 1201 } 1202 1203 // upgrade deferred targets if we got exclusive 1204 if (!pkt->sharedAsserted()) { 1205 mshr->promoteExclusive(); 1206 } 1207 1208 bool is_fill = !mshr->isForward && 1209 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp); 1210 1211 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure()); 1212 1213 if (is_fill && !is_error) { 1214 DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n", 1215 pkt->getAddr()); 1216 1217 blk = handleFill(pkt, blk, writebacks); 1218 assert(blk != NULL); 1219 } 1220 1221 // allow invalidation responses originating from write-line 1222 // requests to be discarded 1223 bool is_invalidate = pkt->isInvalidate(); 1224 1225 // First offset for critical word first calculations 1226 int initial_offset = initial_tgt->pkt->getOffset(blkSize); 1227 1228 while (mshr->hasTargets()) { 1229 MSHR::Target *target = mshr->getTarget(); 1230 Packet *tgt_pkt = target->pkt; 1231 1232 switch (target->source) { 1233 case MSHR::Target::FromCPU: 1234 Tick completion_time; 1235 // Here we charge on completion_time the delay of the xbar if the 1236 // packet comes from it, charged on headerDelay. 1237 completion_time = pkt->headerDelay; 1238 1239 // Software prefetch handling for cache closest to core 1240 if (tgt_pkt->cmd.isSWPrefetch()) { 1241 // a software prefetch would have already been ack'd immediately 1242 // with dummy data so the core would be able to retire it. 1243 // this request completes right here, so we deallocate it. 1244 delete tgt_pkt->req; 1245 delete tgt_pkt; 1246 break; // skip response 1247 } 1248 1249 // unlike the other packet flows, where data is found in other 1250 // caches or memory and brought back, write-line requests always 1251 // have the data right away, so the above check for "is fill?" 1252 // cannot actually be determined until examining the stored MSHR 1253 // state. We "catch up" with that logic here, which is duplicated 1254 // from above. 1255 if (tgt_pkt->cmd == MemCmd::WriteLineReq) { 1256 assert(!is_error); 1257 // we got the block in exclusive state, so promote any 1258 // deferred targets if possible 1259 mshr->promoteExclusive(); 1260 // NB: we use the original packet here and not the response! 1261 blk = handleFill(tgt_pkt, blk, writebacks); 1262 assert(blk != NULL); 1263 1264 // treat as a fill, and discard the invalidation 1265 // response 1266 is_fill = true; 1267 is_invalidate = false; 1268 } 1269 1270 if (is_fill) { 1271 satisfyCpuSideRequest(tgt_pkt, blk, 1272 true, mshr->hasPostDowngrade()); 1273 1274 // How many bytes past the first request is this one 1275 int transfer_offset = 1276 tgt_pkt->getOffset(blkSize) - initial_offset; 1277 if (transfer_offset < 0) { 1278 transfer_offset += blkSize; 1279 } 1280 1281 // If not critical word (offset) return payloadDelay. 1282 // responseLatency is the latency of the return path 1283 // from lower level caches/memory to an upper level cache or 1284 // the core. 1285 completion_time += clockEdge(responseLatency) + 1286 (transfer_offset ? pkt->payloadDelay : 0); 1287 1288 assert(!tgt_pkt->req->isUncacheable()); 1289 1290 assert(tgt_pkt->req->masterId() < system->maxMasters()); 1291 missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] += 1292 completion_time - target->recvTime; 1293 } else if (pkt->cmd == MemCmd::UpgradeFailResp) { 1294 // failed StoreCond upgrade 1295 assert(tgt_pkt->cmd == MemCmd::StoreCondReq || 1296 tgt_pkt->cmd == MemCmd::StoreCondFailReq || 1297 tgt_pkt->cmd == MemCmd::SCUpgradeFailReq); 1298 // responseLatency is the latency of the return path 1299 // from lower level caches/memory to an upper level cache or 1300 // the core. 1301 completion_time += clockEdge(responseLatency) + 1302 pkt->payloadDelay; 1303 tgt_pkt->req->setExtraData(0); 1304 } else { 1305 // not a cache fill, just forwarding response 1306 // responseLatency is the latency of the return path 1307 // from lower level cahces/memory to the core. 1308 completion_time += clockEdge(responseLatency) + 1309 pkt->payloadDelay; 1310 if (pkt->isRead() && !is_error) { 1311 // sanity check 1312 assert(pkt->getAddr() == tgt_pkt->getAddr()); 1313 assert(pkt->getSize() >= tgt_pkt->getSize()); 1314 1315 tgt_pkt->setData(pkt->getConstPtr<uint8_t>()); 1316 } 1317 } 1318 tgt_pkt->makeTimingResponse(); 1319 // if this packet is an error copy that to the new packet 1320 if (is_error) 1321 tgt_pkt->copyError(pkt); 1322 if (tgt_pkt->cmd == MemCmd::ReadResp && 1323 (is_invalidate || mshr->hasPostInvalidate())) { 1324 // If intermediate cache got ReadRespWithInvalidate, 1325 // propagate that. Response should not have 1326 // isInvalidate() set otherwise. 1327 tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate; 1328 DPRINTF(Cache, "%s updated cmd to %s for addr %#llx\n", 1329 __func__, tgt_pkt->cmdString(), tgt_pkt->getAddr()); 1330 } 1331 // Reset the bus additional time as it is now accounted for 1332 tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0; 1333 cpuSidePort->schedTimingResp(tgt_pkt, completion_time); 1334 break; 1335 1336 case MSHR::Target::FromPrefetcher: 1337 assert(tgt_pkt->cmd == MemCmd::HardPFReq); 1338 if (blk) 1339 blk->status |= BlkHWPrefetched; 1340 delete tgt_pkt->req; 1341 delete tgt_pkt; 1342 break; 1343 1344 case MSHR::Target::FromSnoop: 1345 // I don't believe that a snoop can be in an error state 1346 assert(!is_error); 1347 // response to snoop request 1348 DPRINTF(Cache, "processing deferred snoop...\n"); 1349 assert(!(is_invalidate && !mshr->hasPostInvalidate())); 1350 handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate()); 1351 break; 1352 1353 default: 1354 panic("Illegal target->source enum %d\n", target->source); 1355 } 1356 1357 mshr->popTarget(); 1358 } 1359 1360 if (blk && blk->isValid()) { 1361 // an invalidate response stemming from a write line request 1362 // should not invalidate the block, so check if the 1363 // invalidation should be discarded 1364 if (is_invalidate || mshr->hasPostInvalidate()) { 1365 assert(blk != tempBlock); 1366 tags->invalidate(blk); 1367 blk->invalidate(); 1368 } else if (mshr->hasPostDowngrade()) { 1369 blk->status &= ~BlkWritable; 1370 } 1371 } 1372 1373 if (mshr->promoteDeferredTargets()) { 1374 // avoid later read getting stale data while write miss is 1375 // outstanding.. see comment in timingAccess() 1376 if (blk) { 1377 blk->status &= ~BlkReadable; 1378 } 1379 mq = mshr->queue; 1380 mq->markPending(mshr); 1381 schedMemSideSendEvent(clockEdge() + pkt->payloadDelay); 1382 } else { 1383 mq->deallocate(mshr); 1384 if (wasFull && !mq->isFull()) { 1385 clearBlocked((BlockedCause)mq->index); 1386 } 1387 1388 // Request the bus for a prefetch if this deallocation freed enough 1389 // MSHRs for a prefetch to take place 1390 if (prefetcher && mq == &mshrQueue && mshrQueue.canPrefetch()) { 1391 Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(), 1392 clockEdge()); 1393 if (next_pf_time != MaxTick) 1394 schedMemSideSendEvent(next_pf_time); 1395 } 1396 } 1397 // reset the xbar additional timinig as it is now accounted for 1398 pkt->headerDelay = pkt->payloadDelay = 0; 1399 1400 // copy writebacks to write buffer 1401 doWritebacks(writebacks, forward_time); 1402 1403 // if we used temp block, check to see if its valid and then clear it out 1404 if (blk == tempBlock && tempBlock->isValid()) { 1405 // We use forwardLatency here because we are copying 1406 // Writebacks/CleanEvicts to write buffer. It specifies the latency to 1407 // allocate an internal buffer and to schedule an event to the 1408 // queued port. 1409 if (blk->isDirty()) { 1410 PacketPtr wbPkt = writebackBlk(blk); 1411 allocateWriteBuffer(wbPkt, forward_time); 1412 // Set BLOCK_CACHED flag if cached above. 1413 if (isCachedAbove(wbPkt)) 1414 wbPkt->setBlockCached(); 1415 } else { 1416 PacketPtr wcPkt = cleanEvictBlk(blk); 1417 // Check to see if block is cached above. If not allocate 1418 // write buffer 1419 if (isCachedAbove(wcPkt)) 1420 delete wcPkt; 1421 else 1422 allocateWriteBuffer(wcPkt, forward_time); 1423 } 1424 blk->invalidate(); 1425 } 1426 1427 DPRINTF(Cache, "Leaving %s with %s for addr %#llx\n", __func__, 1428 pkt->cmdString(), pkt->getAddr()); 1429 delete pkt; 1430} 1431 1432PacketPtr 1433Cache::writebackBlk(CacheBlk *blk) 1434{ 1435 chatty_assert(!isReadOnly, "Writeback from read-only cache"); 1436 assert(blk && blk->isValid() && blk->isDirty()); 1437 1438 writebacks[Request::wbMasterId]++; 1439 1440 Request *writebackReq = 1441 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0, 1442 Request::wbMasterId); 1443 if (blk->isSecure()) 1444 writebackReq->setFlags(Request::SECURE); 1445 1446 writebackReq->taskId(blk->task_id); 1447 blk->task_id= ContextSwitchTaskId::Unknown; 1448 blk->tickInserted = curTick(); 1449 1450 PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback); 1451 if (blk->isWritable()) { 1452 // not asserting shared means we pass the block in modified 1453 // state, mark our own block non-writeable 1454 blk->status &= ~BlkWritable; 1455 } else { 1456 // we are in the owned state, tell the receiver 1457 writeback->assertShared(); 1458 } 1459 1460 writeback->allocate(); 1461 std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize); 1462 1463 blk->status &= ~BlkDirty; 1464 return writeback; 1465} 1466 1467PacketPtr 1468Cache::cleanEvictBlk(CacheBlk *blk) 1469{ 1470 assert(blk && blk->isValid() && !blk->isDirty()); 1471 // Creating a zero sized write, a message to the snoop filter 1472 Request *req = 1473 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0, 1474 Request::wbMasterId); 1475 if (blk->isSecure()) 1476 req->setFlags(Request::SECURE); 1477 1478 req->taskId(blk->task_id); 1479 blk->task_id = ContextSwitchTaskId::Unknown; 1480 blk->tickInserted = curTick(); 1481 1482 PacketPtr pkt = new Packet(req, MemCmd::CleanEvict); 1483 pkt->allocate(); 1484 DPRINTF(Cache, "%s%s %x Create CleanEvict\n", pkt->cmdString(), 1485 pkt->req->isInstFetch() ? " (ifetch)" : "", 1486 pkt->getAddr()); 1487 1488 return pkt; 1489} 1490 1491void 1492Cache::memWriteback() 1493{ 1494 CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor); 1495 tags->forEachBlk(visitor); 1496} 1497 1498void 1499Cache::memInvalidate() 1500{ 1501 CacheBlkVisitorWrapper visitor(*this, &Cache::invalidateVisitor); 1502 tags->forEachBlk(visitor); 1503} 1504 1505bool 1506Cache::isDirty() const 1507{ 1508 CacheBlkIsDirtyVisitor visitor; 1509 tags->forEachBlk(visitor); 1510 1511 return visitor.isDirty(); 1512} 1513 1514bool 1515Cache::writebackVisitor(CacheBlk &blk) 1516{ 1517 if (blk.isDirty()) { 1518 assert(blk.isValid()); 1519 1520 Request request(tags->regenerateBlkAddr(blk.tag, blk.set), 1521 blkSize, 0, Request::funcMasterId); 1522 request.taskId(blk.task_id); 1523 1524 Packet packet(&request, MemCmd::WriteReq); 1525 packet.dataStatic(blk.data); 1526 1527 memSidePort->sendFunctional(&packet); 1528 1529 blk.status &= ~BlkDirty; 1530 } 1531 1532 return true; 1533} 1534 1535bool 1536Cache::invalidateVisitor(CacheBlk &blk) 1537{ 1538 1539 if (blk.isDirty()) 1540 warn_once("Invalidating dirty cache lines. Expect things to break.\n"); 1541 1542 if (blk.isValid()) { 1543 assert(!blk.isDirty()); 1544 tags->invalidate(&blk); 1545 blk.invalidate(); 1546 } 1547 1548 return true; 1549} 1550 1551CacheBlk* 1552Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks) 1553{ 1554 CacheBlk *blk = tags->findVictim(addr); 1555 1556 // It is valid to return NULL if there is no victim 1557 if (!blk) 1558 return nullptr; 1559 1560 if (blk->isValid()) { 1561 Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set); 1562 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure()); 1563 if (repl_mshr) { 1564 // must be an outstanding upgrade request 1565 // on a block we're about to replace... 1566 assert(!blk->isWritable() || blk->isDirty()); 1567 assert(repl_mshr->needsExclusive()); 1568 // too hard to replace block with transient state 1569 // allocation failed, block not inserted 1570 return NULL; 1571 } else { 1572 DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx (%s): %s\n", 1573 repl_addr, blk->isSecure() ? "s" : "ns", 1574 addr, is_secure ? "s" : "ns", 1575 blk->isDirty() ? "writeback" : "clean"); 1576 1577 // Will send up Writeback/CleanEvict snoops via isCachedAbove 1578 // when pushing this writeback list into the write buffer. 1579 if (blk->isDirty()) { 1580 // Save writeback packet for handling by caller 1581 writebacks.push_back(writebackBlk(blk)); 1582 } else { 1583 writebacks.push_back(cleanEvictBlk(blk)); 1584 } 1585 } 1586 } 1587 1588 return blk; 1589} 1590 1591 1592// Note that the reason we return a list of writebacks rather than 1593// inserting them directly in the write buffer is that this function 1594// is called by both atomic and timing-mode accesses, and in atomic 1595// mode we don't mess with the write buffer (we just perform the 1596// writebacks atomically once the original request is complete). 1597CacheBlk* 1598Cache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks) 1599{ 1600 assert(pkt->isResponse() || pkt->cmd == MemCmd::WriteLineReq); 1601 Addr addr = pkt->getAddr(); 1602 bool is_secure = pkt->isSecure(); 1603#if TRACING_ON 1604 CacheBlk::State old_state = blk ? blk->status : 0; 1605#endif 1606 1607 // When handling a fill, discard any CleanEvicts for the 1608 // same address in write buffer. 1609 Addr M5_VAR_USED blk_addr = blockAlign(pkt->getAddr()); 1610 std::vector<MSHR *> M5_VAR_USED wbs; 1611 assert (!writeBuffer.findMatches(blk_addr, is_secure, wbs)); 1612 1613 if (blk == NULL) { 1614 // better have read new data... 1615 assert(pkt->hasData()); 1616 1617 // only read responses and write-line requests have data; 1618 // note that we don't write the data here for write-line - that 1619 // happens in the subsequent satisfyCpuSideRequest. 1620 assert(pkt->isRead() || pkt->cmd == MemCmd::WriteLineReq); 1621 1622 // need to do a replacement 1623 blk = allocateBlock(addr, is_secure, writebacks); 1624 if (blk == NULL) { 1625 // No replaceable block... just use temporary storage to 1626 // complete the current request and then get rid of it 1627 assert(!tempBlock->isValid()); 1628 blk = tempBlock; 1629 tempBlock->set = tags->extractSet(addr); 1630 tempBlock->tag = tags->extractTag(addr); 1631 // @todo: set security state as well... 1632 DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr, 1633 is_secure ? "s" : "ns"); 1634 } else { 1635 tags->insertBlock(pkt, blk); 1636 } 1637 1638 // we should never be overwriting a valid block 1639 assert(!blk->isValid()); 1640 } else { 1641 // existing block... probably an upgrade 1642 assert(blk->tag == tags->extractTag(addr)); 1643 // either we're getting new data or the block should already be valid 1644 assert(pkt->hasData() || blk->isValid()); 1645 // don't clear block status... if block is already dirty we 1646 // don't want to lose that 1647 } 1648 1649 if (is_secure) 1650 blk->status |= BlkSecure; 1651 blk->status |= BlkValid | BlkReadable; 1652 1653 // sanity check for whole-line writes, which should always be 1654 // marked as writable as part of the fill, and then later marked 1655 // dirty as part of satisfyCpuSideRequest 1656 if (pkt->cmd == MemCmd::WriteLineReq) { 1657 assert(!pkt->sharedAsserted()); 1658 // at the moment other caches do not respond to the 1659 // invalidation requests corresponding to a whole-line write 1660 assert(!pkt->memInhibitAsserted()); 1661 } 1662 1663 if (!pkt->sharedAsserted()) { 1664 // we could get non-shared responses from memory (rather than 1665 // a cache) even in a read-only cache, note that we set this 1666 // bit even for a read-only cache as we use it to represent 1667 // the exclusive state 1668 blk->status |= BlkWritable; 1669 1670 // If we got this via cache-to-cache transfer (i.e., from a 1671 // cache that was an owner) and took away that owner's copy, 1672 // then we need to write it back. Normally this happens 1673 // anyway as a side effect of getting a copy to write it, but 1674 // there are cases (such as failed store conditionals or 1675 // compare-and-swaps) where we'll demand an exclusive copy but 1676 // end up not writing it. 1677 if (pkt->memInhibitAsserted()) { 1678 blk->status |= BlkDirty; 1679 1680 chatty_assert(!isReadOnly, "Should never see dirty snoop response " 1681 "in read-only cache %s\n", name()); 1682 } 1683 } 1684 1685 DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n", 1686 addr, is_secure ? "s" : "ns", old_state, blk->print()); 1687 1688 // if we got new data, copy it in (checking for a read response 1689 // and a response that has data is the same in the end) 1690 if (pkt->isRead()) { 1691 // sanity checks 1692 assert(pkt->hasData()); 1693 assert(pkt->getSize() == blkSize); 1694 1695 std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize); 1696 } 1697 // We pay for fillLatency here. 1698 blk->whenReady = clockEdge() + fillLatency * clockPeriod() + 1699 pkt->payloadDelay; 1700 1701 return blk; 1702} 1703 1704 1705///////////////////////////////////////////////////// 1706// 1707// Snoop path: requests coming in from the memory side 1708// 1709///////////////////////////////////////////////////// 1710 1711void 1712Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data, 1713 bool already_copied, bool pending_inval) 1714{ 1715 // sanity check 1716 assert(req_pkt->isRequest()); 1717 assert(req_pkt->needsResponse()); 1718 1719 DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__, 1720 req_pkt->cmdString(), req_pkt->getAddr(), req_pkt->getSize()); 1721 // timing-mode snoop responses require a new packet, unless we 1722 // already made a copy... 1723 PacketPtr pkt = req_pkt; 1724 if (!already_copied) 1725 // do not clear flags, and allocate space for data if the 1726 // packet needs it (the only packets that carry data are read 1727 // responses) 1728 pkt = new Packet(req_pkt, false, req_pkt->isRead()); 1729 1730 assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() || 1731 pkt->sharedAsserted()); 1732 pkt->makeTimingResponse(); 1733 if (pkt->isRead()) { 1734 pkt->setDataFromBlock(blk_data, blkSize); 1735 } 1736 if (pkt->cmd == MemCmd::ReadResp && pending_inval) { 1737 // Assume we defer a response to a read from a far-away cache 1738 // A, then later defer a ReadExcl from a cache B on the same 1739 // bus as us. We'll assert MemInhibit in both cases, but in 1740 // the latter case MemInhibit will keep the invalidation from 1741 // reaching cache A. This special response tells cache A that 1742 // it gets the block to satisfy its read, but must immediately 1743 // invalidate it. 1744 pkt->cmd = MemCmd::ReadRespWithInvalidate; 1745 } 1746 // Here we consider forward_time, paying for just forward latency and 1747 // also charging the delay provided by the xbar. 1748 // forward_time is used as send_time in next allocateWriteBuffer(). 1749 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 1750 // Here we reset the timing of the packet. 1751 pkt->headerDelay = pkt->payloadDelay = 0; 1752 DPRINTF(Cache, "%s created response: %s addr %#llx size %d tick: %lu\n", 1753 __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize(), 1754 forward_time); 1755 memSidePort->schedTimingSnoopResp(pkt, forward_time, true); 1756} 1757 1758uint32_t 1759Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing, 1760 bool is_deferred, bool pending_inval) 1761{ 1762 DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__, 1763 pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 1764 // deferred snoops can only happen in timing mode 1765 assert(!(is_deferred && !is_timing)); 1766 // pending_inval only makes sense on deferred snoops 1767 assert(!(pending_inval && !is_deferred)); 1768 assert(pkt->isRequest()); 1769 1770 // the packet may get modified if we or a forwarded snooper 1771 // responds in atomic mode, so remember a few things about the 1772 // original packet up front 1773 bool invalidate = pkt->isInvalidate(); 1774 bool M5_VAR_USED needs_exclusive = pkt->needsExclusive(); 1775 1776 uint32_t snoop_delay = 0; 1777 1778 if (forwardSnoops) { 1779 // first propagate snoop upward to see if anyone above us wants to 1780 // handle it. save & restore packet src since it will get 1781 // rewritten to be relative to cpu-side bus (if any) 1782 bool alreadyResponded = pkt->memInhibitAsserted(); 1783 if (is_timing) { 1784 // copy the packet so that we can clear any flags before 1785 // forwarding it upwards, we also allocate data (passing 1786 // the pointer along in case of static data), in case 1787 // there is a snoop hit in upper levels 1788 Packet snoopPkt(pkt, true, true); 1789 snoopPkt.setExpressSnoop(); 1790 snoopPkt.pushSenderState(new ForwardResponseRecord()); 1791 // the snoop packet does not need to wait any additional 1792 // time 1793 snoopPkt.headerDelay = snoopPkt.payloadDelay = 0; 1794 cpuSidePort->sendTimingSnoopReq(&snoopPkt); 1795 1796 // add the header delay (including crossbar and snoop 1797 // delays) of the upward snoop to the snoop delay for this 1798 // cache 1799 snoop_delay += snoopPkt.headerDelay; 1800 1801 if (snoopPkt.memInhibitAsserted()) { 1802 // cache-to-cache response from some upper cache 1803 assert(!alreadyResponded); 1804 pkt->assertMemInhibit(); 1805 } else { 1806 // no cache (or anyone else for that matter) will 1807 // respond, so delete the ForwardResponseRecord here 1808 delete snoopPkt.popSenderState(); 1809 } 1810 if (snoopPkt.sharedAsserted()) { 1811 pkt->assertShared(); 1812 } 1813 // If this request is a prefetch or clean evict and an upper level 1814 // signals block present, make sure to propagate the block 1815 // presence to the requester. 1816 if (snoopPkt.isBlockCached()) { 1817 pkt->setBlockCached(); 1818 } 1819 } else { 1820 cpuSidePort->sendAtomicSnoop(pkt); 1821 if (!alreadyResponded && pkt->memInhibitAsserted()) { 1822 // cache-to-cache response from some upper cache: 1823 // forward response to original requester 1824 assert(pkt->isResponse()); 1825 } 1826 } 1827 } 1828 1829 if (!blk || !blk->isValid()) { 1830 DPRINTF(Cache, "%s snoop miss for %s addr %#llx size %d\n", 1831 __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 1832 return snoop_delay; 1833 } else { 1834 DPRINTF(Cache, "%s snoop hit for %s for addr %#llx size %d, " 1835 "old state is %s\n", __func__, pkt->cmdString(), 1836 pkt->getAddr(), pkt->getSize(), blk->print()); 1837 } 1838 1839 chatty_assert(!(isReadOnly && blk->isDirty()), 1840 "Should never have a dirty block in a read-only cache %s\n", 1841 name()); 1842 1843 // We may end up modifying both the block state and the packet (if 1844 // we respond in atomic mode), so just figure out what to do now 1845 // and then do it later. If we find dirty data while snooping for 1846 // an invalidate, we don't need to send a response. The 1847 // invalidation itself is taken care of below. 1848 bool respond = blk->isDirty() && pkt->needsResponse() && 1849 pkt->cmd != MemCmd::InvalidateReq; 1850 bool have_exclusive = blk->isWritable(); 1851 1852 // Invalidate any prefetch's from below that would strip write permissions 1853 // MemCmd::HardPFReq is only observed by upstream caches. After missing 1854 // above and in it's own cache, a new MemCmd::ReadReq is created that 1855 // downstream caches observe. 1856 if (pkt->mustCheckAbove()) { 1857 DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s from" 1858 " lower cache\n", pkt->getAddr(), pkt->cmdString()); 1859 pkt->setBlockCached(); 1860 return snoop_delay; 1861 } 1862 1863 if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) { 1864 // reading non-exclusive shared data, note that we retain 1865 // the block in owned state if it is dirty, with the response 1866 // taken care of below, and otherwhise simply downgrade to 1867 // shared 1868 assert(!needs_exclusive); 1869 pkt->assertShared(); 1870 blk->status &= ~BlkWritable; 1871 } 1872 1873 if (respond) { 1874 // prevent anyone else from responding, cache as well as 1875 // memory, and also prevent any memory from even seeing the 1876 // request (with current inhibited semantics), note that this 1877 // applies both to reads and writes and that for writes it 1878 // works thanks to the fact that we still have dirty data and 1879 // will write it back at a later point 1880 pkt->assertMemInhibit(); 1881 if (have_exclusive) { 1882 // in the case of an uncacheable request there is no point 1883 // in setting the exclusive flag, but since the recipient 1884 // does not care there is no harm in doing so, in any case 1885 // it is just a hint 1886 pkt->setSupplyExclusive(); 1887 } 1888 if (is_timing) { 1889 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval); 1890 } else { 1891 pkt->makeAtomicResponse(); 1892 pkt->setDataFromBlock(blk->data, blkSize); 1893 } 1894 } 1895 1896 if (!respond && is_timing && is_deferred) { 1897 // if it's a deferred timing snoop then we've made a copy of 1898 // both the request and the packet, and so if we're not using 1899 // those copies to respond and delete them here 1900 DPRINTF(Cache, "Deleting pkt %p and request %p for cmd %s addr: %p\n", 1901 pkt, pkt->req, pkt->cmdString(), pkt->getAddr()); 1902 1903 // the packets needs a response (just not from us), so we also 1904 // need to delete the request and not rely on the packet 1905 // destructor 1906 assert(pkt->needsResponse()); 1907 delete pkt->req; 1908 delete pkt; 1909 } 1910 1911 // Do this last in case it deallocates block data or something 1912 // like that 1913 if (invalidate) { 1914 if (blk != tempBlock) 1915 tags->invalidate(blk); 1916 blk->invalidate(); 1917 } 1918 1919 DPRINTF(Cache, "new state is %s\n", blk->print()); 1920 1921 return snoop_delay; 1922} 1923 1924 1925void 1926Cache::recvTimingSnoopReq(PacketPtr pkt) 1927{ 1928 DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__, 1929 pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 1930 1931 // Snoops shouldn't happen when bypassing caches 1932 assert(!system->bypassCaches()); 1933 1934 // no need to snoop requests that are not in range 1935 if (!inRange(pkt->getAddr())) { 1936 return; 1937 } 1938 1939 bool is_secure = pkt->isSecure(); 1940 CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure); 1941 1942 Addr blk_addr = blockAlign(pkt->getAddr()); 1943 MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure); 1944 1945 // Update the latency cost of the snoop so that the crossbar can 1946 // account for it. Do not overwrite what other neighbouring caches 1947 // have already done, rather take the maximum. The update is 1948 // tentative, for cases where we return before an upward snoop 1949 // happens below. 1950 pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, 1951 lookupLatency * clockPeriod()); 1952 1953 // Inform request(Prefetch, CleanEvict or Writeback) from below of 1954 // MSHR hit, set setBlockCached. 1955 if (mshr && pkt->mustCheckAbove()) { 1956 DPRINTF(Cache, "Setting block cached for %s from" 1957 "lower cache on mshr hit %#x\n", 1958 pkt->cmdString(), pkt->getAddr()); 1959 pkt->setBlockCached(); 1960 return; 1961 } 1962 1963 // Let the MSHR itself track the snoop and decide whether we want 1964 // to go ahead and do the regular cache snoop 1965 if (mshr && mshr->handleSnoop(pkt, order++)) { 1966 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s)." 1967 "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns", 1968 mshr->print()); 1969 1970 if (mshr->getNumTargets() > numTarget) 1971 warn("allocating bonus target for snoop"); //handle later 1972 return; 1973 } 1974 1975 //We also need to check the writeback buffers and handle those 1976 std::vector<MSHR *> writebacks; 1977 if (writeBuffer.findMatches(blk_addr, is_secure, writebacks)) { 1978 DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n", 1979 pkt->getAddr(), is_secure ? "s" : "ns"); 1980 1981 // Look through writebacks for any cachable writes. 1982 // We should only ever find a single match 1983 assert(writebacks.size() == 1); 1984 MSHR *wb_entry = writebacks[0]; 1985 // Expect to see only Writebacks and/or CleanEvicts here, both of 1986 // which should not be generated for uncacheable data. 1987 assert(!wb_entry->isUncacheable()); 1988 // There should only be a single request responsible for generating 1989 // Writebacks/CleanEvicts. 1990 assert(wb_entry->getNumTargets() == 1); 1991 PacketPtr wb_pkt = wb_entry->getTarget()->pkt; 1992 assert(wb_pkt->evictingBlock()); 1993 1994 if (pkt->evictingBlock()) { 1995 // if the block is found in the write queue, set the BLOCK_CACHED 1996 // flag for Writeback/CleanEvict snoop. On return the snoop will 1997 // propagate the BLOCK_CACHED flag in Writeback packets and prevent 1998 // any CleanEvicts from travelling down the memory hierarchy. 1999 pkt->setBlockCached(); 2000 DPRINTF(Cache, "Squashing %s from lower cache on writequeue hit" 2001 " %#x\n", pkt->cmdString(), pkt->getAddr()); 2002 return; 2003 } 2004 2005 if (wb_pkt->cmd == MemCmd::Writeback) { 2006 assert(!pkt->memInhibitAsserted()); 2007 pkt->assertMemInhibit(); 2008 if (!pkt->needsExclusive()) { 2009 pkt->assertShared(); 2010 // the writeback is no longer passing exclusivity (the 2011 // receiving cache should consider the block owned 2012 // rather than modified) 2013 wb_pkt->assertShared(); 2014 } else { 2015 // if we're not asserting the shared line, we need to 2016 // invalidate our copy. we'll do that below as long as 2017 // the packet's invalidate flag is set... 2018 assert(pkt->isInvalidate()); 2019 } 2020 doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(), 2021 false, false); 2022 } else { 2023 assert(wb_pkt->cmd == MemCmd::CleanEvict); 2024 // The cache technically holds the block until the 2025 // corresponding CleanEvict message reaches the crossbar 2026 // below. Therefore when a snoop encounters a CleanEvict 2027 // message we must set assertShared (just like when it 2028 // encounters a Writeback) to avoid the snoop filter 2029 // prematurely clearing the holder bit in the crossbar 2030 // below 2031 if (!pkt->needsExclusive()) 2032 pkt->assertShared(); 2033 else 2034 assert(pkt->isInvalidate()); 2035 } 2036 2037 if (pkt->isInvalidate()) { 2038 // Invalidation trumps our writeback... discard here 2039 // Note: markInService will remove entry from writeback buffer. 2040 markInService(wb_entry, false); 2041 delete wb_pkt; 2042 } 2043 } 2044 2045 // If this was a shared writeback, there may still be 2046 // other shared copies above that require invalidation. 2047 // We could be more selective and return here if the 2048 // request is non-exclusive or if the writeback is 2049 // exclusive. 2050 uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false); 2051 2052 // Override what we did when we first saw the snoop, as we now 2053 // also have the cost of the upwards snoops to account for 2054 pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay + 2055 lookupLatency * clockPeriod()); 2056} 2057 2058bool 2059Cache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt) 2060{ 2061 // Express snoop responses from master to slave, e.g., from L1 to L2 2062 cache->recvTimingSnoopResp(pkt); 2063 return true; 2064} 2065 2066Tick 2067Cache::recvAtomicSnoop(PacketPtr pkt) 2068{ 2069 // Snoops shouldn't happen when bypassing caches 2070 assert(!system->bypassCaches()); 2071 2072 // no need to snoop requests that are not in range. 2073 if (!inRange(pkt->getAddr())) { 2074 return 0; 2075 } 2076 2077 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure()); 2078 uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false); 2079 return snoop_delay + lookupLatency * clockPeriod(); 2080} 2081 2082 2083MSHR * 2084Cache::getNextMSHR() 2085{ 2086 // Check both MSHR queue and write buffer for potential requests, 2087 // note that null does not mean there is no request, it could 2088 // simply be that it is not ready 2089 MSHR *miss_mshr = mshrQueue.getNextMSHR(); 2090 MSHR *write_mshr = writeBuffer.getNextMSHR(); 2091 2092 // If we got a write buffer request ready, first priority is a 2093 // full write buffer, otherwhise we favour the miss requests 2094 if (write_mshr && 2095 ((writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) || 2096 !miss_mshr)) { 2097 // need to search MSHR queue for conflicting earlier miss. 2098 MSHR *conflict_mshr = 2099 mshrQueue.findPending(write_mshr->blkAddr, 2100 write_mshr->isSecure); 2101 2102 if (conflict_mshr && conflict_mshr->order < write_mshr->order) { 2103 // Service misses in order until conflict is cleared. 2104 return conflict_mshr; 2105 2106 // @todo Note that we ignore the ready time of the conflict here 2107 } 2108 2109 // No conflicts; issue write 2110 return write_mshr; 2111 } else if (miss_mshr) { 2112 // need to check for conflicting earlier writeback 2113 MSHR *conflict_mshr = 2114 writeBuffer.findPending(miss_mshr->blkAddr, 2115 miss_mshr->isSecure); 2116 if (conflict_mshr) { 2117 // not sure why we don't check order here... it was in the 2118 // original code but commented out. 2119 2120 // The only way this happens is if we are 2121 // doing a write and we didn't have permissions 2122 // then subsequently saw a writeback (owned got evicted) 2123 // We need to make sure to perform the writeback first 2124 // To preserve the dirty data, then we can issue the write 2125 2126 // should we return write_mshr here instead? I.e. do we 2127 // have to flush writes in order? I don't think so... not 2128 // for Alpha anyway. Maybe for x86? 2129 return conflict_mshr; 2130 2131 // @todo Note that we ignore the ready time of the conflict here 2132 } 2133 2134 // No conflicts; issue read 2135 return miss_mshr; 2136 } 2137 2138 // fall through... no pending requests. Try a prefetch. 2139 assert(!miss_mshr && !write_mshr); 2140 if (prefetcher && mshrQueue.canPrefetch()) { 2141 // If we have a miss queue slot, we can try a prefetch 2142 PacketPtr pkt = prefetcher->getPacket(); 2143 if (pkt) { 2144 Addr pf_addr = blockAlign(pkt->getAddr()); 2145 if (!tags->findBlock(pf_addr, pkt->isSecure()) && 2146 !mshrQueue.findMatch(pf_addr, pkt->isSecure()) && 2147 !writeBuffer.findMatch(pf_addr, pkt->isSecure())) { 2148 // Update statistic on number of prefetches issued 2149 // (hwpf_mshr_misses) 2150 assert(pkt->req->masterId() < system->maxMasters()); 2151 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++; 2152 2153 // allocate an MSHR and return it, note 2154 // that we send the packet straight away, so do not 2155 // schedule the send 2156 return allocateMissBuffer(pkt, curTick(), false); 2157 } else { 2158 // free the request and packet 2159 delete pkt->req; 2160 delete pkt; 2161 } 2162 } 2163 } 2164 2165 return NULL; 2166} 2167 2168bool 2169Cache::isCachedAbove(PacketPtr pkt, bool is_timing) const 2170{ 2171 if (!forwardSnoops) 2172 return false; 2173 // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and 2174 // Writeback snoops into upper level caches to check for copies of the 2175 // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict 2176 // packet, the cache can inform the crossbar below of presence or absence 2177 // of the block. 2178 if (is_timing) { 2179 Packet snoop_pkt(pkt, true, false); 2180 snoop_pkt.setExpressSnoop(); 2181 // Assert that packet is either Writeback or CleanEvict and not a 2182 // prefetch request because prefetch requests need an MSHR and may 2183 // generate a snoop response. 2184 assert(pkt->evictingBlock()); 2185 snoop_pkt.senderState = NULL; 2186 cpuSidePort->sendTimingSnoopReq(&snoop_pkt); 2187 // Writeback/CleanEvict snoops do not generate a snoop response. 2188 assert(!(snoop_pkt.memInhibitAsserted())); 2189 return snoop_pkt.isBlockCached(); 2190 } else { 2191 cpuSidePort->sendAtomicSnoop(pkt); 2192 return pkt->isBlockCached(); 2193 } 2194} 2195 2196PacketPtr 2197Cache::getTimingPacket() 2198{ 2199 MSHR *mshr = getNextMSHR(); 2200 2201 if (mshr == NULL) { 2202 return NULL; 2203 } 2204 2205 // use request from 1st target 2206 PacketPtr tgt_pkt = mshr->getTarget()->pkt; 2207 PacketPtr pkt = NULL; 2208 2209 DPRINTF(CachePort, "%s %s for addr %#llx size %d\n", __func__, 2210 tgt_pkt->cmdString(), tgt_pkt->getAddr(), tgt_pkt->getSize()); 2211 2212 CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure); 2213 2214 if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) { 2215 // We need to check the caches above us to verify that 2216 // they don't have a copy of this block in the dirty state 2217 // at the moment. Without this check we could get a stale 2218 // copy from memory that might get used in place of the 2219 // dirty one. 2220 Packet snoop_pkt(tgt_pkt, true, false); 2221 snoop_pkt.setExpressSnoop(); 2222 snoop_pkt.senderState = mshr; 2223 cpuSidePort->sendTimingSnoopReq(&snoop_pkt); 2224 2225 // Check to see if the prefetch was squashed by an upper cache (to 2226 // prevent us from grabbing the line) or if a Check to see if a 2227 // writeback arrived between the time the prefetch was placed in 2228 // the MSHRs and when it was selected to be sent or if the 2229 // prefetch was squashed by an upper cache. 2230 2231 // It is important to check memInhibitAsserted before 2232 // prefetchSquashed. If another cache has asserted MEM_INGIBIT, it 2233 // will be sending a response which will arrive at the MSHR 2234 // allocated ofr this request. Checking the prefetchSquash first 2235 // may result in the MSHR being prematurely deallocated. 2236 2237 if (snoop_pkt.memInhibitAsserted()) { 2238 // If we are getting a non-shared response it is dirty 2239 bool pending_dirty_resp = !snoop_pkt.sharedAsserted(); 2240 markInService(mshr, pending_dirty_resp); 2241 DPRINTF(Cache, "Upward snoop of prefetch for addr" 2242 " %#x (%s) hit\n", 2243 tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns"); 2244 return NULL; 2245 } 2246 2247 if (snoop_pkt.isBlockCached() || blk != NULL) { 2248 DPRINTF(Cache, "Block present, prefetch squashed by cache. " 2249 "Deallocating mshr target %#x.\n", 2250 mshr->blkAddr); 2251 2252 // Deallocate the mshr target 2253 if (tgt_pkt->cmd != MemCmd::Writeback) { 2254 if (mshr->queue->forceDeallocateTarget(mshr)) { 2255 // Clear block if this deallocation resulted freed an 2256 // mshr when all had previously been utilized 2257 clearBlocked((BlockedCause)(mshr->queue->index)); 2258 } 2259 return NULL; 2260 } else { 2261 // If this is a Writeback, and the snoops indicate that the blk 2262 // is cached above, set the BLOCK_CACHED flag in the Writeback 2263 // packet, so that it does not reset the bits corresponding to 2264 // this block in the snoop filter below. 2265 tgt_pkt->setBlockCached(); 2266 } 2267 } 2268 } 2269 2270 if (mshr->isForwardNoResponse()) { 2271 // no response expected, just forward packet as it is 2272 assert(tags->findBlock(mshr->blkAddr, mshr->isSecure) == NULL); 2273 pkt = tgt_pkt; 2274 } else { 2275 pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive()); 2276 2277 mshr->isForward = (pkt == NULL); 2278 2279 if (mshr->isForward) { 2280 // not a cache block request, but a response is expected 2281 // make copy of current packet to forward, keep current 2282 // copy for response handling 2283 pkt = new Packet(tgt_pkt, false, true); 2284 if (pkt->isWrite()) { 2285 pkt->setData(tgt_pkt->getConstPtr<uint8_t>()); 2286 } 2287 } 2288 } 2289 2290 assert(pkt != NULL); 2291 pkt->senderState = mshr; 2292 return pkt; 2293} 2294 2295 2296Tick 2297Cache::nextMSHRReadyTime() const 2298{ 2299 Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(), 2300 writeBuffer.nextMSHRReadyTime()); 2301 2302 // Don't signal prefetch ready time if no MSHRs available 2303 // Will signal once enoguh MSHRs are deallocated 2304 if (prefetcher && mshrQueue.canPrefetch()) { 2305 nextReady = std::min(nextReady, 2306 prefetcher->nextPrefetchReadyTime()); 2307 } 2308 2309 return nextReady; 2310} 2311 2312void 2313Cache::serialize(CheckpointOut &cp) const 2314{ 2315 bool dirty(isDirty()); 2316 2317 if (dirty) { 2318 warn("*** The cache still contains dirty data. ***\n"); 2319 warn(" Make sure to drain the system using the correct flags.\n"); 2320 warn(" This checkpoint will not restore correctly and dirty data in " 2321 "the cache will be lost!\n"); 2322 } 2323 2324 // Since we don't checkpoint the data in the cache, any dirty data 2325 // will be lost when restoring from a checkpoint of a system that 2326 // wasn't drained properly. Flag the checkpoint as invalid if the 2327 // cache contains dirty data. 2328 bool bad_checkpoint(dirty); 2329 SERIALIZE_SCALAR(bad_checkpoint); 2330} 2331 2332void 2333Cache::unserialize(CheckpointIn &cp) 2334{ 2335 bool bad_checkpoint; 2336 UNSERIALIZE_SCALAR(bad_checkpoint); 2337 if (bad_checkpoint) { 2338 fatal("Restoring from checkpoints with dirty caches is not supported " 2339 "in the classic memory system. Please remove any caches or " 2340 " drain them properly before taking checkpoints.\n"); 2341 } 2342} 2343 2344/////////////// 2345// 2346// CpuSidePort 2347// 2348/////////////// 2349 2350AddrRangeList 2351Cache::CpuSidePort::getAddrRanges() const 2352{ 2353 return cache->getAddrRanges(); 2354} 2355 2356bool 2357Cache::CpuSidePort::recvTimingReq(PacketPtr pkt) 2358{ 2359 assert(!cache->system->bypassCaches()); 2360 2361 bool success = false; 2362 2363 // always let inhibited requests through, even if blocked, 2364 // ultimately we should check if this is an express snoop, but at 2365 // the moment that flag is only set in the cache itself 2366 if (pkt->memInhibitAsserted()) { 2367 // do not change the current retry state 2368 bool M5_VAR_USED bypass_success = cache->recvTimingReq(pkt); 2369 assert(bypass_success); 2370 return true; 2371 } else if (blocked || mustSendRetry) { 2372 // either already committed to send a retry, or blocked 2373 success = false; 2374 } else { 2375 // pass it on to the cache, and let the cache decide if we 2376 // have to retry or not 2377 success = cache->recvTimingReq(pkt); 2378 } 2379 2380 // remember if we have to retry 2381 mustSendRetry = !success; 2382 return success; 2383} 2384 2385Tick 2386Cache::CpuSidePort::recvAtomic(PacketPtr pkt) 2387{ 2388 return cache->recvAtomic(pkt); 2389} 2390 2391void 2392Cache::CpuSidePort::recvFunctional(PacketPtr pkt) 2393{ 2394 // functional request 2395 cache->functionalAccess(pkt, true); 2396} 2397 2398Cache:: 2399CpuSidePort::CpuSidePort(const std::string &_name, Cache *_cache, 2400 const std::string &_label) 2401 : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache) 2402{ 2403} 2404 2405Cache* 2406CacheParams::create() 2407{ 2408 assert(tags); 2409 2410 return new Cache(this); 2411} 2412/////////////// 2413// 2414// MemSidePort 2415// 2416/////////////// 2417 2418bool 2419Cache::MemSidePort::recvTimingResp(PacketPtr pkt) 2420{ 2421 cache->recvTimingResp(pkt); 2422 return true; 2423} 2424 2425// Express snooping requests to memside port 2426void 2427Cache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt) 2428{ 2429 // handle snooping requests 2430 cache->recvTimingSnoopReq(pkt); 2431} 2432 2433Tick 2434Cache::MemSidePort::recvAtomicSnoop(PacketPtr pkt) 2435{ 2436 return cache->recvAtomicSnoop(pkt); 2437} 2438 2439void 2440Cache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt) 2441{ 2442 // functional snoop (note that in contrast to atomic we don't have 2443 // a specific functionalSnoop method, as they have the same 2444 // behaviour regardless) 2445 cache->functionalAccess(pkt, false); 2446} 2447 2448void 2449Cache::CacheReqPacketQueue::sendDeferredPacket() 2450{ 2451 // sanity check 2452 assert(!waitingOnRetry); 2453 2454 // there should never be any deferred request packets in the 2455 // queue, instead we resly on the cache to provide the packets 2456 // from the MSHR queue or write queue 2457 assert(deferredPacketReadyTime() == MaxTick); 2458 2459 // check for request packets (requests & writebacks) 2460 PacketPtr pkt = cache.getTimingPacket(); 2461 if (pkt == NULL) { 2462 // can happen if e.g. we attempt a writeback and fail, but 2463 // before the retry, the writeback is eliminated because 2464 // we snoop another cache's ReadEx. 2465 } else { 2466 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState); 2467 // in most cases getTimingPacket allocates a new packet, and 2468 // we must delete it unless it is successfully sent 2469 bool delete_pkt = !mshr->isForwardNoResponse(); 2470 2471 // let our snoop responses go first if there are responses to 2472 // the same addresses we are about to writeback, note that 2473 // this creates a dependency between requests and snoop 2474 // responses, but that should not be a problem since there is 2475 // a chain already and the key is that the snoop responses can 2476 // sink unconditionally 2477 if (snoopRespQueue.hasAddr(pkt->getAddr())) { 2478 DPRINTF(CachePort, "Waiting for snoop response to be sent\n"); 2479 Tick when = snoopRespQueue.deferredPacketReadyTime(); 2480 schedSendEvent(when); 2481 2482 if (delete_pkt) 2483 delete pkt; 2484 2485 return; 2486 } 2487 2488 2489 waitingOnRetry = !masterPort.sendTimingReq(pkt); 2490 2491 if (waitingOnRetry) { 2492 DPRINTF(CachePort, "now waiting on a retry\n"); 2493 if (delete_pkt) { 2494 // we are awaiting a retry, but we 2495 // delete the packet and will be creating a new packet 2496 // when we get the opportunity 2497 delete pkt; 2498 } 2499 // note that we have now masked any requestBus and 2500 // schedSendEvent (we will wait for a retry before 2501 // doing anything), and this is so even if we do not 2502 // care about this packet and might override it before 2503 // it gets retried 2504 } else { 2505 // As part of the call to sendTimingReq the packet is 2506 // forwarded to all neighbouring caches (and any 2507 // caches above them) as a snoop. The packet is also 2508 // sent to any potential cache below as the 2509 // interconnect is not allowed to buffer the 2510 // packet. Thus at this point we know if any of the 2511 // neighbouring, or the downstream cache is 2512 // responding, and if so, if it is with a dirty line 2513 // or not. 2514 bool pending_dirty_resp = !pkt->sharedAsserted() && 2515 pkt->memInhibitAsserted(); 2516 2517 cache.markInService(mshr, pending_dirty_resp); 2518 } 2519 } 2520 2521 // if we succeeded and are not waiting for a retry, schedule the 2522 // next send considering when the next MSHR is ready, note that 2523 // snoop responses have their own packet queue and thus schedule 2524 // their own events 2525 if (!waitingOnRetry) { 2526 schedSendEvent(cache.nextMSHRReadyTime()); 2527 } 2528} 2529 2530Cache:: 2531MemSidePort::MemSidePort(const std::string &_name, Cache *_cache, 2532 const std::string &_label) 2533 : BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue), 2534 _reqQueue(*_cache, *this, _snoopRespQueue, _label), 2535 _snoopRespQueue(*_cache, *this, _label), cache(_cache) 2536{ 2537} 2538