// cache.cc, gem5 revision 11601
/*
 * Copyright (c) 2010-2016 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Sandberg
 */

/**
 * @file
 * Cache definitions.
 */

#include "mem/cache/cache.hh"

#include "base/misc.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheTags.hh"
#include "debug/CacheVerbose.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "sim/sim_exit.hh"

Cache::Cache(const CacheParams *p)
    : BaseCache(p, p->system->cacheLineSize()),
      tags(p->tags),
      prefetcher(p->prefetcher),
      doFastWrites(true),
      prefetchOnAccess(p->prefetch_on_access),
      clusivity(p->clusivity),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent(this, false,
                                    EventBase::Delayed_Writeback_Pri)
{
    tempBlock = new CacheBlk();
    tempBlock->data = new uint8_t[blkSize];

    cpuSidePort = new CpuSidePort(p->name + ".cpu_side", this,
                                  "CpuSidePort");
    memSidePort = new MemSidePort(p->name + ".mem_side", this,
                                  "MemSidePort");

    tags->setCache(this);
    if (prefetcher)
        prefetcher->setCache(this);
}

Cache::~Cache()
{
    delete [] tempBlock->data;
    delete tempBlock;

    delete cpuSidePort;
    delete memSidePort;
}

void
Cache::regStats()
{
    BaseCache::regStats();
}

void
Cache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = tags->extractBlkOffset(pkt->getAddr());
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}
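
// The exchange above is easy to misread because the packet both supplies
// the candidate write value and returns the old memory value. A minimal
// sketch of the same 64-bit conditional-swap semantics on a raw buffer
// (illustrative only; kept in a comment so it does not affect the build,
// and the helper name cas64 is hypothetical):
//
//     bool
//     cas64(uint8_t *mem, uint64_t expected, uint64_t desired,
//           uint64_t &old_val)
//     {
//         std::memcpy(&old_val, mem, sizeof(uint64_t)); // old value out
//         if (std::memcmp(&expected, mem, sizeof(uint64_t)) != 0)
//             return false;                             // condition failed
//         std::memcpy(mem, &desired, sizeof(uint64_t)); // overwrite memory
//         return true;
//     }
//
// The unconditional SwapReq path corresponds to skipping the memcmp and
// always overwriting.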

void
Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                      bool deferred_response, bool pending_downgrade)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsWritable() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        cmpAndSwap(blk, pkt);
    } else if (pkt->isWrite()) {
        // we have the block in a writable state and can go ahead,
        // note that the line may also be considered writable in
        // downstream caches along the path to memory, but always
        // Exclusive, and never Modified
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in writable state
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty (and thus transition to the
        // Modified state) even if we are a failed StoreCond so we
        // supply data to any snoops that have appended themselves to
        // this cache before knowing the store will fail.
        blk->status |= BlkDirty;
        DPRINTF(CacheVerbose, "%s for %s addr %#llx size %d (write)\n",
                __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }

        // all read responses have a data payload
        assert(pkt->hasRespData());
        pkt->setDataFromBlock(blk->data, blkSize);

        // determine if this read is from a (coherent) cache or not
        if (pkt->fromCache()) {
            assert(pkt->getSize() == blkSize);
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsWritable()) {
                // sanity check
                assert(pkt->cmd == MemCmd::ReadExReq ||
                       pkt->cmd == MemCmd::SCUpgradeFailReq);

                // if we have a dirty copy, make sure the recipient
                // keeps it marked dirty (in the modified state)
                if (blk->isDirty()) {
                    pkt->setCacheResponding();
                }
                // on ReadExReq we give up our copy unconditionally,
                // even if this cache is mostly inclusive, we may want
                // to revisit this
                invalidateBlock(blk);
            } else if (blk->isWritable() && !pending_downgrade &&
                       !pkt->hasSharers() &&
                       pkt->cmd != MemCmd::ReadCleanReq) {
                // we can give the requester a writable copy on a read
                // request if:
                // - we have a writable copy at this level (& below)
                // - we don't have a pending snoop from below
                //   signaling another read request
                // - no other cache above has a copy (otherwise it
                //   would have set hasSharers flag when
                //   snooping the packet)
                // - the read has not explicitly asked for a clean
                //   copy of the line
                if (blk->isDirty()) {
                    // special considerations if we're owner:
                    if (!deferred_response) {
                        // respond with the line in Modified state
                        // (cacheResponding set, hasSharers not set)
                        pkt->setCacheResponding();

                        // if this cache is mostly inclusive, we
                        // keep the block in the Exclusive state,
                        // and pass it upwards as Modified
                        // (writable and dirty), hence we have
                        // multiple caches, all on the same path
                        // towards memory, all considering the
                        // same block writable, but only one
                        // considering it Modified

                        // we get away with multiple caches (on
                        // the same path to memory) considering
                        // the block writable as we always enter
                        // the cache hierarchy through a cache,
                        // and first snoop upwards in all other
                        // branches
                        blk->status &= ~BlkDirty;
                    } else {
                        // if we're responding after our own miss,
                        // there's a window where the recipient didn't
                        // know it was getting ownership and may not
                        // have responded to snoops correctly, so we
                        // have to respond with a shared line
                        pkt->setHasSharers();
                    }
                }
            } else {
                // otherwise only respond with a shared copy
                pkt->setHasSharers();
            }
        }
    } else {
        // Upgrade or Invalidate
        assert(pkt->isUpgrade() || pkt->isInvalidate());

        // for invalidations we could be looking at the temp block
        // (for upgrades we always allocate)
        invalidateBlock(blk);
        DPRINTF(CacheVerbose, "%s for %s addr %#llx size %d (invalidation)\n",
                __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
    }
}
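
// For reference, satisfyRequest() above manipulates coherence state only
// through the CacheBlk status bits. The mapping (the same one used by
// CacheBlk::print()) is:
//
//     valid  writable  dirty   state
//     -----  --------  -----   ---------
//       1        1       1     Modified
//       1        0       1     Owned
//       1        1       0     Exclusive
//       1        0       0     Shared
//       0        x       x     Invalid
//
// so "blk->status |= BlkDirty" on a write is the transition to Modified,
// and "blk->status &= ~BlkDirty" when passing a dirty line upstream hands
// the Modified state to the requester while this copy drops back to
// Exclusive.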

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

bool
Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
              PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());

    DPRINTF(CacheVerbose, "%s for %s addr %#llx size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    if (pkt->req->isUncacheable()) {
        DPRINTF(Cache, "%s%s addr %#llx uncacheable\n", pkt->cmdString(),
                pkt->req->isInstFetch() ? " (ifetch)" : "",
                pkt->getAddr());

        // flush and invalidate any existing block
        CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
        if (old_blk && old_blk->isValid()) {
            if (old_blk->isDirty() || writebackClean)
                writebacks.push_back(writebackBlk(old_blk));
            else
                writebacks.push_back(cleanEvictBlk(old_blk));
            tags->invalidate(old_blk);
            old_blk->invalidate();
        }

        blk = nullptr;
        // lookupLatency is the latency in case the request is uncacheable.
        lat = lookupLatency;
        return false;
    }

    ContextID id = pkt->req->hasContextId() ?
        pkt->req->contextId() : InvalidContextID;
    // Note that lat is passed by reference here; accessBlock() can
    // modify its value.
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat, id);

    DPRINTF(Cache, "%s%s addr %#llx size %d (%s) %s\n", pkt->cmdString(),
            pkt->req->isInstFetch() ? " (ifetch)" : "",
            pkt->getAddr(), pkt->getSize(), pkt->isSecure() ? "s" : "ns",
            blk ? "hit " + blk->print() : "miss");


    if (pkt->isEviction()) {
        // We check for presence of block in above caches before issuing
        // Writeback or CleanEvict to write buffer. Therefore the only
        // possible case is a CleanEvict packet coming from above and
        // encountering a Writeback generated in a peer cache of this
        // cache, waiting in the write buffer. Cases of upper level peer
        // caches generating CleanEvict and Writeback, or simply CleanEvict
        // and CleanEvict, almost simultaneously will be caught by snoops
        // sent out by the crossbar.
        WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
                                                          pkt->isSecure());
        if (wb_entry) {
            assert(wb_entry->getNumTargets() == 1);
            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
            assert(wbPkt->isWriteback());

            if (pkt->isCleanEviction()) {
                // The CleanEvict and WritebackClean snoop into other
                // peer caches of the same level while traversing the
                // crossbar. If a copy of the block is found, the
                // packet is deleted in the crossbar. Hence, none of
                // the other upper level caches connected to this
                // cache have the block, so we can clear the
                // BLOCK_CACHED flag in the Writeback if set and
                // discard the CleanEvict by returning true.
                wbPkt->clearBlockCached();
                return true;
            } else {
                assert(pkt->cmd == MemCmd::WritebackDirty);
                // Dirty writeback from above trumps our clean
                // writeback... discard here
                // Note: markInService will remove entry from writeback buffer.
                markInService(wb_entry);
                delete wbPkt;
            }
        }
    }
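
    // The collision handling above reduces to a small decision table
    // (incoming eviction from above vs. the matching writeback already
    // waiting in this cache's write buffer):
    //
    //     incoming           buffered entry    action
    //     ----------------   ---------------   -----------------------------
    //     CleanEvict /       any writeback     clear BLOCK_CACHED on the
    //     WritebackClean                       buffered packet, sink incoming
    //     WritebackDirty     clean writeback   drop the buffered clean
    //                                          writeback, keep the dirty one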

    // Writeback handling is a special case. We can write the block into
    // the cache without having a writable copy (or any copy at all).
    if (pkt->isWriteback()) {
        assert(blkSize == pkt->getSize());

        // we could get a clean writeback while we are having
        // outstanding accesses to a block, do the simple thing for
        // now and drop the clean writeback so that we do not upset
        // any ordering/decisions about ownership already taken
        if (pkt->cmd == MemCmd::WritebackClean &&
            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
                    "dropping\n", pkt->getAddr());
            return true;
        }

        if (blk == nullptr) {
            // need to do a replacement
            blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks);
            if (blk == nullptr) {
                // no replaceable block available: give up, fwd to next level.
                incMissCount(pkt);
                return false;
            }
            tags->insertBlock(pkt, blk);

            blk->status = (BlkValid | BlkReadable);
            if (pkt->isSecure()) {
                blk->status |= BlkSecure;
            }
        }
        // only mark the block dirty if we got a writeback command,
        // and leave it as is for a clean writeback
        if (pkt->cmd == MemCmd::WritebackDirty) {
            blk->status |= BlkDirty;
        }
        // if the packet does not have sharers, it is passing the block
        // in a writable state, meaning the writeback came from the
        // Modified or Exclusive state; otherwise the sender was in the
        // Owned or Shared state
        if (!pkt->hasSharers()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
        incHitCount(pkt);
        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        if (blk != nullptr) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and delete
            // it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the CleanEvict
        // like a Writeback which could not find a replaceable block so has to
        // go to next level.
        return false;
    } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
                       blk->isReadable())) {
        // OK to satisfy access
        incHitCount(pkt);
        satisfyRequest(pkt, blk);
        maintainClusivity(pkt->fromCache(), blk);

        return true;
    }

    // Can't satisfy access normally... either no block (blk == nullptr)
    // or have block but need writable

    incMissCount(pkt);

    if (blk == nullptr && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}

void
Cache::maintainClusivity(bool from_cache, CacheBlk *blk)
{
    if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
        clusivity == Enums::mostly_excl) {
        // if we have responded to a cache, and our block is still
        // valid, but not dirty, and this cache is mostly exclusive
        // with respect to the cache above, drop the block
        invalidateBlock(blk);
    }
}
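
// Note on clusivity: with Enums::mostly_incl the block is kept after
// responding to a cache above, so both levels commonly hold a copy; with
// Enums::mostly_excl a clean block is dropped as soon as an upstream
// cache has taken a copy, so at most one level holds it. For example, an
// L2 configured with clusivity = 'mostly_excl' invalidates its own copy
// when it fills an L1, unless that copy is dirty.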

void
Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // We use forwardLatency here because we are copying writebacks to
        // the write buffer. Call isCachedAbove for both Writebacks and
        // CleanEvicts. If isCachedAbove returns true we set BLOCK_CACHED
        // flag in Writebacks and discard CleanEvicts.
        if (isCachedAbove(wbPkt)) {
            if (wbPkt->cmd == MemCmd::CleanEvict) {
                // Delete CleanEvict because cached copies exist above. The
                // packet destructor will delete the request object because
                // this is a non-snoop request packet which does not require a
                // response.
                delete wbPkt;
            } else if (wbPkt->cmd == MemCmd::WritebackClean) {
                // clean writeback, do not send since the block is
                // still cached above
                assert(writebackClean);
                delete wbPkt;
            } else {
                assert(wbPkt->cmd == MemCmd::WritebackDirty);
                // Set BLOCK_CACHED flag in Writeback and send below, so that
                // the Writeback does not reset the bit corresponding to this
                // address in the snoop filter below.
                wbPkt->setBlockCached();
                allocateWriteBuffer(wbPkt, forward_time);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            allocateWriteBuffer(wbPkt, forward_time);
        }
        writebacks.pop_front();
    }
}

void
Cache::doWritebacksAtomic(PacketList& writebacks)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // Call isCachedAbove for both Writebacks and CleanEvicts. If
        // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
        // and discard CleanEvicts.
        if (isCachedAbove(wbPkt, false)) {
            if (wbPkt->cmd == MemCmd::WritebackDirty) {
                // Set BLOCK_CACHED flag in Writeback and send below,
                // so that the Writeback does not reset the bit
                // corresponding to this address in the snoop filter
                // below. We can discard CleanEvicts because cached
                // copies exist above. Atomic mode isCachedAbove
                // modifies packet to set BLOCK_CACHED flag
                memSidePort->sendAtomic(wbPkt);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            memSidePort->sendAtomic(wbPkt);
        }
        writebacks.pop_front();
        // In case of CleanEvicts, the packet destructor will delete the
        // request object because this is a non-snoop request packet which
        // does not require a response.
        delete wbPkt;
    }
}
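
// Both writeback paths above implement the same policy; in tabular form
// (isCachedAbove() snoops upwards to see if the block is still present):
//
//     packet            cached above?   action
//     ---------------   -------------   -------------------------------
//     CleanEvict        yes             delete (copies above suffice)
//     WritebackClean    yes             delete (requires writebackClean)
//     WritebackDirty    yes             set BLOCK_CACHED, send below
//     any of the above  no              send below unmodified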
"s" : "ns"); 562 recvTimingResp(pkt); 563 return; 564 } 565 566 // forwardLatency is set here because there is a response from an 567 // upper level cache. 568 // To pay the delay that occurs if the packet comes from the bus, 569 // we charge also headerDelay. 570 Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay; 571 // Reset the timing of the packet. 572 pkt->headerDelay = pkt->payloadDelay = 0; 573 memSidePort->schedTimingSnoopResp(pkt, snoop_resp_time); 574} 575 576void 577Cache::promoteWholeLineWrites(PacketPtr pkt) 578{ 579 // Cache line clearing instructions 580 if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) && 581 (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) { 582 pkt->cmd = MemCmd::WriteLineReq; 583 DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n"); 584 } 585} 586 587bool 588Cache::recvTimingReq(PacketPtr pkt) 589{ 590 DPRINTF(CacheTags, "%s tags: %s\n", __func__, tags->print()); 591 592 assert(pkt->isRequest()); 593 594 // Just forward the packet if caches are disabled. 595 if (system->bypassCaches()) { 596 // @todo This should really enqueue the packet rather 597 bool M5_VAR_USED success = memSidePort->sendTimingReq(pkt); 598 assert(success); 599 return true; 600 } 601 602 promoteWholeLineWrites(pkt); 603 604 if (pkt->cacheResponding()) { 605 // a cache above us (but not where the packet came from) is 606 // responding to the request, in other words it has the line 607 // in Modified or Owned state 608 DPRINTF(Cache, "Cache above responding to %#llx (%s): " 609 "not responding\n", 610 pkt->getAddr(), pkt->isSecure() ? "s" : "ns"); 611 612 // if the packet needs the block to be writable, and the cache 613 // that has promised to respond (setting the cache responding 614 // flag) is not providing writable (it is in Owned rather than 615 // the Modified state), we know that there may be other Shared 616 // copies in the system; go out and invalidate them all 617 assert(pkt->needsWritable() && !pkt->responderHadWritable()); 618 619 // an upstream cache that had the line in Owned state 620 // (dirty, but not writable), is responding and thus 621 // transferring the dirty line from one branch of the 622 // cache hierarchy to another 623 624 // send out an express snoop and invalidate all other 625 // copies (snooping a packet that needs writable is the 626 // same as an invalidation), thus turning the Owned line 627 // into a Modified line, note that we don't invalidate the 628 // block in the current cache or any other cache on the 629 // path to memory 630 631 // create a downstream express snoop with cleared packet 632 // flags, there is no need to allocate any data as the 633 // packet is merely used to co-ordinate state transitions 634 Packet *snoop_pkt = new Packet(pkt, true, false); 635 636 // also reset the bus time that the original packet has 637 // not yet paid for 638 snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0; 639 640 // make this an instantaneous express snoop, and let the 641 // other caches in the system know that the another cache 642 // is responding, because we have found the authorative 643 // copy (Modified or Owned) that will supply the right 644 // data 645 snoop_pkt->setExpressSnoop(); 646 snoop_pkt->setCacheResponding(); 647 648 // this express snoop travels towards the memory, and at 649 // every crossbar it is snooped upwards thus reaching 650 // every cache in the system 651 bool M5_VAR_USED success = memSidePort->sendTimingReq(snoop_pkt); 652 // express snoops always succeed 653 

bool
Cache::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(CacheTags, "%s tags: %s\n", __func__, tags->print());

    assert(pkt->isRequest());

    // Just forward the packet if caches are disabled.
    if (system->bypassCaches()) {
        // @todo This should really enqueue the packet rather than
        // sending it immediately
        bool M5_VAR_USED success = memSidePort->sendTimingReq(pkt);
        assert(success);
        return true;
    }

    promoteWholeLineWrites(pkt);

    if (pkt->cacheResponding()) {
        // a cache above us (but not where the packet came from) is
        // responding to the request, in other words it has the line
        // in Modified or Owned state
        DPRINTF(Cache, "Cache above responding to %#llx (%s): "
                "not responding\n",
                pkt->getAddr(), pkt->isSecure() ? "s" : "ns");

        // if the packet needs the block to be writable, and the cache
        // that has promised to respond (setting the cache responding
        // flag) is not providing writable (it is in Owned rather than
        // the Modified state), we know that there may be other Shared
        // copies in the system; go out and invalidate them all
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        // an upstream cache that had the line in Owned state
        // (dirty, but not writable), is responding and thus
        // transferring the dirty line from one branch of the
        // cache hierarchy to another

        // send out an express snoop and invalidate all other
        // copies (snooping a packet that needs writable is the
        // same as an invalidation), thus turning the Owned line
        // into a Modified line, note that we don't invalidate the
        // block in the current cache or any other cache on the
        // path to memory

        // create a downstream express snoop with cleared packet
        // flags, there is no need to allocate any data as the
        // packet is merely used to co-ordinate state transitions
        Packet *snoop_pkt = new Packet(pkt, true, false);

        // also reset the bus time that the original packet has
        // not yet paid for
        snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;

        // make this an instantaneous express snoop, and let the
        // other caches in the system know that another cache
        // is responding, because we have found the authoritative
        // copy (Modified or Owned) that will supply the right
        // data
        snoop_pkt->setExpressSnoop();
        snoop_pkt->setCacheResponding();

        // this express snoop travels towards the memory, and at
        // every crossbar it is snooped upwards thus reaching
        // every cache in the system
        bool M5_VAR_USED success = memSidePort->sendTimingReq(snoop_pkt);
        // express snoops always succeed
        assert(success);

        // main memory will delete the snoop packet

        // queue for deletion, as opposed to immediate deletion, as
        // the sending cache is still relying on the packet
        pendingDelete.reset(pkt);

        // no need to take any further action in this particular cache
        // as an upstream cache has already committed to responding,
        // and we have already sent out any express snoops in the
        // section above to ensure all other copies in the system are
        // invalidated
        return true;
    }

    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    // We use lookupLatency here because it specifies the latency of the
    // initial tag lookup performed on any access.
    Cycles lat = lookupLatency;
    CacheBlk *blk = nullptr;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() calls accessBlock() which can modify lat value.
        satisfied = access(pkt, blk, lat, writebacks);

        // copy writebacks to write buffer here to ensure they logically
        // precede anything happening below
        doWritebacks(writebacks, forward_time);
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is just lat, i.e. lookupLatency as possibly
    // modified by the access() call above.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;

    // track time of availability of next prefetch, if any
    Tick next_pf_time = MaxTick;

    bool needsResponse = pkt->needsResponse();

    if (satisfied) {
        // should never be satisfying an uncacheable access as we
        // flush and invalidate any existing block as part of the
        // lookup
        assert(!pkt->req->isUncacheable());

        // hit (for all other request types)

        if (prefetcher && (prefetchOnAccess ||
                           (blk && blk->wasPrefetched()))) {
            if (blk)
                blk->status &= ~BlkHWPrefetched;

            // Don't notify on SWPrefetch
            if (!pkt->cmd.isSWPrefetch())
                next_pf_time = prefetcher->notify(pkt);
        }

        if (needsResponse) {
            pkt->makeTimingResponse();
            // @todo: Make someone pay for this
            pkt->headerDelay = pkt->payloadDelay = 0;

            // In this case we are considering request_time that takes
            // into account the delay of the xbar, if any, and just
            // lat, neglecting responseLatency, modelling hit latency
            // just as lookupLatency or the value of lat overridden
            // by access(), which calls the accessBlock() function.
            cpuSidePort->schedTimingResp(pkt, request_time, true);
        } else {
            DPRINTF(Cache, "%s satisfied %s addr %#llx, no response needed\n",
                    __func__, pkt->cmdString(), pkt->getAddr());

            // queue the packet for deletion, as the sending cache is
            // still relying on it; if the block is found in access(),
            // CleanEvict and Writeback messages will be deleted
            // here as well
            pendingDelete.reset(pkt);
        }
    } else {
        // miss

        Addr blk_addr = blockAlign(pkt->getAddr());

        // ignore any existing MSHR if we are dealing with an
        // uncacheable request
        MSHR *mshr = pkt->req->isUncacheable() ? nullptr :
            mshrQueue.findMatch(blk_addr, pkt->isSecure());

        // Software prefetch handling:
        // To keep the core from waiting on data it won't look at
        // anyway, send back a response with dummy data. Miss handling
        // will continue asynchronously. Unfortunately, the core will
        // insist upon freeing original Packet/Request, so we have to
        // create a new pair with a different lifecycle. Note that this
        // processing happens before any MSHR munging on behalf of
        // this request because this new Request will be the one stored
        // into the MSHRs, not the original.
        if (pkt->cmd.isSWPrefetch()) {
            assert(needsResponse);
            assert(pkt->req->hasPaddr());
            assert(!pkt->req->isUncacheable());

            // There's no reason to add a prefetch as an additional target
            // to an existing MSHR. If an outstanding request is already
            // in progress, there is nothing for the prefetch to do.
            // If this is the case, we don't even create a request at all.
            PacketPtr pf = nullptr;

            if (!mshr) {
                // copy the request and create a new SoftPFReq packet
                RequestPtr req = new Request(pkt->req->getPaddr(),
                                             pkt->req->getSize(),
                                             pkt->req->getFlags(),
                                             pkt->req->masterId());
                pf = new Packet(req, pkt->cmd);
                pf->allocate();
                assert(pf->getAddr() == pkt->getAddr());
                assert(pf->getSize() == pkt->getSize());
            }

            pkt->makeTimingResponse();

            // request_time is used here, taking into account lat and the delay
            // charged if the packet comes from the xbar.
            cpuSidePort->schedTimingResp(pkt, request_time, true);

            // If an outstanding request is in progress (we found an
            // MSHR) this is set to null
            pkt = pf;
        }

        if (mshr) {
            /// MSHR hit
            /// @note writebacks will be checked in getNextMSHR()
            /// for any conflicting requests to the same block

            //@todo remove hw_pf here

            // Coalesce unless it was a software prefetch (see above).
            if (pkt) {
                assert(!pkt->isWriteback());
                // CleanEvicts corresponding to blocks which have
                // outstanding requests in MSHRs are simply sunk here
                if (pkt->cmd == MemCmd::CleanEvict) {
                    pendingDelete.reset(pkt);
                } else {
                    DPRINTF(Cache, "%s coalescing MSHR for %s addr %#llx "
                            "size %d\n", __func__, pkt->cmdString(),
                            pkt->getAddr(), pkt->getSize());

                    assert(pkt->req->masterId() < system->maxMasters());
                    mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
                    // We use forward_time here because it is the same
                    // considering new targets. We have multiple
                    // requests for the same address here. It
                    // specifies the latency to allocate an internal
                    // buffer and to schedule an event to the queued
                    // port and also takes into account the additional
                    // delay of the xbar.
                    mshr->allocateTarget(pkt, forward_time, order++,
                                         allocOnFill(pkt->cmd));
                    if (mshr->getNumTargets() == numTarget) {
                        noTargetMSHR = mshr;
                        setBlocked(Blocked_NoTargets);
                        // need to be careful with this... if this mshr isn't
                        // ready yet (i.e. time > curTick()), we don't want to
                        // move it ahead of mshrs that are ready
                        // mshrQueue.moveToFront(mshr);
                    }
                }
                // We should call the prefetcher regardless of whether the
                // request is satisfied, and regardless of whether it is in
                // the MSHR or not. The request could be a ReadReq hit, but
                // still not satisfied (potentially because of a prior write
                // to the same cache line). So, even when not satisfied and
                // there is an MSHR already allocated for this, we need to
                // let the prefetcher know about the request.
                if (prefetcher) {
                    // Don't notify on SWPrefetch
                    if (!pkt->cmd.isSWPrefetch())
                        next_pf_time = prefetcher->notify(pkt);
                }
            }
        } else {
            // no MSHR
            assert(pkt->req->masterId() < system->maxMasters());
            if (pkt->req->isUncacheable()) {
                mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;
            } else {
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
            }

            if (pkt->isEviction() ||
                (pkt->req->isUncacheable() && pkt->isWrite())) {
                // We use forward_time here because there is an
                // uncached memory write, forwarded to WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                if (blk && blk->isValid()) {
                    // should have flushed and have no valid block
                    assert(!pkt->req->isUncacheable());

                    // If we have a write miss to a valid block, we
                    // need to mark the block non-readable. Otherwise
                    // if we allow reads while there's an outstanding
                    // write miss, the read could return stale data
                    // out of the cache block... a more aggressive
                    // system could detect the overlap (if any) and
                    // forward data out of the MSHRs, but we don't do
                    // that yet. Note that we do need to leave the
                    // block valid so that it stays in the cache, in
                    // case we get an upgrade response (and hence no
                    // new data) when the write miss completes.
                    // As long as CPUs do proper store/load forwarding
                    // internally, and have a sufficiently weak memory
                    // model, this is probably unnecessary, but at some
                    // point it must have seemed like we needed it...
                    assert(pkt->needsWritable());
                    assert(!blk->isWritable());
                    blk->status &= ~BlkReadable;
                }
                // Here we are using forward_time, modelling the latency of
                // a miss (outbound) just as forwardLatency, neglecting the
                // lookupLatency component.
                allocateMissBuffer(pkt, forward_time);
            }

            if (prefetcher) {
                // Don't notify on SWPrefetch
                if (!pkt->cmd.isSWPrefetch())
                    next_pf_time = prefetcher->notify(pkt);
            }
        }
    }

    if (next_pf_time != MaxTick)
        schedMemSideSendEvent(next_pf_time);

    return true;
}
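
// Summary of the two points in time used throughout recvTimingReq():
//
//     request_time = clockEdge(lat) + pkt->headerDelay
//         used for responses on a hit (lookup latency plus any crossbar
//         header delay carried by the packet)
//     forward_time = clockEdge(forwardLatency) + pkt->headerDelay
//         used for anything sent further down (writebacks, MSHR targets,
//         write buffer allocations)
//
// As a worked example with hypothetical numbers: forwardLatency = 2
// cycles, a 1 ns clock and a packet carrying headerDelay = 500 ps queue a
// miss roughly at clockEdge(Cycles(2)) + 500 ps.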

PacketPtr
Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                        bool needsWritable) const
{
    // should never see evictions here
    assert(!cpu_pkt->isEviction());

    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable() ||
        (!blkValid && cpu_pkt->isUpgrade())) {
        // uncacheable requests and upgrades from upper-level caches
        // that missed completely just go through as is
        return nullptr;
    }

    assert(cpu_pkt->needsResponse());

    MemCmd cmd;
    // @TODO make useUpgrades a parameter.
    // Note that ownership protocols require upgrade, otherwise a
    // write miss on a shared owned block will generate a ReadExcl,
    // which will clobber the owned copy.
    const bool useUpgrades = true;
    if (blkValid && useUpgrades) {
        // only reason to be here is that blk is read only and we need
        // it to be writable
        assert(needsWritable);
        assert(!blk->isWritable());
        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
               cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
        // Even though this SC will fail, we still need to send out the
        // request and get the data to supply it to other snoopers in the case
        // where the determination that the StoreCond fails is delayed due to
        // all caches not being on the same local bus.
        cmd = MemCmd::SCUpgradeFailReq;
    } else if (cpu_pkt->cmd == MemCmd::WriteLineReq ||
               cpu_pkt->cmd == MemCmd::InvalidateReq) {
        // forward as invalidate to all other caches, this gives us
        // the line in Exclusive state, and invalidates all other
        // copies
        cmd = MemCmd::InvalidateReq;
    } else {
        // block is invalid
        cmd = needsWritable ? MemCmd::ReadExReq :
            (isReadOnly ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
    }
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);

    // if there are upstream caches that have already marked the
    // packet as having sharers (not passing writable), pass that info
    // downstream
    if (cpu_pkt->hasSharers()) {
        // note that cpu_pkt may have spent a considerable time in the
        // MSHR queue and that the information could possibly be out
        // of date, however, there is no harm in conservatively
        // assuming the block has sharers
        pkt->setHasSharers();
        DPRINTF(Cache, "%s passing hasSharers from %s to %s addr %#llx "
                "size %d\n",
                __func__, cpu_pkt->cmdString(), pkt->cmdString(),
                pkt->getAddr(), pkt->getSize());
    }

    // the packet should be block aligned
    assert(pkt->getAddr() == blockAlign(pkt->getAddr()));

    pkt->allocate();
    DPRINTF(Cache, "%s created %s from %s for addr %#llx size %d\n",
            __func__, pkt->cmdString(), cpu_pkt->cmdString(), pkt->getAddr(),
            pkt->getSize());
    return pkt;
}
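
// The command selection above, in tabular form (blkValid means a readable
// copy is present, and useUpgrades is true):
//
//     incoming request / local state          bus packet
//     ------------------------------------    -------------------------
//     uncacheable, or upgrade with no copy    nullptr (forward as is)
//     valid block, needs writable             UpgradeReq / SCUpgradeReq
//     SCUpgradeFailReq, StoreCondFailReq      SCUpgradeFailReq
//     WriteLineReq, InvalidateReq             InvalidateReq
//     no block, needs writable                ReadExReq
//     no block, read-only cache               ReadCleanReq
//     no block, otherwise                     ReadSharedReq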

Tick
Cache::recvAtomic(PacketPtr pkt)
{
    // We are in atomic mode so we pay just for lookupLatency here.
    Cycles lat = lookupLatency;

    // Forward the request if the system is in cache bypass mode.
    if (system->bypassCaches())
        return ticksToCycles(memSidePort->sendAtomic(pkt));

    promoteWholeLineWrites(pkt);

    // follow the same flow as in recvTimingReq, and check if a cache
    // above us is responding
    if (pkt->cacheResponding()) {
        DPRINTF(Cache, "Cache above responding to %#llx (%s): "
                "not responding\n",
                pkt->getAddr(), pkt->isSecure() ? "s" : "ns");

        // if a cache is responding, and it had the line in Owned
        // rather than Modified state, we need to invalidate any
        // copies that are not on the same path to memory
        assert(pkt->needsWritable() && !pkt->responderHadWritable());
        lat += ticksToCycles(memSidePort->sendAtomic(pkt));

        return lat * clockPeriod();
    }

    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    CacheBlk *blk = nullptr;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);

    if (!satisfied) {
        // MISS

        // deal with the packets that go through the write path of
        // the cache, i.e. any evictions and uncacheable writes
        if (pkt->isEviction() ||
            (pkt->req->isUncacheable() && pkt->isWrite())) {
            lat += ticksToCycles(memSidePort->sendAtomic(pkt));
            return lat * clockPeriod();
        }
        // only misses left

        PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable());

        bool is_forward = (bus_pkt == nullptr);

        if (is_forward) {
            // just forwarding the same request to the next level
            // no local cache operation involved
            bus_pkt = pkt;
        }

        DPRINTF(Cache, "Sending an atomic %s for %#llx (%s)\n",
                bus_pkt->cmdString(), bus_pkt->getAddr(),
                bus_pkt->isSecure() ? "s" : "ns");

#if TRACING_ON
        CacheBlk::State old_state = blk ? blk->status : 0;
#endif

        lat += ticksToCycles(memSidePort->sendAtomic(bus_pkt));

        bool is_invalidate = bus_pkt->isInvalidate();

        // We are now dealing with the response handling
        DPRINTF(Cache, "Receive response: %s for addr %#llx (%s) in "
                "state %i\n", bus_pkt->cmdString(), bus_pkt->getAddr(),
                bus_pkt->isSecure() ? "s" : "ns",
                old_state);

        // If packet was a forward, the response (if any) is already
        // in place in the bus_pkt == pkt structure, so we don't need
        // to do anything. Otherwise, use the separate bus_pkt to
        // generate response to pkt and then delete it.
        if (!is_forward) {
            if (pkt->needsResponse()) {
                assert(bus_pkt->isResponse());
                if (bus_pkt->isError()) {
                    pkt->makeAtomicResponse();
                    pkt->copyError(bus_pkt);
                } else if (pkt->cmd == MemCmd::WriteLineReq) {
                    // note the use of pkt, not bus_pkt here.

                    // write-line request to the cache that promoted
                    // the write to a whole line
                    blk = handleFill(pkt, blk, writebacks,
                                     allocOnFill(pkt->cmd));
                    assert(blk != nullptr);
                    is_invalidate = false;
                    satisfyRequest(pkt, blk);
                } else if (bus_pkt->isRead() ||
                           bus_pkt->cmd == MemCmd::UpgradeResp) {
                    // we're updating cache state to allow us to
                    // satisfy the upstream request from the cache
                    blk = handleFill(bus_pkt, blk, writebacks,
                                     allocOnFill(pkt->cmd));
                    satisfyRequest(pkt, blk);
                    maintainClusivity(pkt->fromCache(), blk);
                } else {
                    // we're satisfying the upstream request without
                    // modifying cache state, e.g., a write-through
                    pkt->makeAtomicResponse();
                }
            }
            delete bus_pkt;
        }

        if (is_invalidate && blk && blk->isValid()) {
            invalidateBlock(blk);
        }
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention. If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used temp block, check to see if it's valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter), first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = (blk->isDirty() || writebackClean) ?
            writebackBlk(blk) : cleanEvictBlk(blk);
        blk->invalidate();
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat * clockPeriod();
}
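
// The Tick returned above is lat * clockPeriod(). As a worked example
// with hypothetical numbers: lookupLatency = 4 cycles, a downstream
// sendAtomic() that reports 20 ns and a 1 ns clock give
// lat = 4 + ticksToCycles(20 ns) = 24 cycles, i.e. a 24 ns round trip.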
"done " : ""); 1200 1201 // We're leaving the cache, so pop cache->name() label 1202 pkt->popLabel(); 1203 1204 if (done) { 1205 pkt->makeResponse(); 1206 } else { 1207 // if it came as a request from the CPU side then make sure it 1208 // continues towards the memory side 1209 if (fromCpuSide) { 1210 memSidePort->sendFunctional(pkt); 1211 } else if (cpuSidePort->isSnooping()) { 1212 // if it came from the memory side, it must be a snoop request 1213 // and we should only forward it if we are forwarding snoops 1214 cpuSidePort->sendFunctionalSnoop(pkt); 1215 } 1216 } 1217} 1218 1219 1220///////////////////////////////////////////////////// 1221// 1222// Response handling: responses from the memory side 1223// 1224///////////////////////////////////////////////////// 1225 1226 1227void 1228Cache::handleUncacheableWriteResp(PacketPtr pkt) 1229{ 1230 Tick completion_time = clockEdge(responseLatency) + 1231 pkt->headerDelay + pkt->payloadDelay; 1232 1233 // Reset the bus additional time as it is now accounted for 1234 pkt->headerDelay = pkt->payloadDelay = 0; 1235 1236 cpuSidePort->schedTimingResp(pkt, completion_time, true); 1237} 1238 1239void 1240Cache::recvTimingResp(PacketPtr pkt) 1241{ 1242 assert(pkt->isResponse()); 1243 1244 // all header delay should be paid for by the crossbar, unless 1245 // this is a prefetch response from above 1246 panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp, 1247 "%s saw a non-zero packet delay\n", name()); 1248 1249 bool is_error = pkt->isError(); 1250 1251 if (is_error) { 1252 DPRINTF(Cache, "Cache received packet with error for addr %#llx (%s), " 1253 "cmd: %s\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns", 1254 pkt->cmdString()); 1255 } 1256 1257 DPRINTF(Cache, "Handling response %s for addr %#llx size %d (%s)\n", 1258 pkt->cmdString(), pkt->getAddr(), pkt->getSize(), 1259 pkt->isSecure() ? 
"s" : "ns"); 1260 1261 // if this is a write, we should be looking at an uncacheable 1262 // write 1263 if (pkt->isWrite()) { 1264 assert(pkt->req->isUncacheable()); 1265 handleUncacheableWriteResp(pkt); 1266 return; 1267 } 1268 1269 // we have dealt with any (uncacheable) writes above, from here on 1270 // we know we are dealing with an MSHR due to a miss or a prefetch 1271 MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState()); 1272 assert(mshr); 1273 1274 if (mshr == noTargetMSHR) { 1275 // we always clear at least one target 1276 clearBlocked(Blocked_NoTargets); 1277 noTargetMSHR = nullptr; 1278 } 1279 1280 // Initial target is used just for stats 1281 MSHR::Target *initial_tgt = mshr->getTarget(); 1282 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex(); 1283 Tick miss_latency = curTick() - initial_tgt->recvTime; 1284 1285 if (pkt->req->isUncacheable()) { 1286 assert(pkt->req->masterId() < system->maxMasters()); 1287 mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] += 1288 miss_latency; 1289 } else { 1290 assert(pkt->req->masterId() < system->maxMasters()); 1291 mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] += 1292 miss_latency; 1293 } 1294 1295 bool wasFull = mshrQueue.isFull(); 1296 1297 PacketList writebacks; 1298 1299 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 1300 1301 // upgrade deferred targets if the response has no sharers, and is 1302 // thus passing writable 1303 if (!pkt->hasSharers()) { 1304 mshr->promoteWritable(); 1305 } 1306 1307 bool is_fill = !mshr->isForward && 1308 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp); 1309 1310 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure()); 1311 1312 if (is_fill && !is_error) { 1313 DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n", 1314 pkt->getAddr()); 1315 1316 blk = handleFill(pkt, blk, writebacks, mshr->allocOnFill); 1317 assert(blk != nullptr); 1318 } 1319 1320 // allow invalidation responses originating from write-line 1321 // requests to be discarded 1322 bool is_invalidate = pkt->isInvalidate(); 1323 1324 // First offset for critical word first calculations 1325 int initial_offset = initial_tgt->pkt->getOffset(blkSize); 1326 1327 bool from_cache = false; 1328 1329 while (mshr->hasTargets()) { 1330 MSHR::Target *target = mshr->getTarget(); 1331 Packet *tgt_pkt = target->pkt; 1332 1333 switch (target->source) { 1334 case MSHR::Target::FromCPU: 1335 Tick completion_time; 1336 // Here we charge on completion_time the delay of the xbar if the 1337 // packet comes from it, charged on headerDelay. 1338 completion_time = pkt->headerDelay; 1339 1340 // Software prefetch handling for cache closest to core 1341 if (tgt_pkt->cmd.isSWPrefetch()) { 1342 // a software prefetch would have already been ack'd 1343 // immediately with dummy data so the core would be able to 1344 // retire it. This request completes right here, so we 1345 // deallocate it. 1346 delete tgt_pkt->req; 1347 delete tgt_pkt; 1348 break; // skip response 1349 } 1350 1351 // keep track of whether we have responded to another 1352 // cache 1353 from_cache = from_cache || tgt_pkt->fromCache(); 1354 1355 // unlike the other packet flows, where data is found in other 1356 // caches or memory and brought back, write-line requests always 1357 // have the data right away, so the above check for "is fill?" 1358 // cannot actually be determined until examining the stored MSHR 1359 // state. We "catch up" with that logic here, which is duplicated 1360 // from above. 
            if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
                assert(!is_error);
                // we got the block in a writable state, so promote
                // any deferred targets if possible
                mshr->promoteWritable();
                // NB: we use the original packet here and not the response!
                blk = handleFill(tgt_pkt, blk, writebacks, mshr->allocOnFill);
                assert(blk != nullptr);

                // treat as a fill, and discard the invalidation
                // response
                is_fill = true;
                is_invalidate = false;
            }

            if (is_fill) {
                satisfyRequest(tgt_pkt, blk, true, mshr->hasPostDowngrade());

                // How many bytes past the first request is this one
                int transfer_offset =
                    tgt_pkt->getOffset(blkSize) - initial_offset;
                if (transfer_offset < 0) {
                    transfer_offset += blkSize;
                }

                // If not the critical word (non-zero offset), also charge
                // payloadDelay. responseLatency is the latency of the
                // return path from lower level caches/memory to an upper
                // level cache or the core.
                completion_time += clockEdge(responseLatency) +
                    (transfer_offset ? pkt->payloadDelay : 0);

                assert(!tgt_pkt->req->isUncacheable());

                assert(tgt_pkt->req->masterId() < system->maxMasters());
                missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] +=
                    completion_time - target->recvTime;
            } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
                // failed StoreCond upgrade
                assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
                       tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
                       tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache or
                // the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                tgt_pkt->req->setExtraData(0);
            } else {
                // not a cache fill, just forwarding response
                // responseLatency is the latency of the return path
                // from lower level caches/memory to the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                if (pkt->isRead() && !is_error) {
                    // sanity check
                    assert(pkt->getAddr() == tgt_pkt->getAddr());
                    assert(pkt->getSize() >= tgt_pkt->getSize());

                    tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
                }
            }
            tgt_pkt->makeTimingResponse();
            // if this packet is an error copy that to the new packet
            if (is_error)
                tgt_pkt->copyError(pkt);
            if (tgt_pkt->cmd == MemCmd::ReadResp &&
                (is_invalidate || mshr->hasPostInvalidate())) {
                // If intermediate cache got ReadRespWithInvalidate,
                // propagate that. Response should not have
                // isInvalidate() set otherwise.
                tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
                DPRINTF(Cache, "%s updated cmd to %s for addr %#llx\n",
                        __func__, tgt_pkt->cmdString(), tgt_pkt->getAddr());
            }
            // Reset the bus additional time as it is now accounted for
            tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
            cpuSidePort->schedTimingResp(tgt_pkt, completion_time, true);
            break;

          case MSHR::Target::FromPrefetcher:
            assert(tgt_pkt->cmd == MemCmd::HardPFReq);
            if (blk)
                blk->status |= BlkHWPrefetched;
            delete tgt_pkt->req;
            delete tgt_pkt;
            break;

          case MSHR::Target::FromSnoop:
            // I don't believe that a snoop can be in an error state
            assert(!is_error);
            // response to snoop request
            DPRINTF(Cache, "processing deferred snoop...\n");
            assert(!(is_invalidate && !mshr->hasPostInvalidate()));
            handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
            break;

          default:
            panic("Illegal target->source enum %d\n", target->source);
        }

        mshr->popTarget();
    }

    maintainClusivity(from_cache, blk);

    if (blk && blk->isValid()) {
        // an invalidate response stemming from a write line request
        // should not invalidate the block, so check if the
        // invalidation should be discarded
        if (is_invalidate || mshr->hasPostInvalidate()) {
            invalidateBlock(blk);
        } else if (mshr->hasPostDowngrade()) {
            blk->status &= ~BlkWritable;
        }
    }

    if (mshr->promoteDeferredTargets()) {
        // avoid later read getting stale data while write miss is
        // outstanding.. see comment in timingAccess()
        if (blk) {
            blk->status &= ~BlkReadable;
        }
        mshrQueue.markPending(mshr);
        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
    } else {
        mshrQueue.deallocate(mshr);
        if (wasFull && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }

        // Request the bus for a prefetch if this deallocation freed enough
        // MSHRs for a prefetch to take place
        if (prefetcher && mshrQueue.canPrefetch()) {
            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
                                         clockEdge());
            if (next_pf_time != MaxTick)
                schedMemSideSendEvent(next_pf_time);
        }
    }
    // reset the xbar additional timing as it is now accounted for
    pkt->headerDelay = pkt->payloadDelay = 0;

    // copy writebacks to write buffer
    doWritebacks(writebacks, forward_time);

    // if we used temp block, check to see if it's valid and then clear it out
    if (blk == tempBlock && tempBlock->isValid()) {
        // We use forwardLatency here because we are copying
        // Writebacks/CleanEvicts to write buffer. It specifies the latency to
        // allocate an internal buffer and to schedule an event to the
        // queued port.
        if (blk->isDirty() || writebackClean) {
            PacketPtr wbPkt = writebackBlk(blk);
            allocateWriteBuffer(wbPkt, forward_time);
            // Set BLOCK_CACHED flag if cached above.
            if (isCachedAbove(wbPkt))
                wbPkt->setBlockCached();
        } else {
            PacketPtr wcPkt = cleanEvictBlk(blk);
            // Check to see if block is cached above. If not, allocate
            // a write buffer entry
            // a write buffer entry for it.
            if (isCachedAbove(wcPkt))
                delete wcPkt;
            else
                allocateWriteBuffer(wcPkt, forward_time);
        }
        blk->invalidate();
    }

    DPRINTF(CacheVerbose, "Leaving %s with %s for addr %#llx\n", __func__,
            pkt->cmdString(), pkt->getAddr());
    delete pkt;
}

PacketPtr
Cache::writebackBlk(CacheBlk *blk)
{
    chatty_assert(!isReadOnly || writebackClean,
                  "Writeback from read-only cache");
    assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));

    writebacks[Request::wbMasterId]++;

    Request *req = new Request(tags->regenerateBlkAddr(blk->tag, blk->set),
                               blkSize, 0, Request::wbMasterId);
    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);
    blk->task_id = ContextSwitchTaskId::Unknown;
    blk->tickInserted = curTick();

    PacketPtr pkt =
        new Packet(req, blk->isDirty() ?
                   MemCmd::WritebackDirty : MemCmd::WritebackClean);

    DPRINTF(Cache, "Create Writeback %#llx writable: %d, dirty: %d\n",
            pkt->getAddr(), blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting shared means we pass the block in modified
        // state, mark our own block non-writeable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    std::memcpy(pkt->getPtr<uint8_t>(), blk->data, blkSize);

    return pkt;
}

PacketPtr
Cache::cleanEvictBlk(CacheBlk *blk)
{
    assert(!writebackClean);
    assert(blk && blk->isValid() && !blk->isDirty());
    // Creating a zero sized write, a message to the snoop filter
    Request *req =
        new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0,
                    Request::wbMasterId);
    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);
    blk->task_id = ContextSwitchTaskId::Unknown;
    blk->tickInserted = curTick();

    PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
    pkt->allocate();
    DPRINTF(Cache, "%s%s %x Create CleanEvict\n", pkt->cmdString(),
            pkt->req->isInstFetch() ? " (ifetch)" : "",
            pkt->getAddr());

    return pkt;
}

void
Cache::memWriteback()
{
    CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor);
    tags->forEachBlk(visitor);
}

void
Cache::memInvalidate()
{
    CacheBlkVisitorWrapper visitor(*this, &Cache::invalidateVisitor);
    tags->forEachBlk(visitor);
}

bool
Cache::isDirty() const
{
    CacheBlkIsDirtyVisitor visitor;
    tags->forEachBlk(visitor);

    return visitor.isDirty();
}

bool
Cache::writebackVisitor(CacheBlk &blk)
{
    if (blk.isDirty()) {
        assert(blk.isValid());

        Request request(tags->regenerateBlkAddr(blk.tag, blk.set),
                        blkSize, 0, Request::funcMasterId);
        request.taskId(blk.task_id);

        Packet packet(&request, MemCmd::WriteReq);
        packet.dataStatic(blk.data);

        memSidePort->sendFunctional(&packet);

        blk.status &= ~BlkDirty;
    }

    return true;
}

bool
Cache::invalidateVisitor(CacheBlk &blk)
{
    if (blk.isDirty())
        warn_once("Invalidating dirty cache lines. Expect things to "
                  "break.\n");

    if (blk.isValid()) {
        assert(!blk.isDirty());
        tags->invalidate(&blk);
        blk.invalidate();
    }

    return true;
}
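
// A minimal usage sketch for the visitor-based maintenance operations
// above (hypothetical caller, not part of the original code): flushing
// dirty lines before dropping them avoids the warn_once in
// invalidateVisitor, since writebackVisitor clears BlkDirty first.
//
//     cache->memWriteback();   // functional writeback of all dirty lines
//     cache->memInvalidate();  // then invalidate every valid line
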
CacheBlk*
Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks)
{
    CacheBlk *blk = tags->findVictim(addr);

    // It is valid to return nullptr if there is no victim
    if (!blk)
        return nullptr;

    if (blk->isValid()) {
        Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
        MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
        if (repl_mshr) {
            // must be an outstanding upgrade request
            // on a block we're about to replace...
            assert(!blk->isWritable() || blk->isDirty());
            assert(repl_mshr->needsWritable());
            // too hard to replace block with transient state
            // allocation failed, block not inserted
            return nullptr;
        } else {
            DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx "
                    "(%s): %s\n", repl_addr, blk->isSecure() ? "s" : "ns",
                    addr, is_secure ? "s" : "ns",
                    blk->isDirty() ? "writeback" : "clean");

            if (blk->wasPrefetched()) {
                unusedPrefetches++;
            }
            // Will send up Writeback/CleanEvict snoops via isCachedAbove
            // when pushing this writeback list into the write buffer.
            if (blk->isDirty() || writebackClean) {
                // Save writeback packet for handling by caller
                writebacks.push_back(writebackBlk(blk));
            } else {
                writebacks.push_back(cleanEvictBlk(blk));
            }
        }
    }

    return blk;
}

void
Cache::invalidateBlock(CacheBlk *blk)
{
    if (blk != tempBlock)
        tags->invalidate(blk);
    blk->invalidate();
}
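
// Illustrative use of allocateBlock (hypothetical caller, not part of
// the original code): the victim's eviction packets are returned via
// the writebacks list rather than being queued directly, so the caller
// decides when (and whether) they reach the write buffer.
//
//     PacketList writebacks;
//     CacheBlk *victim = allocateBlock(addr, is_secure, writebacks);
//     // ... fill and use the victim block ...
//     doWritebacks(writebacks, clockEdge(forwardLatency));
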
// Note that the reason we return a list of writebacks rather than
// inserting them directly in the write buffer is that this function
// is called by both atomic and timing-mode accesses, and in atomic
// mode we don't mess with the write buffer (we just perform the
// writebacks atomically once the original request is complete).
CacheBlk*
Cache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
                  bool allocate)
{
    assert(pkt->isResponse() || pkt->cmd == MemCmd::WriteLineReq);
    Addr addr = pkt->getAddr();
    bool is_secure = pkt->isSecure();
#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    // When handling a fill, we should have no writes to this line.
    assert(addr == blockAlign(addr));
    assert(!writeBuffer.findMatch(addr, is_secure));

    if (blk == nullptr) {
        // better have read new data...
        assert(pkt->hasData());

        // only read responses and write-line requests have data;
        // note that we don't write the data here for write-line - that
        // happens in the subsequent call to satisfyRequest
        assert(pkt->isRead() || pkt->cmd == MemCmd::WriteLineReq);

        // need to do a replacement if allocating, otherwise we stick
        // with the temporary storage
        blk = allocate ? allocateBlock(addr, is_secure, writebacks) : nullptr;

        if (blk == nullptr) {
            // No replaceable block or a mostly exclusive
            // cache... just use temporary storage to complete the
            // current request and then get rid of it
            assert(!tempBlock->isValid());
            blk = tempBlock;
            tempBlock->set = tags->extractSet(addr);
            tempBlock->tag = tags->extractTag(addr);
            // @todo: set security state as well...
            DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
                    is_secure ? "s" : "ns");
        } else {
            tags->insertBlock(pkt, blk);
        }

        // we should never be overwriting a valid block
        assert(!blk->isValid());
    } else {
        // existing block... probably an upgrade
        assert(blk->tag == tags->extractTag(addr));
        // either we're getting new data or the block should already be valid
        assert(pkt->hasData() || blk->isValid());
        // don't clear block status... if block is already dirty we
        // don't want to lose that
    }

    if (is_secure)
        blk->status |= BlkSecure;
    blk->status |= BlkValid | BlkReadable;

    // sanity check for whole-line writes, which should always be
    // marked as writable as part of the fill, and then later marked
    // dirty as part of satisfyRequest
    if (pkt->cmd == MemCmd::WriteLineReq) {
        assert(!pkt->hasSharers());
        // at the moment other caches do not respond to the
        // invalidation requests corresponding to a whole-line write
        assert(!pkt->cacheResponding());
    }

    // here we deal with setting the appropriate state of the line,
    // and we start by looking at the hasSharers flag, and ignore the
    // cacheResponding flag (normally signalling dirty data) if the
    // packet has sharers, thus the line is never allocated as Owned
    // (dirty but not writable), and always ends up being either
    // Shared, Exclusive or Modified, see Packet::setCacheResponding
    // for more details
    if (!pkt->hasSharers()) {
        // we could get a writable line from memory (rather than a
        // cache) even in a read-only cache, note that we set this bit
        // even for a read-only cache, possibly revisit this decision
        blk->status |= BlkWritable;

        // check if we got this via a cache-to-cache transfer (i.e., from a
        // cache that had the block in Modified or Owned state)
        if (pkt->cacheResponding()) {
            // we got the block in Modified state, and invalidated the
            // owner's copy
            blk->status |= BlkDirty;

            chatty_assert(!isReadOnly, "Should never see dirty snoop response "
                          "in read-only cache %s\n", name());
        }
    }

    DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
            addr, is_secure ? "s" : "ns", old_state, blk->print());

    // if we got new data, copy it in (checking for a read response
    // and a response that has data is the same in the end)
    if (pkt->isRead()) {
        // sanity checks
        assert(pkt->hasData());
        assert(pkt->getSize() == blkSize);

        std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
    }
    // We pay for fillLatency here.
    blk->whenReady = clockEdge() + fillLatency * clockPeriod() +
        pkt->payloadDelay;

    return blk;
}
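
// Summary of the fill-state decision in handleFill above (illustrative,
// derived from the hasSharers/cacheResponding logic; not part of the
// original code):
//
//     hasSharers  cacheResponding  resulting block state
//     ----------  ---------------  -----------------------------------
//     true        (ignored)        Shared (valid, not writable)
//     false       false            Exclusive (valid, writable, clean)
//     false       true             Modified (valid, writable, dirty)
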
/////////////////////////////////////////////////////
//
// Snoop path: requests coming in from the memory side
//
/////////////////////////////////////////////////////

void
Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
                              bool already_copied, bool pending_inval)
{
    // sanity check
    assert(req_pkt->isRequest());
    assert(req_pkt->needsResponse());

    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
            req_pkt->cmdString(), req_pkt->getAddr(), req_pkt->getSize());
    // timing-mode snoop responses require a new packet, unless we
    // already made a copy...
    PacketPtr pkt = req_pkt;
    if (!already_copied)
        // do not clear flags, and allocate space for data if the
        // packet needs it (the only packets that carry data are read
        // responses)
        pkt = new Packet(req_pkt, false, req_pkt->isRead());

    assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
           pkt->hasSharers());
    pkt->makeTimingResponse();
    if (pkt->isRead()) {
        pkt->setDataFromBlock(blk_data, blkSize);
    }
    if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
        // Assume we defer a response to a read from a far-away cache
        // A, then later defer a ReadExcl from a cache B on the same
        // bus as us. We'll assert cacheResponding in both cases, but
        // in the latter case cacheResponding will keep the
        // invalidation from reaching cache A. This special response
        // tells cache A that it gets the block to satisfy its read,
        // but must immediately invalidate it.
        pkt->cmd = MemCmd::ReadRespWithInvalidate;
    }
    // Here we consider forward_time, paying for just forward latency and
    // also charging the delay provided by the xbar.
    // forward_time is used as send_time in the next allocateWriteBuffer().
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    DPRINTF(CacheVerbose,
            "%s created response: %s addr %#llx size %d tick: %lu\n",
            __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize(),
            forward_time);
    memSidePort->schedTimingSnoopResp(pkt, forward_time, true);
}
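
// handleSnoop below is shared by the timing and atomic snoop paths, and
// is also used to replay deferred snoops from an MSHR (is_deferred). It
// returns the additional snoop delay, in ticks, accumulated while
// forwarding the snoop to the caches above; callers fold this into the
// latency they report to the crossbar. (Descriptive note, summarising
// the code that follows.)
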
save & restore packet src since it will get 1917 // rewritten to be relative to cpu-side bus (if any) 1918 bool alreadyResponded = pkt->cacheResponding(); 1919 if (is_timing) { 1920 // copy the packet so that we can clear any flags before 1921 // forwarding it upwards, we also allocate data (passing 1922 // the pointer along in case of static data), in case 1923 // there is a snoop hit in upper levels 1924 Packet snoopPkt(pkt, true, true); 1925 snoopPkt.setExpressSnoop(); 1926 // the snoop packet does not need to wait any additional 1927 // time 1928 snoopPkt.headerDelay = snoopPkt.payloadDelay = 0; 1929 cpuSidePort->sendTimingSnoopReq(&snoopPkt); 1930 1931 // add the header delay (including crossbar and snoop 1932 // delays) of the upward snoop to the snoop delay for this 1933 // cache 1934 snoop_delay += snoopPkt.headerDelay; 1935 1936 if (snoopPkt.cacheResponding()) { 1937 // cache-to-cache response from some upper cache 1938 assert(!alreadyResponded); 1939 pkt->setCacheResponding(); 1940 } 1941 // upstream cache has the block, or has an outstanding 1942 // MSHR, pass the flag on 1943 if (snoopPkt.hasSharers()) { 1944 pkt->setHasSharers(); 1945 } 1946 // If this request is a prefetch or clean evict and an upper level 1947 // signals block present, make sure to propagate the block 1948 // presence to the requester. 1949 if (snoopPkt.isBlockCached()) { 1950 pkt->setBlockCached(); 1951 } 1952 } else { 1953 cpuSidePort->sendAtomicSnoop(pkt); 1954 if (!alreadyResponded && pkt->cacheResponding()) { 1955 // cache-to-cache response from some upper cache: 1956 // forward response to original requester 1957 assert(pkt->isResponse()); 1958 } 1959 } 1960 } 1961 1962 if (!blk || !blk->isValid()) { 1963 if (is_deferred) { 1964 // we no longer have the block, and will not respond, but a 1965 // packet was allocated in MSHR::handleSnoop and we have 1966 // to delete it 1967 assert(pkt->needsResponse()); 1968 1969 // we have passed the block to a cache upstream, that 1970 // cache should be responding 1971 assert(pkt->cacheResponding()); 1972 1973 delete pkt; 1974 } 1975 1976 DPRINTF(CacheVerbose, "%s snoop miss for %s addr %#llx size %d\n", 1977 __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 1978 return snoop_delay; 1979 } else { 1980 DPRINTF(Cache, "%s snoop hit for %s addr %#llx size %d, " 1981 "old state is %s\n", __func__, pkt->cmdString(), 1982 pkt->getAddr(), pkt->getSize(), blk->print()); 1983 } 1984 1985 chatty_assert(!(isReadOnly && blk->isDirty()), 1986 "Should never have a dirty block in a read-only cache %s\n", 1987 name()); 1988 1989 // We may end up modifying both the block state and the packet (if 1990 // we respond in atomic mode), so just figure out what to do now 1991 // and then do it later. If we find dirty data while snooping for 1992 // an invalidate, we don't need to send a response. The 1993 // invalidation itself is taken care of below. 1994 bool respond = blk->isDirty() && pkt->needsResponse() && 1995 pkt->cmd != MemCmd::InvalidateReq; 1996 bool have_writable = blk->isWritable(); 1997 1998 // Invalidate any prefetch's from below that would strip write permissions 1999 // MemCmd::HardPFReq is only observed by upstream caches. After missing 2000 // above and in it's own cache, a new MemCmd::ReadReq is created that 2001 // downstream caches observe. 
    // Invalidate any prefetches from below that would strip write
    // permissions. MemCmd::HardPFReq is only observed by upstream
    // caches. After missing above and in its own cache, a new
    // MemCmd::ReadReq is created that downstream caches observe.
    if (pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
                "from lower cache\n", pkt->getAddr(), pkt->cmdString());
        pkt->setBlockCached();
        return snoop_delay;
    }

    if (pkt->isRead() && !invalidate) {
        // reading without requiring the line in a writable state
        assert(!needs_writable);
        pkt->setHasSharers();

        // if the requesting packet is uncacheable, retain the line in
        // the current state, otherwise unset the writable flag,
        // which means we go from Modified to Owned (and will respond
        // below), remain in Owned (and will respond below), from
        // Exclusive to Shared, or remain in Shared
        if (!pkt->req->isUncacheable())
            blk->status &= ~BlkWritable;
    }

    if (respond) {
        // prevent anyone else from responding, cache as well as
        // memory, and also prevent any memory from even seeing the
        // request
        pkt->setCacheResponding();
        if (have_writable) {
            // inform the cache hierarchy that this cache had the line
            // in the Modified state so that we avoid unnecessary
            // invalidations (see Packet::setResponderHadWritable)
            pkt->setResponderHadWritable();

            // in the case of an uncacheable request there is no point
            // in setting the responderHadWritable flag, but since the
            // recipient does not care there is no harm in doing so
        } else {
            // if the packet has needsWritable set we invalidate our
            // copy below and all other copies will be invalidated
            // through express snoops, and if needsWritable is not set
            // we already called setHasSharers above
        }

        // if we are returning a writable and dirty (Modified) line,
        // we should be invalidating the line
        panic_if(!invalidate && !pkt->hasSharers(),
                 "%s is passing a Modified line through %s to %#llx, "
                 "but keeping the block",
                 name(), pkt->cmdString(), pkt->getAddr());

        if (is_timing) {
            doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
        } else {
            pkt->makeAtomicResponse();
            // packets such as upgrades do not actually have any data
            // payload
            if (pkt->hasData())
                pkt->setDataFromBlock(blk->data, blkSize);
        }
    }

    if (!respond && is_timing && is_deferred) {
        // if it's a deferred timing snoop to which we are not
        // responding, then we've made a copy of both the request and
        // the packet, delete them here
        assert(pkt->needsResponse());
        assert(!pkt->cacheResponding());
        delete pkt->req;
        delete pkt;
    }

    // Do this last in case it deallocates block data or something
    // like that
    if (invalidate) {
        invalidateBlock(blk);
    }

    DPRINTF(Cache, "new state is %s\n", blk->print());

    return snoop_delay;
}
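
// Callers of handleSnoop combine its return value with their own lookup
// latency; e.g. (illustrative restatement of the pattern used by the
// snoop entry points below):
//
//     uint32_t snoop_delay = handleSnoop(pkt, blk, ...);
//     pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
//                                          snoop_delay +
//                                          lookupLatency * clockPeriod());
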
void
Cache::recvTimingSnoopReq(PacketPtr pkt)
{
    DPRINTF(CacheVerbose, "%s for %s addr %#llx size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    // Snoops shouldn't happen when bypassing caches
    assert(!system->bypassCaches());

    // no need to snoop requests that are not in range
    if (!inRange(pkt->getAddr())) {
        return;
    }

    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);

    Addr blk_addr = blockAlign(pkt->getAddr());
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    // Update the latency cost of the snoop so that the crossbar can
    // account for it. Do not overwrite what other neighbouring caches
    // have already done, rather take the maximum. The update is
    // tentative, for cases where we return before an upward snoop
    // happens below.
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
                                         lookupLatency * clockPeriod());

    // Inform a request (Prefetch, CleanEvict or Writeback) from below
    // of an MSHR hit by setting BLOCK_CACHED.
    if (mshr && pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Setting block cached for %s from "
                "lower cache on mshr hit %#x\n",
                pkt->cmdString(), pkt->getAddr());
        pkt->setBlockCached();
        return;
    }

    // Let the MSHR itself track the snoop and decide whether we want
    // to go ahead and do the regular cache snoop
    if (mshr && mshr->handleSnoop(pkt, order++)) {
        DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s)."
                " mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
                mshr->print());

        if (mshr->getNumTargets() > numTarget)
            warn("allocating bonus target for snoop"); //handle later
        return;
    }

    // We also need to check the writeback buffers and handle those
    WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure);
    if (wb_entry) {
        DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
                pkt->getAddr(), is_secure ? "s" : "ns");
        // Expect to see only Writebacks and/or CleanEvicts here, both of
        // which should not be generated for uncacheable data.
        assert(!wb_entry->isUncacheable());
        // There should only be a single request responsible for generating
        // Writebacks/CleanEvicts.
        assert(wb_entry->getNumTargets() == 1);
        PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
        assert(wb_pkt->isEviction());

        if (pkt->isEviction()) {
            // if the block is found in the write queue, set the BLOCK_CACHED
            // flag for the Writeback/CleanEvict snoop. On return the snoop
            // will propagate the BLOCK_CACHED flag in Writeback packets and
            // prevent any CleanEvicts from travelling down the memory
            // hierarchy.
            pkt->setBlockCached();
            DPRINTF(Cache, "Squashing %s from lower cache on writequeue hit"
                    " %#x\n", pkt->cmdString(), pkt->getAddr());
            return;
        }

        // conceptually writebacks are no different to other blocks in
        // this cache, so the behaviour is modelled after handleSnoop,
        // the difference being that instead of querying the block
        // state to determine if it is dirty and writable, we use the
        // command and fields of the writeback packet
        bool respond = wb_pkt->cmd == MemCmd::WritebackDirty &&
            pkt->needsResponse() && pkt->cmd != MemCmd::InvalidateReq;
        bool have_writable = !wb_pkt->hasSharers();
        bool invalidate = pkt->isInvalidate();

        if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
            assert(!pkt->needsWritable());
            pkt->setHasSharers();
            wb_pkt->setHasSharers();
        }

        if (respond) {
            pkt->setCacheResponding();

            if (have_writable) {
                pkt->setResponderHadWritable();
            }

            doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
                                   false, false);
        }
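
        // For example (illustrative, not part of the original code): a
        // read snoop that hits a WritebackDirty entry above yields
        // respond = true, and if the writeback carried Modified data
        // (!wb_pkt->hasSharers()) the response also sets
        // setResponderHadWritable(), exactly as a snoop hit on a dirty
        // block would in handleSnoop.
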
        if (invalidate) {
            // Invalidation trumps our writeback... discard here
            // Note: markInService will remove the entry from the
            // writeback buffer.
            markInService(wb_entry);
            delete wb_pkt;
        }
    }

    // If this was a shared writeback, there may still be
    // other shared copies above that require invalidation.
    // We could be more selective and return here if the
    // request is non-exclusive or if the writeback is
    // exclusive.
    uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false);

    // Override what we did when we first saw the snoop, as we now
    // also have the cost of the upwards snoops to account for
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay +
                                         lookupLatency * clockPeriod());
}

bool
Cache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
{
    // Express snoop responses from master to slave, e.g., from L1 to L2
    cache->recvTimingSnoopResp(pkt);
    return true;
}

Tick
Cache::recvAtomicSnoop(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!system->bypassCaches());

    // no need to snoop requests that are not in range.
    if (!inRange(pkt->getAddr())) {
        return 0;
    }

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
    uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
    return snoop_delay + lookupLatency * clockPeriod();
}
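
// Descriptive note on the arbitration implemented below (summarising
// the code, not changing it): a ready write-buffer entry is sent first
// only when the write buffer is full or no MSHR is ready; otherwise
// misses take priority. In both directions, an older conflicting entry
// in the other queue is serviced first so that accesses to the same
// block stay ordered.
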
QueueEntry*
Cache::getNextQueueEntry()
{
    // Check both the MSHR queue and the write buffer for potential
    // requests; note that null does not mean there is no request, it
    // could simply be that it is not ready
    MSHR *miss_mshr = mshrQueue.getNext();
    WriteQueueEntry *wq_entry = writeBuffer.getNext();

    // If we got a write buffer request ready, first priority is a
    // full write buffer, otherwise we favour the miss requests
    if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
        // need to search the MSHR queue for a conflicting earlier miss
        MSHR *conflict_mshr =
            mshrQueue.findPending(wq_entry->blkAddr,
                                  wq_entry->isSecure);

        if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
            // Service misses in order until the conflict is cleared.
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue write
        return wq_entry;
    } else if (miss_mshr) {
        // need to check for a conflicting earlier writeback
        WriteQueueEntry *conflict_mshr =
            writeBuffer.findPending(miss_mshr->blkAddr,
                                    miss_mshr->isSecure);
        if (conflict_mshr) {
            // not sure why we don't check order here... it was in the
            // original code but commented out.

            // The only way this happens is if we are doing a write
            // and we didn't have permissions, then subsequently saw a
            // writeback (owned got evicted). We need to perform the
            // writeback first to preserve the dirty data, then we can
            // issue the write.

            // should we return wq_entry here instead?  I.e. do we
            // have to flush writes in order?  I don't think so... not
            // for Alpha anyway.  Maybe for x86?
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue read
        return miss_mshr;
    }

    // fall through... no pending requests.  Try a prefetch.
    assert(!miss_mshr && !wq_entry);
    if (prefetcher && mshrQueue.canPrefetch()) {
        // If we have a miss queue slot, we can try a prefetch
        PacketPtr pkt = prefetcher->getPacket();
        if (pkt) {
            Addr pf_addr = blockAlign(pkt->getAddr());
            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
                // Update statistic on number of prefetches issued
                // (hwpf_mshr_misses)
                assert(pkt->req->masterId() < system->maxMasters());
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // allocate an MSHR and return it, note
                // that we send the packet straight away, so do not
                // schedule the send
                return allocateMissBuffer(pkt, curTick(), false);
            } else {
                // free the request and packet
                delete pkt->req;
                delete pkt;
            }
        }
    }

    return nullptr;
}

bool
Cache::isCachedAbove(PacketPtr pkt, bool is_timing) const
{
    if (!forwardSnoops)
        return false;
    // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
    // Writeback snoops into upper level caches to check for copies of the
    // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict
    // packet, the cache can inform the crossbar below of presence or absence
    // of the block.
    if (is_timing) {
        Packet snoop_pkt(pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // Assert that the packet is either a Writeback or a CleanEvict and
        // not a prefetch request because prefetch requests need an MSHR
        // and may generate a snoop response.
        assert(pkt->isEviction());
        snoop_pkt.senderState = nullptr;
        cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
        // Writeback/CleanEvict snoops do not generate a snoop response.
        assert(!(snoop_pkt.cacheResponding()));
        return snoop_pkt.isBlockCached();
    } else {
        cpuSidePort->sendAtomicSnoop(pkt);
        return pkt->isBlockCached();
    }
}

Tick
Cache::nextQueueReadyTime() const
{
    Tick nextReady = std::min(mshrQueue.nextReadyTime(),
                              writeBuffer.nextReadyTime());

    // Don't signal prefetch ready time if no MSHRs are available;
    // will signal once enough MSHRs are deallocated
    if (prefetcher && mshrQueue.canPrefetch()) {
        nextReady = std::min(nextReady,
                             prefetcher->nextPrefetchReadyTime());
    }

    return nextReady;
}
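
// Return-value convention for the two send functions below (a
// descriptive note, matching how CacheReqPacketQueue::sendDeferredPacket
// interprets them): true means the mem-side port refused the packet and
// the queue is now waiting on a retry; false means the packet went out
// and the entry was marked in service.
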
bool
Cache::sendMSHRQueuePacket(MSHR* mshr)
{
    assert(mshr);

    // use the request from the first target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;

    DPRINTF(Cache, "%s MSHR %s for addr %#llx size %d\n", __func__,
            tgt_pkt->cmdString(), tgt_pkt->getAddr(),
            tgt_pkt->getSize());

    CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);

    if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
        // we should never have hardware prefetches to allocated
        // blocks
        assert(blk == nullptr);

        // We need to check the caches above us to verify that
        // they don't have a copy of this block in the dirty state
        // at the moment. Without this check we could get a stale
        // copy from memory that might get used in place of the
        // dirty one.
        Packet snoop_pkt(tgt_pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // We are sending this packet upwards, but if it hits we will
        // get a snoop response that we end up treating just like a
        // normal response, hence it needs the MSHR as its sender
        // state
        snoop_pkt.senderState = mshr;
        cpuSidePort->sendTimingSnoopReq(&snoop_pkt);

        // Check to see if the prefetch was squashed by an upper cache
        // (to prevent us from grabbing the line), or if a writeback
        // arrived between the time the prefetch was placed in the
        // MSHRs and when it was selected to be sent.

        // It is important to check cacheResponding before
        // prefetchSquashed. If another cache has committed to
        // responding, it will be sending a dirty response which will
        // arrive at the MSHR allocated for this request. Checking the
        // prefetchSquash first may result in the MSHR being
        // prematurely deallocated.
        if (snoop_pkt.cacheResponding()) {
            auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
            assert(r.second);

            // if we are getting a snoop response with no sharers it
            // will be allocated as Modified
            bool pending_modified_resp = !snoop_pkt.hasSharers();
            markInService(mshr, pending_modified_resp);

            DPRINTF(Cache, "Upward snoop of prefetch for addr"
                    " %#x (%s) hit\n",
                    tgt_pkt->getAddr(), tgt_pkt->isSecure() ? "s" : "ns");
            return false;
        }

        if (snoop_pkt.isBlockCached()) {
            DPRINTF(Cache, "Block present, prefetch squashed by cache. "
                    "Deallocating mshr target %#x.\n",
                    mshr->blkAddr);

            // Deallocate the mshr target
            if (mshrQueue.forceDeallocateTarget(mshr)) {
                // Clear the blocked state if this deallocation freed an
                // MSHR when all had previously been utilized
                clearBlocked(Blocked_NoMSHRs);
            }
            return false;
        }
    }

    // either a prefetch that is not present upstream, or a normal
    // MSHR request, proceed to get the packet to send downstream
    PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable());

    mshr->isForward = (pkt == nullptr);

    if (mshr->isForward) {
        // not a cache block request, but a response is expected
        // make a copy of the current packet to forward, keep the current
        // copy for response handling
        pkt = new Packet(tgt_pkt, false, true);
        assert(!pkt->isWrite());
    }

    // play it safe and append (rather than set) the sender state,
    // as forwarded packets may already have existing state
    pkt->pushSenderState(mshr);

    if (!memSidePort->sendTimingReq(pkt)) {
        // we are awaiting a retry, but we
        // delete the packet and will be creating a new packet
        // when we get the opportunity
        delete pkt;

        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        // As part of the call to sendTimingReq the packet is
        // forwarded to all neighbouring caches (and any caches
        // above them) as a snoop.  Thus at this point we know if
        // any of the neighbouring caches are responding, and if
        // so, we know it is dirty, and we can determine if it is
        // being passed as Modified, making our MSHR the ordering
        // point
        bool pending_modified_resp = !pkt->hasSharers() &&
            pkt->cacheResponding();
        markInService(mshr, pending_modified_resp);
        return false;
    }
}

bool
Cache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
{
    assert(wq_entry);

    // always a single target for write queue entries
    PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;

    DPRINTF(Cache, "%s write %s for addr %#llx size %d\n", __func__,
            tgt_pkt->cmdString(), tgt_pkt->getAddr(),
            tgt_pkt->getSize());

    // forward as is, both for evictions and uncacheable writes
    if (!memSidePort->sendTimingReq(tgt_pkt)) {
        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        markInService(wq_entry);
        return false;
    }
}

void
Cache::serialize(CheckpointOut &cp) const
{
    bool dirty(isDirty());

    if (dirty) {
        warn("*** The cache still contains dirty data. ***\n");
        warn("    Make sure to drain the system using the correct flags.\n");
        warn("    This checkpoint will not restore correctly and dirty data "
             "in the cache will be lost!\n");
    }

    // Since we don't checkpoint the data in the cache, any dirty data
    // will be lost when restoring from a checkpoint of a system that
    // wasn't drained properly. Flag the checkpoint as invalid if the
    // cache contains dirty data.
    bool bad_checkpoint(dirty);
    SERIALIZE_SCALAR(bad_checkpoint);
}

void
Cache::unserialize(CheckpointIn &cp)
{
    bool bad_checkpoint;
    UNSERIALIZE_SCALAR(bad_checkpoint);
    if (bad_checkpoint) {
        fatal("Restoring from checkpoints with dirty caches is not supported "
              "in the classic memory system. Please remove any caches or "
              "drain them properly before taking checkpoints.\n");
    }
}

///////////////
//
// CpuSidePort
//
///////////////

AddrRangeList
Cache::CpuSidePort::getAddrRanges() const
{
    return cache->getAddrRanges();
}

bool
Cache::CpuSidePort::recvTimingReq(PacketPtr pkt)
{
    assert(!cache->system->bypassCaches());

    bool success = false;

    // always let express snoop packets through, even if blocked
    if (pkt->isExpressSnoop()) {
        // do not change the current retry state
        bool M5_VAR_USED bypass_success = cache->recvTimingReq(pkt);
        assert(bypass_success);
        return true;
    } else if (blocked || mustSendRetry) {
        // either already committed to send a retry, or blocked
        success = false;
    } else {
        // pass it on to the cache, and let the cache decide if we
        // have to retry or not
        success = cache->recvTimingReq(pkt);
    }

    // remember if we have to retry
    mustSendRetry = !success;
    return success;
}

Tick
Cache::CpuSidePort::recvAtomic(PacketPtr pkt)
{
    return cache->recvAtomic(pkt);
}

void
Cache::CpuSidePort::recvFunctional(PacketPtr pkt)
{
    // functional request
    cache->functionalAccess(pkt, true);
}

Cache::
CpuSidePort::CpuSidePort(const std::string &_name, Cache *_cache,
                         const std::string &_label)
    : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache)
{
}

Cache*
CacheParams::create()
{
    assert(tags);

    return new Cache(this);
}

///////////////
//
// MemSidePort
//
///////////////

bool
Cache::MemSidePort::recvTimingResp(PacketPtr pkt)
{
    cache->recvTimingResp(pkt);
    return true;
}

// Express snooping requests to memside port
void
Cache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
{
    // handle snooping requests
    cache->recvTimingSnoopReq(pkt);
}

Tick
Cache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
{
    return cache->recvAtomicSnoop(pkt);
}

void
Cache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
{
    // functional snoop (note that in contrast to atomic we don't have
    // a specific functionalSnoop method, as they have the same
    // behaviour regardless)
    cache->functionalAccess(pkt, false);
}
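
// Descriptive note on the deferred-send mechanism below: the request
// queue never stores deferred packets of its own; each time the port is
// free, sendDeferredPacket pulls the next ready entry from the cache via
// getNextQueueEntry(), and entry->sendPacket(cache) dispatches to
// sendMSHRQueuePacket() or sendWriteQueuePacket() as appropriate.
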
void
Cache::CacheReqPacketQueue::sendDeferredPacket()
{
    // sanity check
    assert(!waitingOnRetry);

    // there should never be any deferred request packets in the
    // queue, instead we rely on the cache to provide the packets
    // from the MSHR queue or write queue
    assert(deferredPacketReadyTime() == MaxTick);

    // check for request packets (requests & writebacks)
    QueueEntry* entry = cache.getNextQueueEntry();

    if (!entry) {
        // can happen if e.g. we attempt a writeback and fail, but
        // before the retry, the writeback is eliminated because
        // we snoop another cache's ReadEx.
    } else {
        // let our snoop responses go first if there are responses to
        // the same addresses
        if (checkConflictingSnoop(entry->blkAddr)) {
            return;
        }
        waitingOnRetry = entry->sendPacket(cache);
    }

    // if we succeeded and are not waiting for a retry, schedule the
    // next send considering when the next queue is ready, note that
    // snoop responses have their own packet queue and thus schedule
    // their own events
    if (!waitingOnRetry) {
        schedSendEvent(cache.nextQueueReadyTime());
    }
}

Cache::
MemSidePort::MemSidePort(const std::string &_name, Cache *_cache,
                         const std::string &_label)
    : BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
      _reqQueue(*_cache, *this, _snoopRespQueue, _label),
      _snoopRespQueue(*_cache, *this, _label), cache(_cache)
{
}