/*
 * Copyright (c) 2010-2015 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Sandberg
 */

/**
 * @file
 * Cache definitions.
 */
#include "mem/cache/cache.hh"

#include "base/misc.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheTags.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "sim/sim_exit.hh"

Cache::Cache(const CacheParams *p)
    : BaseCache(p, p->system->cacheLineSize()),
      tags(p->tags),
      prefetcher(p->prefetcher),
      doFastWrites(true),
      prefetchOnAccess(p->prefetch_on_access)
{
    tempBlock = new CacheBlk();
    tempBlock->data = new uint8_t[blkSize];

    cpuSidePort = new CpuSidePort(p->name + ".cpu_side", this,
                                  "CpuSidePort");
    memSidePort = new MemSidePort(p->name + ".mem_side", this,
                                  "MemSidePort");

    tags->setCache(this);
    if (prefetcher)
        prefetcher->setCache(this);
}

Cache::~Cache()
{
    delete [] tempBlock->data;
    delete tempBlock;

    delete cpuSidePort;
    delete memSidePort;
}

void
Cache::regStats()
{
    BaseCache::regStats();
}

void
Cache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = tags->extractBlkOffset(pkt->getAddr());
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}

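// Satisfy a CPU-side request out of a valid block: perform the read,
// write or swap against the block's data and set the coherence
// response flags (mem-inhibit, shared) that the requester and any
// snoopers depend on.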
void
Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
                             bool deferred_response, bool pending_downgrade)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsExclusive() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        cmpAndSwap(blk, pkt);
    } else if (pkt->isWrite()) {
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in Exclusive
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty even if we are a failed
        // StoreCond so we supply data to any snoops that have
        // appended themselves to this cache before knowing the store
        // will fail.
        blk->status |= BlkDirty;
        DPRINTF(Cache, "%s for %s addr %#llx size %d (write)\n", __func__,
                pkt->cmdString(), pkt->getAddr(), pkt->getSize());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }
        pkt->setDataFromBlock(blk->data, blkSize);
        // determine if this read is from a (coherent) cache, or not
        // by looking at the command type; we could potentially add a
        // packet attribute such as 'FromCache' to make this check a
        // bit cleaner
        if (pkt->cmd == MemCmd::ReadExReq ||
            pkt->cmd == MemCmd::ReadSharedReq ||
            pkt->cmd == MemCmd::ReadCleanReq ||
            pkt->cmd == MemCmd::SCUpgradeFailReq) {
            assert(pkt->getSize() == blkSize);
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsExclusive()) {
                // sanity check
                assert(pkt->cmd == MemCmd::ReadExReq ||
                       pkt->cmd == MemCmd::SCUpgradeFailReq);

                // if we have a dirty copy, make sure the recipient
                // keeps it marked dirty
                if (blk->isDirty()) {
                    pkt->assertMemInhibit();
                }
                // on ReadExReq we give up our copy unconditionally
                if (blk != tempBlock)
                    tags->invalidate(blk);
                blk->invalidate();
            } else if (blk->isWritable() && !pending_downgrade &&
                       !pkt->sharedAsserted() &&
                       pkt->cmd != MemCmd::ReadCleanReq) {
                // we can give the requester an exclusive copy (by not
                // asserting shared line) on a read request if:
                // - we have an exclusive copy at this level (& below)
                // - we don't have a pending snoop from below
                //   signaling another read request
                // - no other cache above has a copy (otherwise it
                //   would have asserted shared line on request)
                // - we are not satisfying an instruction fetch (this
                //   prevents dirty data in the i-cache)

                if (blk->isDirty()) {
                    // special considerations if we're owner:
                    if (!deferred_response) {
                        // if we are responding immediately and can
                        // signal that we're transferring ownership
                        // along with exclusivity, do so
                        pkt->assertMemInhibit();
                        blk->status &= ~BlkDirty;
                    } else {
                        // if we're responding after our own miss,
                        // there's a window where the recipient didn't
                        // know it was getting ownership and may not
                        // have responded to snoops correctly, so we
                        // can't pass off ownership *or* exclusivity
                        pkt->assertShared();
                    }
                }
            } else {
                // otherwise only respond with a shared copy
                pkt->assertShared();
            }
        }
    } else {
        // Upgrade or Invalidate: since we have the block Exclusively
        // (E or M), we ack and then invalidate.
        assert(pkt->isUpgrade() || pkt->isInvalidate());
        assert(blk != tempBlock);
        tags->invalidate(blk);
        blk->invalidate();
        DPRINTF(Cache, "%s for %s addr %#llx size %d (invalidation)\n",
                __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
    }
}

/////////////////////////////////////////////////////
//
// MSHR helper functions
//
/////////////////////////////////////////////////////


void
Cache::markInService(MSHR *mshr, bool pending_dirty_resp)
{
    markInServiceInternal(mshr, pending_dirty_resp);
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

bool
Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
              PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());

    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    if (pkt->req->isUncacheable()) {
        DPRINTF(Cache, "%s%s addr %#llx uncacheable\n", pkt->cmdString(),
                pkt->req->isInstFetch() ? " (ifetch)" : "",
                pkt->getAddr());

        // flush and invalidate any existing block
        CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
        if (old_blk && old_blk->isValid()) {
            if (old_blk->isDirty())
                writebacks.push_back(writebackBlk(old_blk));
            else
                writebacks.push_back(cleanEvictBlk(old_blk));
            tags->invalidate(old_blk);
            old_blk->invalidate();
        }

        blk = NULL;
        // lookupLatency is the latency in case the request is uncacheable.
        lat = lookupLatency;
        return false;
    }

    ContextID id = pkt->req->hasContextId() ?
        pkt->req->contextId() : InvalidContextID;
    // Here lat is the value passed as parameter to accessBlock() function
    // that can modify its value.
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat, id);

    DPRINTF(Cache, "%s%s addr %#llx size %d (%s) %s\n", pkt->cmdString(),
            pkt->req->isInstFetch() ? " (ifetch)" : "",
            pkt->getAddr(), pkt->getSize(), pkt->isSecure() ? "s" : "ns",
            blk ? "hit " + blk->print() : "miss");


    if (pkt->evictingBlock()) {
        // We check for presence of the block in above caches before
        // issuing a Writeback or CleanEvict to the write buffer.
        // Therefore the only possible case is a CleanEvict packet coming
        // from above and encountering a Writeback generated in this cache
        // and waiting in the write buffer. Cases of upper level peer caches
        // generating CleanEvict and Writeback or simply CleanEvict and
        // CleanEvict almost simultaneously will be caught by snoops sent out
        // by crossbar.
        std::vector<MSHR *> outgoing;
        if (writeBuffer.findMatches(pkt->getAddr(), pkt->isSecure(),
                                    outgoing)) {
            assert(outgoing.size() == 1);
            PacketPtr wbPkt = outgoing[0]->getTarget()->pkt;
            assert(pkt->cmd == MemCmd::CleanEvict &&
                   wbPkt->cmd == MemCmd::Writeback);
            // As the CleanEvict is coming from above, it would have snooped
            // into other peer caches of the same level while traversing the
            // crossbar. If a copy of the block had been found, the CleanEvict
            // would have been deleted in the crossbar. Now that the
            // CleanEvict is here we can be sure none of the other upper level
            // caches connected to this cache have the block, so we can clear
            // the BLOCK_CACHED flag in the Writeback if set and discard the
            // CleanEvict by returning true.
            wbPkt->clearBlockCached();
            return true;
        }
    }

    // Writeback handling is a special case. We can write the block into
    // the cache without having a writeable copy (or any copy at all).
    if (pkt->cmd == MemCmd::Writeback) {
        assert(blkSize == pkt->getSize());
        if (blk == NULL) {
            // need to do a replacement
            blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks);
            if (blk == NULL) {
                // no replaceable block available: give up, fwd to next level.
                incMissCount(pkt);
                return false;
            }
            tags->insertBlock(pkt, blk);

            blk->status = (BlkValid | BlkReadable);
            if (pkt->isSecure()) {
                blk->status |= BlkSecure;
            }
        }
        blk->status |= BlkDirty;
        // if shared is not asserted we got the writeback in modified
        // state; if it is asserted we are in the owned state
        if (!pkt->sharedAsserted()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
        incHitCount(pkt);
        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        if (blk != NULL) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and delete
            // it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the CleanEvict
        // like a Writeback which could not find a replaceable block so has to
        // go to next level.
        return false;
    } else if ((blk != NULL) &&
               (pkt->needsExclusive() ? blk->isWritable()
                                      : blk->isReadable())) {
        // OK to satisfy access
        incHitCount(pkt);
        satisfyCpuSideRequest(pkt, blk);
        return true;
    }

    // Can't satisfy access normally... either no block (blk == NULL)
    // or have block but need exclusive & only have shared.

    incMissCount(pkt);

    if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}

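// Empty marker attached as sender state to snoops forwarded to the
// caches above; its presence lets recvTimingSnoopResp() tell genuine
// cache-to-cache snoop responses apart from prefetch responses.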
class ForwardResponseRecord : public Packet::SenderState
{
  public:

    ForwardResponseRecord() {}
};

void
Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // We use forwardLatency here because we are copying writebacks to
        // write buffer.  Call isCachedAbove for both Writebacks and
        // CleanEvicts. If isCachedAbove returns true we set BLOCK_CACHED flag
        // in Writebacks and discard CleanEvicts.
        if (isCachedAbove(wbPkt)) {
            if (wbPkt->cmd == MemCmd::CleanEvict) {
                // Delete CleanEvict because cached copies exist above. The
                // packet destructor will delete the request object because
                // this is a non-snoop request packet which does not require a
                // response.
                delete wbPkt;
            } else {
                // Set BLOCK_CACHED flag in Writeback and send below, so that
                // the Writeback does not reset the bit corresponding to this
                // address in the snoop filter below.
                wbPkt->setBlockCached();
                allocateWriteBuffer(wbPkt, forward_time);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            allocateWriteBuffer(wbPkt, forward_time);
        }
        writebacks.pop_front();
    }
}

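// Atomic-mode counterpart of doWritebacks(): packets are sent down the
// memory-side port immediately rather than being queued in the write
// buffer, and are deleted here since no response is expected.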
void
Cache::doWritebacksAtomic(PacketList& writebacks)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // Call isCachedAbove for both Writebacks and CleanEvicts. If
        // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
        // and discard CleanEvicts.
        if (isCachedAbove(wbPkt, false)) {
            if (wbPkt->cmd == MemCmd::Writeback) {
                // Set BLOCK_CACHED flag in Writeback and send below,
                // so that the Writeback does not reset the bit
                // corresponding to this address in the snoop filter
                // below. We can discard CleanEvicts because cached
                // copies exist above. Atomic mode isCachedAbove
                // modifies packet to set BLOCK_CACHED flag
                memSidePort->sendAtomic(wbPkt);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            memSidePort->sendAtomic(wbPkt);
        }
        writebacks.pop_front();
        // In case of CleanEvicts, the packet destructor will delete the
        // request object because this is a non-snoop request packet which
        // does not require a response.
        delete wbPkt;
    }
}

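// Handle a snoop response arriving on the CPU-side port: either a
// cache-to-cache response being forwarded down (tagged with a
// ForwardResponseRecord) or a prefetch response coming back from above.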
void
Cache::recvTimingSnoopResp(PacketPtr pkt)
{
    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    assert(pkt->isResponse());

    // must be cache-to-cache response from upper to lower level
    ForwardResponseRecord *rec =
        dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
    assert(!system->bypassCaches());

    if (rec == NULL) {
        // @todo What guarantee do we have that this HardPFResp is
        // actually for this cache, and not a cache closer to the
        // memory?
        assert(pkt->cmd == MemCmd::HardPFResp);
        // Check if it's a prefetch response and handle it. We shouldn't
        // get any other kinds of responses without FRRs.
        DPRINTF(Cache, "Got prefetch response from above for addr %#llx (%s)\n",
                pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
        recvTimingResp(pkt);
        return;
    }

    pkt->popSenderState();
    delete rec;
    // forwardLatency is set here because there is a response from an
    // upper level cache.
    // To pay the delay that occurs if the packet comes from the bus,
    // we charge also headerDelay.
    Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    memSidePort->schedTimingSnoopResp(pkt, snoop_resp_time);
}

void
Cache::promoteWholeLineWrites(PacketPtr pkt)
{
    // Cache line clearing instructions
    if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) {
        pkt->cmd = MemCmd::WriteLineReq;
        DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
    }
}

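// Main entry point for timing-mode requests arriving on the CPU-side
// port: look up the block, satisfy hits directly, and allocate MSHR or
// write-buffer entries for misses.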
"s" : "ns"); 539 540 // if the packet needs exclusive, and the cache that has 541 // promised to respond (setting the inhibit flag) is not 542 // providing exclusive (it is in O vs M state), we know that 543 // there may be other shared copies in the system; go out and 544 // invalidate them all 545 if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) { 546 // create a downstream express snoop with cleared packet 547 // flags, there is no need to allocate any data as the 548 // packet is merely used to co-ordinate state transitions 549 Packet *snoop_pkt = new Packet(pkt, true, false); 550 551 // also reset the bus time that the original packet has 552 // not yet paid for 553 snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0; 554 555 // make this an instantaneous express snoop, and let the 556 // other caches in the system know that the packet is 557 // inhibited, because we have found the authorative copy 558 // (O) that will supply the right data 559 snoop_pkt->setExpressSnoop(); 560 snoop_pkt->assertMemInhibit(); 561 562 // this express snoop travels towards the memory, and at 563 // every crossbar it is snooped upwards thus reaching 564 // every cache in the system 565 bool M5_VAR_USED success = memSidePort->sendTimingReq(snoop_pkt); 566 // express snoops always succeed 567 assert(success); 568 569 // main memory will delete the packet 570 } 571 572 /// @todo nominally we should just delete the packet here, 573 /// however, until 4-phase stuff we can't because sending 574 /// cache is still relying on it. 575 pendingDelete.push_back(pkt); 576 577 // no need to take any action in this particular cache as the 578 // caches along the path to memory are allowed to keep lines 579 // in a shared state, and a cache above us already committed 580 // to responding 581 return true; 582 } 583 584 // anything that is merely forwarded pays for the forward latency and 585 // the delay provided by the crossbar 586 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 587 588 // We use lookupLatency here because it is used to specify the latency 589 // to access. 590 Cycles lat = lookupLatency; 591 CacheBlk *blk = NULL; 592 bool satisfied = false; 593 { 594 PacketList writebacks; 595 // Note that lat is passed by reference here. The function 596 // access() calls accessBlock() which can modify lat value. 597 satisfied = access(pkt, blk, lat, writebacks); 598 599 // copy writebacks to write buffer here to ensure they logically 600 // proceed anything happening below 601 doWritebacks(writebacks, forward_time); 602 } 603 604 // Here we charge the headerDelay that takes into account the latencies 605 // of the bus, if the packet comes from it. 606 // The latency charged it is just lat that is the value of lookupLatency 607 // modified by access() function, or if not just lookupLatency. 608 // In case of a hit we are neglecting response latency. 609 // In case of a miss we are neglecting forward latency. 610 Tick request_time = clockEdge(lat) + pkt->headerDelay; 611 // Here we reset the timing of the packet. 
    pkt->headerDelay = pkt->payloadDelay = 0;

    // track time of availability of next prefetch, if any
    Tick next_pf_time = MaxTick;

    bool needsResponse = pkt->needsResponse();

    if (satisfied) {
        // should never be satisfying an uncacheable access as we
        // flush and invalidate any existing block as part of the
        // lookup
        assert(!pkt->req->isUncacheable());

        // hit (for all other request types)

        if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
            if (blk)
                blk->status &= ~BlkHWPrefetched;

            // Don't notify on SWPrefetch
            if (!pkt->cmd.isSWPrefetch())
                next_pf_time = prefetcher->notify(pkt);
        }

        if (needsResponse) {
            pkt->makeTimingResponse();
            // @todo: Make someone pay for this
            pkt->headerDelay = pkt->payloadDelay = 0;

            // In this case we are considering request_time that takes
            // into account the delay of the xbar, if any, and just
            // lat, neglecting responseLatency, modelling hit latency
            // just as lookupLatency, or the value of lat overridden
            // by access(), which calls the accessBlock() function.
            cpuSidePort->schedTimingResp(pkt, request_time);
        } else {
            /// @todo nominally we should just delete the packet here,
            /// however, until 4-phase stuff we can't because sending cache is
            /// still relying on it. If the block is found in access(),
            /// CleanEvict and Writeback messages will be deleted here as
            /// well.
            pendingDelete.push_back(pkt);
        }
    } else {
        // miss

        Addr blk_addr = blockAlign(pkt->getAddr());

        // ignore any existing MSHR if we are dealing with an
        // uncacheable request
        MSHR *mshr = pkt->req->isUncacheable() ? nullptr :
            mshrQueue.findMatch(blk_addr, pkt->isSecure());

        // Software prefetch handling:
        // To keep the core from waiting on data it won't look at
        // anyway, send back a response with dummy data. Miss handling
        // will continue asynchronously. Unfortunately, the core will
        // insist upon freeing original Packet/Request, so we have to
        // create a new pair with a different lifecycle. Note that this
        // processing happens before any MSHR munging on the behalf of
        // this request because this new Request will be the one stored
        // into the MSHRs, not the original.
        if (pkt->cmd.isSWPrefetch()) {
            assert(needsResponse);
            assert(pkt->req->hasPaddr());
            assert(!pkt->req->isUncacheable());

            // There's no reason to add a prefetch as an additional target
            // to an existing MSHR. If an outstanding request is already
            // in progress, there is nothing for the prefetch to do.
            // If this is the case, we don't even create a request at all.
            PacketPtr pf = nullptr;

            if (!mshr) {
                // copy the request and create a new SoftPFReq packet
                RequestPtr req = new Request(pkt->req->getPaddr(),
                                             pkt->req->getSize(),
                                             pkt->req->getFlags(),
                                             pkt->req->masterId());
                pf = new Packet(req, pkt->cmd);
                pf->allocate();
                assert(pf->getAddr() == pkt->getAddr());
                assert(pf->getSize() == pkt->getSize());
            }

            pkt->makeTimingResponse();
            // for debugging, set all the bits in the response data
            // (also keeps valgrind from complaining when debugging settings
            // print out instruction results)
            std::memset(pkt->getPtr<uint8_t>(), 0xFF, pkt->getSize());
            // request_time is used here, taking into account lat and the delay
            // charged if the packet comes from the xbar.
            cpuSidePort->schedTimingResp(pkt, request_time);

            // If an outstanding request is in progress (we found an
            // MSHR) this is set to null
            pkt = pf;
        }

        if (mshr) {
            /// MSHR hit
            /// @note writebacks will be checked in getNextMSHR()
            /// for any conflicting requests to the same block

            //@todo remove hw_pf here

            // Coalesce unless it was a software prefetch (see above).
            if (pkt) {
                assert(pkt->cmd != MemCmd::Writeback);
                // CleanEvicts corresponding to blocks which have outstanding
                // requests in MSHRs can be deleted here.
                if (pkt->cmd == MemCmd::CleanEvict) {
                    pendingDelete.push_back(pkt);
                } else {
                    DPRINTF(Cache, "%s coalescing MSHR for %s addr %#llx size %d\n",
                            __func__, pkt->cmdString(), pkt->getAddr(),
                            pkt->getSize());

                    assert(pkt->req->masterId() < system->maxMasters());
                    mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
                    if (mshr->threadNum != 0/*pkt->req->threadId()*/) {
                        mshr->threadNum = -1;
                    }
                    // We use forward_time here because it is the same
                    // considering new targets. We have multiple
                    // requests for the same address here. It
                    // specifies the latency to allocate an internal
                    // buffer and to schedule an event to the queued
                    // port and also takes into account the additional
                    // delay of the xbar.
                    mshr->allocateTarget(pkt, forward_time, order++);
                    if (mshr->getNumTargets() == numTarget) {
                        noTargetMSHR = mshr;
                        setBlocked(Blocked_NoTargets);
                        // need to be careful with this... if this mshr isn't
                        // ready yet (i.e. time > curTick()), we don't want to
                        // move it ahead of mshrs that are ready
                        // mshrQueue.moveToFront(mshr);
                    }
                }
                // We should call the prefetcher regardless of whether the
                // request is satisfied, and regardless of whether it is in
                // the MSHR or not. The request could be a ReadReq hit, but
                // still not satisfied (potentially because of a prior write
                // to the same cache line). So, even when not satisfied and
                // there is an MSHR already allocated for this, we need to
                // let the prefetcher know about the request.
                if (prefetcher) {
                    // Don't notify on SWPrefetch
                    if (!pkt->cmd.isSWPrefetch())
                        next_pf_time = prefetcher->notify(pkt);
                }
            }
        } else {
            // no MSHR
            assert(pkt->req->masterId() < system->maxMasters());
            if (pkt->req->isUncacheable()) {
                mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;
            } else {
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
            }

            if (pkt->evictingBlock() ||
                (pkt->req->isUncacheable() && pkt->isWrite())) {
                // We use forward_time here because there is an
                // uncached memory write, forwarded to WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                if (blk && blk->isValid()) {
                    // should have flushed and have no valid block
                    assert(!pkt->req->isUncacheable());

                    // If we have a write miss to a valid block, we
                    // need to mark the block non-readable. Otherwise
                    // if we allow reads while there's an outstanding
                    // write miss, the read could return stale data
                    // out of the cache block... a more aggressive
                    // system could detect the overlap (if any) and
                    // forward data out of the MSHRs, but we don't do
                    // that yet. Note that we do need to leave the
                    // block valid so that it stays in the cache, in
                    // case we get an upgrade response (and hence no
                    // new data) when the write miss completes.
                    // As long as CPUs do proper store/load forwarding
                    // internally, and have a sufficiently weak memory
                    // model, this is probably unnecessary, but at some
                    // point it must have seemed like we needed it...
                    assert(pkt->needsExclusive());
                    assert(!blk->isWritable());
                    blk->status &= ~BlkReadable;
                }
                // Here we are using forward_time, modelling the latency of
                // a miss (outbound) just as forwardLatency, neglecting the
                // lookupLatency component.
                allocateMissBuffer(pkt, forward_time);
            }

            if (prefetcher) {
                // Don't notify on SWPrefetch
                if (!pkt->cmd.isSWPrefetch())
                    next_pf_time = prefetcher->notify(pkt);
            }
        }
    }

    if (next_pf_time != MaxTick)
        schedMemSideSendEvent(next_pf_time);

    return true;
}

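// Translate a CPU-side request into the packet sent towards memory on
// a miss, choosing the bus command (Upgrade, ReadEx, ReadShared,
// ReadClean, Invalidate, ...) based on the block state and on whether
// an exclusive copy is needed.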
// See comment in cache.hh.
PacketPtr
Cache::getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                    bool needsExclusive) const
{
    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable()) {
        // note that at the point we see the uncacheable request we
        // flush any block, but there could be an outstanding MSHR,
        // and the cache could have filled again before we actually
        // send out the forwarded uncacheable request (blk could thus
        // be non-null)
        return NULL;
    }

    if (!blkValid &&
        (cpu_pkt->isUpgrade() ||
         cpu_pkt->evictingBlock())) {
        // Writebacks that weren't allocated in access() and upgrades
        // from upper-level caches that missed completely just go
        // through.
        return NULL;
    }

    assert(cpu_pkt->needsResponse());

    MemCmd cmd;
    // @TODO make useUpgrades a parameter.
    // Note that ownership protocols require upgrade, otherwise a
    // write miss on a shared owned block will generate a ReadExcl,
    // which will clobber the owned copy.
    const bool useUpgrades = true;
    if (blkValid && useUpgrades) {
        // only reason to be here is that blk is shared
        // (read-only) and we need exclusive
        assert(needsExclusive);
        assert(!blk->isWritable());
        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
               cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
        // Even though this SC will fail, we still need to send out the
        // request and get the data to supply it to other snoopers in the case
        // where the determination that the StoreCond fails is delayed due to
        // all caches not being on the same local bus.
        cmd = MemCmd::SCUpgradeFailReq;
    } else if (cpu_pkt->cmd == MemCmd::WriteLineReq) {
        // forward as invalidate to all other caches, this gives us
        // the line in exclusive state, and invalidates all other
        // copies
        cmd = MemCmd::InvalidateReq;
    } else {
        // block is invalid
        cmd = needsExclusive ? MemCmd::ReadExReq :
            (isReadOnly ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
    }
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);

    // if there are sharers in the upper levels, pass that info downstream
    if (cpu_pkt->sharedAsserted()) {
        // note that cpu_pkt may have spent a considerable time in the
        // MSHR queue and that the information could possibly be out
        // of date, however, there is no harm in conservatively
        // assuming the block is shared
        pkt->assertShared();
        DPRINTF(Cache, "%s passing shared from %s to %s addr %#llx size %d\n",
                __func__, cpu_pkt->cmdString(), pkt->cmdString(),
                pkt->getAddr(), pkt->getSize());
    }

    // the packet should be block aligned
    assert(pkt->getAddr() == blockAlign(pkt->getAddr()));

    pkt->allocate();
    DPRINTF(Cache, "%s created %s from %s for addr %#llx size %d\n",
            __func__, pkt->cmdString(), cpu_pkt->cmdString(), pkt->getAddr(),
            pkt->getSize());
    return pkt;
}


Tick
Cache::recvAtomic(PacketPtr pkt)
{
    // We are in atomic mode so we pay just for lookupLatency here.
    Cycles lat = lookupLatency;
    // @TODO: make this a parameter
    bool last_level_cache = false;

    // Forward the request if the system is in cache bypass mode.
    if (system->bypassCaches())
        return ticksToCycles(memSidePort->sendAtomic(pkt));

    promoteWholeLineWrites(pkt);

    if (pkt->memInhibitAsserted()) {
        // have to invalidate ourselves and any lower caches even if
        // upper cache will be responding
        if (pkt->isInvalidate()) {
            CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
            if (blk && blk->isValid()) {
                tags->invalidate(blk);
                blk->invalidate();
                DPRINTF(Cache, "rcvd mem-inhibited %s on %#llx (%s):"
                        " invalidating\n",
                        pkt->cmdString(), pkt->getAddr(),
                        pkt->isSecure() ? "s" : "ns");
            }
            if (!last_level_cache) {
                DPRINTF(Cache, "forwarding mem-inhibited %s on %#llx (%s)\n",
                        pkt->cmdString(), pkt->getAddr(),
                        pkt->isSecure() ? "s" : "ns");
                lat += ticksToCycles(memSidePort->sendAtomic(pkt));
            }
        } else {
            DPRINTF(Cache, "rcvd mem-inhibited %s on %#llx: not responding\n",
                    pkt->cmdString(), pkt->getAddr());
        }

        return lat * clockPeriod();
    }

    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    CacheBlk *blk = NULL;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
| 492void 493Cache::recvTimingSnoopResp(PacketPtr pkt) 494{ 495 DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__, 496 pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 497 498 assert(pkt->isResponse()); 499 500 // must be cache-to-cache response from upper to lower level 501 ForwardResponseRecord *rec = 502 dynamic_cast<ForwardResponseRecord *>(pkt->senderState); 503 assert(!system->bypassCaches()); 504 505 if (rec == NULL) { 506 // @todo What guarantee do we have that this HardPFResp is 507 // actually for this cache, and not a cache closer to the 508 // memory? 509 assert(pkt->cmd == MemCmd::HardPFResp); 510 // Check if it's a prefetch response and handle it. We shouldn't 511 // get any other kinds of responses without FRRs. 512 DPRINTF(Cache, "Got prefetch response from above for addr %#llx (%s)\n", 513 pkt->getAddr(), pkt->isSecure() ? "s" : "ns"); 514 recvTimingResp(pkt); 515 return; 516 } 517 518 pkt->popSenderState(); 519 delete rec; 520 // forwardLatency is set here because there is a response from an 521 // upper level cache. 522 // To pay the delay that occurs if the packet comes from the bus, 523 // we charge also headerDelay. 524 Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay; 525 // Reset the timing of the packet. 526 pkt->headerDelay = pkt->payloadDelay = 0; 527 memSidePort->schedTimingSnoopResp(pkt, snoop_resp_time); 528} 529 530void 531Cache::promoteWholeLineWrites(PacketPtr pkt) 532{ 533 // Cache line clearing instructions 534 if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) && 535 (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) { 536 pkt->cmd = MemCmd::WriteLineReq; 537 DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n"); 538 } 539} 540 541bool 542Cache::recvTimingReq(PacketPtr pkt) 543{ 544 DPRINTF(CacheTags, "%s tags: %s\n", __func__, tags->print()); 545//@todo Add back in MemDebug Calls 546// MemDebug::cacheAccess(pkt); 547 548 549 /// @todo temporary hack to deal with memory corruption issue until 550 /// 4-phase transactions are complete 551 for (int x = 0; x < pendingDelete.size(); x++) 552 delete pendingDelete[x]; 553 pendingDelete.clear(); 554 555 assert(pkt->isRequest()); 556 557 // Just forward the packet if caches are disabled. 558 if (system->bypassCaches()) { 559 // @todo This should really enqueue the packet rather 560 bool M5_VAR_USED success = memSidePort->sendTimingReq(pkt); 561 assert(success); 562 return true; 563 } 564 565 promoteWholeLineWrites(pkt); 566 567 if (pkt->memInhibitAsserted()) { 568 // a cache above us (but not where the packet came from) is 569 // responding to the request 570 DPRINTF(Cache, "mem inhibited on addr %#llx (%s): not responding\n", 571 pkt->getAddr(), pkt->isSecure() ? 
"s" : "ns"); 572 573 // if the packet needs exclusive, and the cache that has 574 // promised to respond (setting the inhibit flag) is not 575 // providing exclusive (it is in O vs M state), we know that 576 // there may be other shared copies in the system; go out and 577 // invalidate them all 578 if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) { 579 // create a downstream express snoop with cleared packet 580 // flags, there is no need to allocate any data as the 581 // packet is merely used to co-ordinate state transitions 582 Packet *snoop_pkt = new Packet(pkt, true, false); 583 584 // also reset the bus time that the original packet has 585 // not yet paid for 586 snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0; 587 588 // make this an instantaneous express snoop, and let the 589 // other caches in the system know that the packet is 590 // inhibited, because we have found the authorative copy 591 // (O) that will supply the right data 592 snoop_pkt->setExpressSnoop(); 593 snoop_pkt->assertMemInhibit(); 594 595 // this express snoop travels towards the memory, and at 596 // every crossbar it is snooped upwards thus reaching 597 // every cache in the system 598 bool M5_VAR_USED success = memSidePort->sendTimingReq(snoop_pkt); 599 // express snoops always succeed 600 assert(success); 601 602 // main memory will delete the packet 603 } 604 605 /// @todo nominally we should just delete the packet here, 606 /// however, until 4-phase stuff we can't because sending 607 /// cache is still relying on it. 608 pendingDelete.push_back(pkt); 609 610 // no need to take any action in this particular cache as the 611 // caches along the path to memory are allowed to keep lines 612 // in a shared state, and a cache above us already committed 613 // to responding 614 return true; 615 } 616 617 // anything that is merely forwarded pays for the forward latency and 618 // the delay provided by the crossbar 619 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 620 621 // We use lookupLatency here because it is used to specify the latency 622 // to access. 623 Cycles lat = lookupLatency; 624 CacheBlk *blk = NULL; 625 bool satisfied = false; 626 { 627 PacketList writebacks; 628 // Note that lat is passed by reference here. The function 629 // access() calls accessBlock() which can modify lat value. 630 satisfied = access(pkt, blk, lat, writebacks); 631 632 // copy writebacks to write buffer here to ensure they logically 633 // proceed anything happening below 634 doWritebacks(writebacks, forward_time); 635 } 636 637 // Here we charge the headerDelay that takes into account the latencies 638 // of the bus, if the packet comes from it. 639 // The latency charged it is just lat that is the value of lookupLatency 640 // modified by access() function, or if not just lookupLatency. 641 // In case of a hit we are neglecting response latency. 642 // In case of a miss we are neglecting forward latency. 643 Tick request_time = clockEdge(lat) + pkt->headerDelay; 644 // Here we reset the timing of the packet. 
645 pkt->headerDelay = pkt->payloadDelay = 0; 646 647 // track time of availability of next prefetch, if any 648 Tick next_pf_time = MaxTick; 649 650 bool needsResponse = pkt->needsResponse(); 651 652 if (satisfied) { 653 // should never be satisfying an uncacheable access as we 654 // flush and invalidate any existing block as part of the 655 // lookup 656 assert(!pkt->req->isUncacheable()); 657 658 // hit (for all other request types) 659 660 if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) { 661 if (blk) 662 blk->status &= ~BlkHWPrefetched; 663 664 // Don't notify on SWPrefetch 665 if (!pkt->cmd.isSWPrefetch()) 666 next_pf_time = prefetcher->notify(pkt); 667 } 668 669 if (needsResponse) { 670 pkt->makeTimingResponse(); 671 // @todo: Make someone pay for this 672 pkt->headerDelay = pkt->payloadDelay = 0; 673 674 // In this case we are considering request_time that takes 675 // into account the delay of the xbar, if any, and just 676 // lat, neglecting responseLatency, modelling hit latency 677 // just as lookupLatency or the value of lat overridden 678 // by access(), which calls the accessBlock() function. 679 cpuSidePort->schedTimingResp(pkt, request_time); 680 } else { 681 /// @todo nominally we should just delete the packet here, 682 /// however, until 4-phase stuff we can't because the sending cache is 683 /// still relying on it. If the block is found in access(), 684 /// CleanEvict and Writeback messages will be deleted here as 685 /// well. 686 pendingDelete.push_back(pkt); 687 } 688 } else { 689 // miss 690 691 Addr blk_addr = blockAlign(pkt->getAddr()); 692 693 // ignore any existing MSHR if we are dealing with an 694 // uncacheable request 695 MSHR *mshr = pkt->req->isUncacheable() ? nullptr : 696 mshrQueue.findMatch(blk_addr, pkt->isSecure()); 697 698 // Software prefetch handling: 699 // To keep the core from waiting on data it won't look at 700 // anyway, send back a response with dummy data. Miss handling 701 // will continue asynchronously. Unfortunately, the core will 702 // insist upon freeing the original Packet/Request, so we have to 703 // create a new pair with a different lifecycle. Note that this 704 // processing happens before any MSHR munging on behalf of 705 // this request because this new Request will be the one stored 706 // into the MSHRs, not the original. 707 if (pkt->cmd.isSWPrefetch()) { 708 assert(needsResponse); 709 assert(pkt->req->hasPaddr()); 710 assert(!pkt->req->isUncacheable()); 711 712 // There's no reason to add a prefetch as an additional target 713 // to an existing MSHR. If an outstanding request is already 714 // in progress, there is nothing for the prefetch to do. 715 // If this is the case, we don't even create a request at all. 716 PacketPtr pf = nullptr; 717 718 if (!mshr) { 719 // copy the request and create a new SoftPFReq packet 720 RequestPtr req = new Request(pkt->req->getPaddr(), 721 pkt->req->getSize(), 722 pkt->req->getFlags(), 723 pkt->req->masterId()); 724 pf = new Packet(req, pkt->cmd); 725 pf->allocate(); 726 assert(pf->getAddr() == pkt->getAddr()); 727 assert(pf->getSize() == pkt->getSize()); 728 } 729 730 pkt->makeTimingResponse(); 731 // for debugging, set all the bits in the response data 732 // (also keeps valgrind from complaining when debugging settings 733 // print out instruction results) 734 std::memset(pkt->getPtr<uint8_t>(), 0xFF, pkt->getSize()); 735 // request_time is used here, taking into account lat and the delay 736 // charged if the packet comes from the xbar.
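            // (the core sees only this immediate dummy response; if a pf
            // packet was created above, the real line is still fetched
            // asynchronously through the MSHR path below)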
737 cpuSidePort->schedTimingResp(pkt, request_time); 738 739 // If an outstanding request is in progress (we found an 740 // MSHR) this is set to null 741 pkt = pf; 742 } 743 744 if (mshr) { 745 /// MSHR hit 746 /// @note writebacks will be checked in getNextMSHR() 747 /// for any conflicting requests to the same block 748 749 //@todo remove hw_pf here 750 751 // Coalesce unless it was a software prefetch (see above). 752 if (pkt) { 753 assert(pkt->cmd != MemCmd::Writeback); 754 // CleanEvicts corresponding to blocks which have outstanding 755 // requests in MSHRs can be deleted here. 756 if (pkt->cmd == MemCmd::CleanEvict) { 757 pendingDelete.push_back(pkt); 758 } else { 759 DPRINTF(Cache, "%s coalescing MSHR for %s addr %#llx size %d\n", 760 __func__, pkt->cmdString(), pkt->getAddr(), 761 pkt->getSize()); 762 763 assert(pkt->req->masterId() < system->maxMasters()); 764 mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++; 765 if (mshr->threadNum != 0/*pkt->req->threadId()*/) { 766 mshr->threadNum = -1; 767 } 768 // We use forward_time here because it is the same as for 769 // any new target. We have multiple 770 // requests for the same address here. It 771 // specifies the latency to allocate an internal 772 // buffer and to schedule an event to the queued 773 // port and also takes into account the additional 774 // delay of the xbar. 775 mshr->allocateTarget(pkt, forward_time, order++); 776 if (mshr->getNumTargets() == numTarget) { 777 noTargetMSHR = mshr; 778 setBlocked(Blocked_NoTargets); 779 // need to be careful with this... if this mshr isn't 780 // ready yet (i.e. time > curTick()), we don't want to 781 // move it ahead of mshrs that are ready 782 // mshrQueue.moveToFront(mshr); 783 } 784 } 785 // We should call the prefetcher regardless of whether the request 786 // is satisfied or not, and regardless of whether it is in the MSHR 787 // or not. The request could be a ReadReq hit, but still not 788 // satisfied (potentially because of a prior write to the same 789 // cache line). So, even when not satisfied, if there is an MSHR 790 // already allocated for this, we need to let the prefetcher know 791 // about the request 792 if (prefetcher) { 793 // Don't notify on SWPrefetch 794 if (!pkt->cmd.isSWPrefetch()) 795 next_pf_time = prefetcher->notify(pkt); 796 } 797 } 798 } else { 799 // no MSHR 800 assert(pkt->req->masterId() < system->maxMasters()); 801 if (pkt->req->isUncacheable()) { 802 mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++; 803 } else { 804 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++; 805 } 806 807 if (pkt->evictingBlock() || 808 (pkt->req->isUncacheable() && pkt->isWrite())) { 809 // We use forward_time here because this is a writeback or an 810 // uncacheable memory write, forwarded to the WriteBuffer. 811 allocateWriteBuffer(pkt, forward_time); 812 } else { 813 if (blk && blk->isValid()) { 814 // should have flushed and have no valid block 815 assert(!pkt->req->isUncacheable()); 816 817 // If we have a write miss to a valid block, we 818 // need to mark the block non-readable. Otherwise 819 // if we allow reads while there's an outstanding 820 // write miss, the read could return stale data 821 // out of the cache block... a more aggressive 822 // system could detect the overlap (if any) and 823 // forward data out of the MSHRs, but we don't do 824 // that yet. Note that we do need to leave the 825 // block valid so that it stays in the cache, in 826 // case we get an upgrade response (and hence no 827 // new data) when the write miss completes.
828 // As long as CPUs do proper store/load forwarding 829 // internally, and have a sufficiently weak memory 830 // model, this is probably unnecessary, but at some 831 // point it must have seemed like we needed it... 832 assert(pkt->needsExclusive()); 833 assert(!blk->isWritable()); 834 blk->status &= ~BlkReadable; 835 } 836 // Here we are using forward_time, modelling the latency of 837 // a miss (outbound) just as forwardLatency, neglecting the 838 // lookupLatency component. 839 allocateMissBuffer(pkt, forward_time); 840 } 841 842 if (prefetcher) { 843 // Don't notify on SWPrefetch 844 if (!pkt->cmd.isSWPrefetch()) 845 next_pf_time = prefetcher->notify(pkt); 846 } 847 } 848 } 849 850 if (next_pf_time != MaxTick) 851 schedMemSideSendEvent(next_pf_time); 852 853 return true; 854} 855 856 857// See comment in cache.hh. 858PacketPtr 859Cache::getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk, 860 bool needsExclusive) const 861{ 862 bool blkValid = blk && blk->isValid(); 863 864 if (cpu_pkt->req->isUncacheable()) { 865 // note that at the point we see the uncacheable request we 866 // flush any block, but there could be an outstanding MSHR, 867 // and the cache could have filled again before we actually 868 // send out the forwarded uncacheable request (blk could thus 869 // be non-null) 870 return NULL; 871 } 872 873 if (!blkValid && 874 (cpu_pkt->isUpgrade() || 875 cpu_pkt->evictingBlock())) { 876 // Writebacks that weren't allocated in access() and upgrades 877 // from upper-level caches that missed completely just go 878 // through. 879 return NULL; 880 } 881 882 assert(cpu_pkt->needsResponse()); 883 884 MemCmd cmd; 885 // @TODO make useUpgrades a parameter. 886 // Note that ownership protocols require upgrade, otherwise a 887 // write miss on a shared owned block will generate a ReadExcl, 888 // which will clobber the owned copy. 889 const bool useUpgrades = true; 890 if (blkValid && useUpgrades) { 891 // the only reason to be here is that blk is shared 892 // (read-only) and we need exclusive 893 assert(needsExclusive); 894 assert(!blk->isWritable()); 895 cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq; 896 } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq || 897 cpu_pkt->cmd == MemCmd::StoreCondFailReq) { 898 // Even though this SC will fail, we still need to send out the 899 // request and get the data to supply it to other snoopers in the case 900 // where the determination that the StoreCond fails is delayed due to 901 // all caches not being on the same local bus. 902 cmd = MemCmd::SCUpgradeFailReq; 903 } else if (cpu_pkt->cmd == MemCmd::WriteLineReq) { 904 // forward as invalidate to all other caches, this gives us 905 // the line in exclusive state, and invalidates all other 906 // copies 907 cmd = MemCmd::InvalidateReq; 908 } else { 909 // block is invalid 910 cmd = needsExclusive ? MemCmd::ReadExReq : 911 (isReadOnly ?
MemCmd::ReadCleanReq : MemCmd::ReadSharedReq); 912 } 913 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize); 914 915 // if there are sharers in the upper levels, pass that info downstream 916 if (cpu_pkt->sharedAsserted()) { 917 // note that cpu_pkt may have spent a considerable time in the 918 // MSHR queue and that the information could possibly be out 919 // of date, however, there is no harm in conservatively 920 // assuming the block is shared 921 pkt->assertShared(); 922 DPRINTF(Cache, "%s passing shared from %s to %s addr %#llx size %d\n", 923 __func__, cpu_pkt->cmdString(), pkt->cmdString(), 924 pkt->getAddr(), pkt->getSize()); 925 } 926 927 // the packet should be block aligned 928 assert(pkt->getAddr() == blockAlign(pkt->getAddr())); 929 930 pkt->allocate(); 931 DPRINTF(Cache, "%s created %s from %s for addr %#llx size %d\n", 932 __func__, pkt->cmdString(), cpu_pkt->cmdString(), pkt->getAddr(), 933 pkt->getSize()); 934 return pkt; 935} 936 937 938Tick 939Cache::recvAtomic(PacketPtr pkt) 940{ 941 // We are in atomic mode so we pay just for lookupLatency here. 942 Cycles lat = lookupLatency; 943 // @TODO: make this a parameter 944 bool last_level_cache = false; 945 946 // Forward the request if the system is in cache bypass mode. 947 if (system->bypassCaches()) 948 return ticksToCycles(memSidePort->sendAtomic(pkt)); 949 950 promoteWholeLineWrites(pkt); 951 952 if (pkt->memInhibitAsserted()) { 953 // have to invalidate ourselves and any lower caches even if 954 // upper cache will be responding 955 if (pkt->isInvalidate()) { 956 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure()); 957 if (blk && blk->isValid()) { 958 tags->invalidate(blk); 959 blk->invalidate(); 960 DPRINTF(Cache, "rcvd mem-inhibited %s on %#llx (%s):" 961 " invalidating\n", 962 pkt->cmdString(), pkt->getAddr(), 963 pkt->isSecure() ? "s" : "ns"); 964 } 965 if (!last_level_cache) { 966 DPRINTF(Cache, "forwarding mem-inhibited %s on %#llx (%s)\n", 967 pkt->cmdString(), pkt->getAddr(), 968 pkt->isSecure() ? "s" : "ns"); 969 lat += ticksToCycles(memSidePort->sendAtomic(pkt)); 970 } 971 } else { 972 DPRINTF(Cache, "rcvd mem-inhibited %s on %#llx: not responding\n", 973 pkt->cmdString(), pkt->getAddr()); 974 } 975 976 return lat * clockPeriod(); 977 } 978 979 // should assert here that there are no outstanding MSHRs or 980 // writebacks... that would mean that someone used an atomic 981 // access in timing mode 982 983 CacheBlk *blk = NULL; 984 PacketList writebacks; 985 bool satisfied = access(pkt, blk, lat, writebacks); 986 987 // handle writebacks resulting from the access here to ensure they 988 // logically precede anything happening below
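    // In atomic mode there is no write buffer to post to, so the
    // writebacks are sent down the memory-side port right away. In its
    // simplest form doWritebacksAtomic() amounts to something like:
    //
    //     while (!writebacks.empty()) {
    //         PacketPtr wbPkt = writebacks.front();
    //         memSidePort->sendAtomic(wbPkt);
    //         writebacks.pop_front();
    //         delete wbPkt;
    //     }
    //
    // (any CleanEvict/cached-above filtering is left to the helper)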
989 doWritebacksAtomic(writebacks);
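    // What follows is the atomic miss handling: build a downstream
    // packet (or forward the original request unchanged), issue it
    // atomically, and fold the response back into pkt before returning.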
990 991 if (!satisfied) { 992 // MISS 993 994 PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive()); 995 996 bool is_forward = (bus_pkt == NULL); 997 998 if (is_forward) { 999 // just forwarding the same request to the next level 1000 // no local cache operation involved 1001 bus_pkt = pkt; 1002 } 1003 1004 DPRINTF(Cache, "Sending an atomic %s for %#llx (%s)\n", 1005 bus_pkt->cmdString(), bus_pkt->getAddr(), 1006 bus_pkt->isSecure() ? "s" : "ns"); 1007 1008#if TRACING_ON 1009 CacheBlk::State old_state = blk ? blk->status : 0; 1010#endif 1011 1012 lat += ticksToCycles(memSidePort->sendAtomic(bus_pkt)); 1013 1014 // We are now dealing with the response handling 1015 DPRINTF(Cache, "Receive response: %s for addr %#llx (%s) in state %i\n", 1016 bus_pkt->cmdString(), bus_pkt->getAddr(), 1017 bus_pkt->isSecure() ? "s" : "ns", 1018 old_state); 1019 1020 // If packet was a forward, the response (if any) is already 1021 // in place in the bus_pkt == pkt structure, so we don't need 1022 // to do anything. Otherwise, use the separate bus_pkt to 1023 // generate response to pkt and then delete it. 1024 if (!is_forward) { 1025 if (pkt->needsResponse()) { 1026 assert(bus_pkt->isResponse()); 1027 if (bus_pkt->isError()) { 1028 pkt->makeAtomicResponse(); 1029 pkt->copyError(bus_pkt); 1030 } else if (pkt->cmd == MemCmd::InvalidateReq) { 1031 if (blk) { 1032 // invalidate response to a cache that received 1033 // an invalidate request 1034 satisfyCpuSideRequest(pkt, blk); 1035 } 1036 } else if (pkt->cmd == MemCmd::WriteLineReq) { 1037 // note the use of pkt, not bus_pkt here. 1038 1039 // write-line request to the cache that promoted 1040 // the write to a whole line 1041 blk = handleFill(pkt, blk, writebacks); 1042 satisfyCpuSideRequest(pkt, blk); 1043 } else if (bus_pkt->isRead() || 1044 bus_pkt->cmd == MemCmd::UpgradeResp) { 1045 // we're updating cache state to allow us to 1046 // satisfy the upstream request from the cache 1047 blk = handleFill(bus_pkt, blk, writebacks); 1048 satisfyCpuSideRequest(pkt, blk); 1049 } else { 1050 // we're satisfying the upstream request without 1051 // modifying cache state, e.g., a write-through 1052 pkt->makeAtomicResponse(); 1053 } 1054 } 1055 delete bus_pkt; 1056 } 1057 } 1058 1059 // Note that we don't invoke the prefetcher at all in atomic mode. 1060 // It's not clear how to do it properly, particularly for 1061 // prefetchers that aggressively generate prefetch candidates and 1062 // rely on bandwidth contention to throttle them; these will tend 1063 // to pollute the cache in atomic mode since there is no bandwidth 1064 // contention. If we ever do want to enable prefetching in atomic 1065 // mode, though, this is the place to do it... see timingAccess() 1066 // for an example (though we'd want to issue the prefetch(es) 1067 // immediately rather than calling requestMemSideBus() as we do 1068 // there). 1069 1070 // Handle writebacks (from the response handling) if needed
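    // (handleFill() above may have evicted a victim while filling, so
    // the writeback list can be non-empty again even though it was
    // drained before the miss was issued)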
1071 doWritebacksAtomic(writebacks);
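    // if the packet has not been turned into a response along the way
    // (e.g. the access was satisfied entirely within this cache),
    // convert it now before handing it back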
1072 1073 if (pkt->needsResponse()) { 1074 pkt->makeAtomicResponse(); 1075 } 1076 1077 return lat * clockPeriod(); 1078} 1079 1080 1081void 1082Cache::functionalAccess(PacketPtr pkt, bool fromCpuSide) 1083{ 1084 if (system->bypassCaches()) { 1085 // Packets from the memory side are snoop requests and 1086 // shouldn't happen in bypass mode. 1087 assert(fromCpuSide); 1088 1089 // The cache should be flushed if we are in cache bypass mode, 1090 // so we don't need to check if we need to update anything. 1091 memSidePort->sendFunctional(pkt); 1092 return; 1093 } 1094 1095 Addr blk_addr = blockAlign(pkt->getAddr()); 1096 bool is_secure = pkt->isSecure(); 1097 CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure); 1098 MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure); 1099 1100 pkt->pushLabel(name()); 1101 1102 CacheBlkPrintWrapper cbpw(blk); 1103 1104 // Note that just because an L2/L3 has valid data doesn't mean an 1105 // L1 doesn't have a more up-to-date modified copy that still 1106 // needs to be found. As a result we always update the request if 1107 // we have it, but only declare it satisfied if we are the owner. 1108 1109 // see if we have data at all (owned or otherwise) 1110 bool have_data = blk && blk->isValid() 1111 && pkt->checkFunctional(&cbpw, blk_addr, is_secure, blkSize, 1112 blk->data); 1113 1114 // data we have is dirty if marked as such or if valid & ownership 1115 // pending due to outstanding UpgradeReq 1116 bool have_dirty = 1117 have_data && (blk->isDirty() || 1118 (mshr && mshr->inService && mshr->isPendingDirty())); 1119 1120 bool done = have_dirty 1121 || cpuSidePort->checkFunctional(pkt) 1122 || mshrQueue.checkFunctional(pkt, blk_addr) 1123 || writeBuffer.checkFunctional(pkt, blk_addr) 1124 || memSidePort->checkFunctional(pkt); 1125 1126 DPRINTF(Cache, "functional %s %#llx (%s) %s%s%s\n", 1127 pkt->cmdString(), pkt->getAddr(), is_secure ? "s" : "ns", 1128 (blk && blk->isValid()) ? "valid " : "", 1129 have_data ? "data " : "", done ? "done " : ""); 1130 1131 // We're leaving the cache, so pop cache->name() label 1132 pkt->popLabel(); 1133 1134 if (done) { 1135 pkt->makeResponse(); 1136 } else { 1137 // if it came as a request from the CPU side then make sure it 1138 // continues towards the memory side 1139 if (fromCpuSide) { 1140 memSidePort->sendFunctional(pkt); 1141 } else if (forwardSnoops && cpuSidePort->isSnooping()) { 1142 // if it came from the memory side, it must be a snoop request 1143 // and we should only forward it if we are forwarding snoops 1144 cpuSidePort->sendFunctionalSnoop(pkt); 1145 } 1146 } 1147} 1148 1149 1150///////////////////////////////////////////////////// 1151// 1152// Response handling: responses from the memory side 1153// 1154///////////////////////////////////////////////////// 1155 1156 1157void 1158Cache::recvTimingResp(PacketPtr pkt) 1159{ 1160 assert(pkt->isResponse()); 1161 1162 // all header delay should be paid for by the crossbar, unless 1163 // this is a prefetch response from above 1164 panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp, 1165 "%s saw a non-zero packet delay\n", name()); 1166 1167 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState); 1168 bool is_error = pkt->isError(); 1169 1170 assert(mshr); 1171 1172 if (is_error) { 1173 DPRINTF(Cache, "Cache received packet with error for addr %#llx (%s), " 1174 "cmd: %s\n", pkt->getAddr(), pkt->isSecure() ?
"s" : "ns", 1175 pkt->cmdString()); 1176 } 1177 1178 DPRINTF(Cache, "Handling response %s for addr %#llx size %d (%s)\n", 1179 pkt->cmdString(), pkt->getAddr(), pkt->getSize(), 1180 pkt->isSecure() ? "s" : "ns"); 1181 1182 MSHRQueue *mq = mshr->queue; 1183 bool wasFull = mq->isFull(); 1184 1185 if (mshr == noTargetMSHR) { 1186 // we always clear at least one target 1187 clearBlocked(Blocked_NoTargets); 1188 noTargetMSHR = NULL; 1189 } 1190 1191 // Initial target is used just for stats 1192 MSHR::Target *initial_tgt = mshr->getTarget(); 1193 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure()); 1194 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex(); 1195 Tick miss_latency = curTick() - initial_tgt->recvTime; 1196 PacketList writebacks; 1197 // We need forward_time here because we have a call of 1198 // allocateWriteBuffer() that need this parameter to specify the 1199 // time to request the bus. In this case we use forward latency 1200 // because there is a writeback. We pay also here for headerDelay 1201 // that is charged of bus latencies if the packet comes from the 1202 // bus. 1203 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 1204 1205 if (pkt->req->isUncacheable()) { 1206 assert(pkt->req->masterId() < system->maxMasters()); 1207 mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] += 1208 miss_latency; 1209 } else { 1210 assert(pkt->req->masterId() < system->maxMasters()); 1211 mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] += 1212 miss_latency; 1213 } 1214 1215 bool is_fill = !mshr->isForward && 1216 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp); 1217 1218 if (is_fill && !is_error) { 1219 DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n", 1220 pkt->getAddr()); 1221 1222 // give mshr a chance to do some dirty work 1223 mshr->handleFill(pkt, blk); 1224 1225 blk = handleFill(pkt, blk, writebacks); 1226 assert(blk != NULL); 1227 } 1228 1229 // allow invalidation responses originating from write-line 1230 // requests to be discarded 1231 bool discard_invalidate = false; 1232 1233 // First offset for critical word first calculations 1234 int initial_offset = initial_tgt->pkt->getOffset(blkSize); 1235 1236 while (mshr->hasTargets()) { 1237 MSHR::Target *target = mshr->getTarget(); 1238 Packet *tgt_pkt = target->pkt; 1239 1240 switch (target->source) { 1241 case MSHR::Target::FromCPU: 1242 Tick completion_time; 1243 // Here we charge on completion_time the delay of the xbar if the 1244 // packet comes from it, charged on headerDelay. 1245 completion_time = pkt->headerDelay; 1246 1247 // Software prefetch handling for cache closest to core 1248 if (tgt_pkt->cmd.isSWPrefetch()) { 1249 // a software prefetch would have already been ack'd immediately 1250 // with dummy data so the core would be able to retire it. 1251 // this request completes right here, so we deallocate it. 1252 delete tgt_pkt->req; 1253 delete tgt_pkt; 1254 break; // skip response 1255 } 1256 1257 // unlike the other packet flows, where data is found in other 1258 // caches or memory and brought back, write-line requests always 1259 // have the data right away, so the above check for "is fill?" 1260 // cannot actually be determined until examining the stored MSHR 1261 // state. We "catch up" with that logic here, which is duplicated 1262 // from above. 1263 if (tgt_pkt->cmd == MemCmd::WriteLineReq) { 1264 assert(!is_error); 1265 1266 // NB: we use the original packet here and not the response! 
1267 mshr->handleFill(tgt_pkt, blk); 1268 blk = handleFill(tgt_pkt, blk, writebacks); 1269 assert(blk != NULL); 1270 1271 // treat as a fill, and discard the invalidation 1272 // response 1273 is_fill = true; 1274 discard_invalidate = true; 1275 } 1276 1277 if (is_fill) { 1278 satisfyCpuSideRequest(tgt_pkt, blk, 1279 true, mshr->hasPostDowngrade()); 1280 1281 // How many bytes past the first request is this one 1282 int transfer_offset = 1283 tgt_pkt->getOffset(blkSize) - initial_offset; 1284 if (transfer_offset < 0) { 1285 transfer_offset += blkSize; 1286 } 1287 1288 // If not the critical word (non-zero offset), also charge 1289 // payloadDelay. responseLatency is the latency of the return 1290 // path from lower level caches/memory to an upper level cache 1291 // or the core. 1292 completion_time += clockEdge(responseLatency) + 1293 (transfer_offset ? pkt->payloadDelay : 0); 1294 1295 assert(!tgt_pkt->req->isUncacheable()); 1296 1297 assert(tgt_pkt->req->masterId() < system->maxMasters()); 1298 missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] += 1299 completion_time - target->recvTime; 1300 } else if (pkt->cmd == MemCmd::UpgradeFailResp) { 1301 // failed StoreCond upgrade 1302 assert(tgt_pkt->cmd == MemCmd::StoreCondReq || 1303 tgt_pkt->cmd == MemCmd::StoreCondFailReq || 1304 tgt_pkt->cmd == MemCmd::SCUpgradeFailReq); 1305 // responseLatency is the latency of the return path 1306 // from lower level caches/memory to an upper level cache or 1307 // the core. 1308 completion_time += clockEdge(responseLatency) + 1309 pkt->payloadDelay; 1310 tgt_pkt->req->setExtraData(0); 1311 } else { 1312 // not a cache fill, just forwarding response 1313 // responseLatency is the latency of the return path 1314 // from lower level caches/memory to the core. 1315 completion_time += clockEdge(responseLatency) + 1316 pkt->payloadDelay; 1317 if (pkt->isRead() && !is_error) { 1318 // sanity check 1319 assert(pkt->getAddr() == tgt_pkt->getAddr()); 1320 assert(pkt->getSize() >= tgt_pkt->getSize()); 1321 1322 tgt_pkt->setData(pkt->getConstPtr<uint8_t>()); 1323 } 1324 } 1325 tgt_pkt->makeTimingResponse(); 1326 // if this packet is an error, copy that to the new packet 1327 if (is_error) 1328 tgt_pkt->copyError(pkt); 1329 if (tgt_pkt->cmd == MemCmd::ReadResp && 1330 (pkt->isInvalidate() || mshr->hasPostInvalidate())) { 1331 // If intermediate cache got ReadRespWithInvalidate, 1332 // propagate that. Response should not have 1333 // isInvalidate() set otherwise.
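            // (rewriting the command tells the cache above to use the
            // data once and then invalidate its copy)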
1334                 tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate; 1335                 DPRINTF(Cache, "%s updated cmd to %s for addr %#llx\n", 1336                         __func__, tgt_pkt->cmdString(), tgt_pkt->getAddr()); 1337             } 1338             // Reset the bus additional time as it is now accounted for 1339             tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0; 1340             cpuSidePort->schedTimingResp(tgt_pkt, completion_time); 1341             break; 1342 1343           case MSHR::Target::FromPrefetcher: 1344             assert(tgt_pkt->cmd == MemCmd::HardPFReq); 1345             if (blk) 1346                 blk->status |= BlkHWPrefetched; 1347             delete tgt_pkt->req; 1348             delete tgt_pkt; 1349             break; 1350 1351           case MSHR::Target::FromSnoop: 1352             // I don't believe that a snoop can be in an error state 1353             assert(!is_error); 1354             // response to snoop request 1355             DPRINTF(Cache, "processing deferred snoop...\n"); 1356             assert(!(pkt->isInvalidate() && !mshr->hasPostInvalidate())); 1357             handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate()); 1358             break; 1359 1360           default: 1361             panic("Illegal target->source enum %d\n", target->source); 1362         } 1363 1364         mshr->popTarget(); 1365     } 1366 1367     if (blk && blk->isValid()) { 1368         // an invalidate response stemming from a write line request 1369         // should not invalidate the block, so check if the 1370         // invalidation should be discarded 1371         if ((pkt->isInvalidate() || mshr->hasPostInvalidate()) && 1372             !discard_invalidate) { 1373             assert(blk != tempBlock); 1374             tags->invalidate(blk); 1375             blk->invalidate(); 1376         } else if (mshr->hasPostDowngrade()) { 1377             blk->status &= ~BlkWritable; 1378         } 1379     } 1380 1381     if (mshr->promoteDeferredTargets()) { 1382         // avoid later read getting stale data while write miss is 1383         // outstanding... see comment in timingAccess() 1384         if (blk) { 1385             blk->status &= ~BlkReadable; 1386         } 1387         mq = mshr->queue; 1388         mq->markPending(mshr); 1389         schedMemSideSendEvent(clockEdge() + pkt->payloadDelay); 1390     } else { 1391         mq->deallocate(mshr); 1392         if (wasFull && !mq->isFull()) { 1393             clearBlocked((BlockedCause)mq->index); 1394         } 1395 1396         // Request the bus for a prefetch if this deallocation freed enough 1397         // MSHRs for a prefetch to take place 1398         if (prefetcher && mq == &mshrQueue && mshrQueue.canPrefetch()) { 1399             Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(), 1400                                          clockEdge()); 1401             if (next_pf_time != MaxTick) 1402                 schedMemSideSendEvent(next_pf_time); 1403         } 1404     } 1405     // reset the xbar additional timing as it is now accounted for 1406     pkt->headerDelay = pkt->payloadDelay = 0; 1407 1408     // copy writebacks to write buffer 1409     doWritebacks(writebacks, forward_time); 1410 1411     // if we used temp block, check to see if it's valid and then clear it out 1412     if (blk == tempBlock && tempBlock->isValid()) { 1413         // We use forwardLatency here because we are copying 1414         // Writebacks/CleanEvicts to write buffer. It specifies the latency to 1415         // allocate an internal buffer and to schedule an event to the 1416         // queued port. 1417         if (blk->isDirty()) { 1418             PacketPtr wbPkt = writebackBlk(blk); 1419             allocateWriteBuffer(wbPkt, forward_time); 1420             // Set BLOCK_CACHED flag if cached above. 1421             if (isCachedAbove(wbPkt)) 1422                 wbPkt->setBlockCached(); 1423         } else { 1424             PacketPtr wcPkt = cleanEvictBlk(blk); 1425             // Check to see if block is cached above.
If not allocate 1426 // write buffer 1427 if (isCachedAbove(wcPkt)) 1428 delete wcPkt; 1429 else 1430 allocateWriteBuffer(wcPkt, forward_time); 1431 } 1432 blk->invalidate(); 1433 } 1434 1435 DPRINTF(Cache, "Leaving %s with %s for addr %#llx\n", __func__, 1436 pkt->cmdString(), pkt->getAddr()); 1437 delete pkt; 1438} 1439 1440PacketPtr 1441Cache::writebackBlk(CacheBlk *blk) 1442{ 1443 chatty_assert(!isReadOnly, "Writeback from read-only cache"); 1444 assert(blk && blk->isValid() && blk->isDirty()); 1445 1446 writebacks[Request::wbMasterId]++; 1447 1448 Request *writebackReq = 1449 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0, 1450 Request::wbMasterId); 1451 if (blk->isSecure()) 1452 writebackReq->setFlags(Request::SECURE); 1453 1454 writebackReq->taskId(blk->task_id); 1455 blk->task_id= ContextSwitchTaskId::Unknown; 1456 blk->tickInserted = curTick(); 1457 1458 PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback); 1459 if (blk->isWritable()) { 1460 // not asserting shared means we pass the block in modified 1461 // state, mark our own block non-writeable 1462 blk->status &= ~BlkWritable; 1463 } else { 1464 // we are in the owned state, tell the receiver 1465 writeback->assertShared(); 1466 } 1467 1468 writeback->allocate(); 1469 std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize); 1470 1471 blk->status &= ~BlkDirty; 1472 return writeback; 1473} 1474 1475PacketPtr 1476Cache::cleanEvictBlk(CacheBlk *blk) 1477{ 1478 assert(blk && blk->isValid() && !blk->isDirty()); 1479 // Creating a zero sized write, a message to the snoop filter 1480 Request *req = 1481 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0, 1482 Request::wbMasterId); 1483 if (blk->isSecure()) 1484 req->setFlags(Request::SECURE); 1485 1486 req->taskId(blk->task_id); 1487 blk->task_id = ContextSwitchTaskId::Unknown; 1488 blk->tickInserted = curTick(); 1489 1490 PacketPtr pkt = new Packet(req, MemCmd::CleanEvict); 1491 pkt->allocate(); 1492 DPRINTF(Cache, "%s%s %x Create CleanEvict\n", pkt->cmdString(), 1493 pkt->req->isInstFetch() ? " (ifetch)" : "", 1494 pkt->getAddr()); 1495 1496 return pkt; 1497} 1498 1499void 1500Cache::memWriteback() 1501{ 1502 CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor); 1503 tags->forEachBlk(visitor); 1504} 1505 1506void 1507Cache::memInvalidate() 1508{ 1509 CacheBlkVisitorWrapper visitor(*this, &Cache::invalidateVisitor); 1510 tags->forEachBlk(visitor); 1511} 1512 1513bool 1514Cache::isDirty() const 1515{ 1516 CacheBlkIsDirtyVisitor visitor; 1517 tags->forEachBlk(visitor); 1518 1519 return visitor.isDirty(); 1520} 1521 1522bool 1523Cache::writebackVisitor(CacheBlk &blk) 1524{ 1525 if (blk.isDirty()) { 1526 assert(blk.isValid()); 1527 1528 Request request(tags->regenerateBlkAddr(blk.tag, blk.set), 1529 blkSize, 0, Request::funcMasterId); 1530 request.taskId(blk.task_id); 1531 1532 Packet packet(&request, MemCmd::WriteReq); 1533 packet.dataStatic(blk.data); 1534 1535 memSidePort->sendFunctional(&packet); 1536 1537 blk.status &= ~BlkDirty; 1538 } 1539 1540 return true; 1541} 1542 1543bool 1544Cache::invalidateVisitor(CacheBlk &blk) 1545{ 1546 1547 if (blk.isDirty()) 1548 warn_once("Invalidating dirty cache lines. 
Expect things to break.\n"); 1549 1550 if (blk.isValid()) { 1551 assert(!blk.isDirty()); 1552 tags->invalidate(&blk); 1553 blk.invalidate(); 1554 } 1555 1556 return true; 1557} 1558 1559CacheBlk* 1560Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks) 1561{ 1562 CacheBlk *blk = tags->findVictim(addr); 1563 1564 // It is valid to return NULL if there is no victim 1565 if (!blk) 1566 return nullptr; 1567 1568 if (blk->isValid()) { 1569 Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set); 1570 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure()); 1571 if (repl_mshr) { 1572 // must be an outstanding upgrade request 1573 // on a block we're about to replace... 1574 assert(!blk->isWritable() || blk->isDirty()); 1575 assert(repl_mshr->needsExclusive()); 1576 // too hard to replace block with transient state 1577 // allocation failed, block not inserted 1578 return NULL; 1579 } else { 1580 DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx (%s): %s\n", 1581 repl_addr, blk->isSecure() ? "s" : "ns", 1582 addr, is_secure ? "s" : "ns", 1583 blk->isDirty() ? "writeback" : "clean"); 1584 1585 // Will send up Writeback/CleanEvict snoops via isCachedAbove 1586 // when pushing this writeback list into the write buffer. 1587 if (blk->isDirty()) { 1588 // Save writeback packet for handling by caller 1589 writebacks.push_back(writebackBlk(blk)); 1590 } else { 1591 writebacks.push_back(cleanEvictBlk(blk)); 1592 } 1593 } 1594 } 1595 1596 return blk; 1597} 1598 1599 1600// Note that the reason we return a list of writebacks rather than 1601// inserting them directly in the write buffer is that this function 1602// is called by both atomic and timing-mode accesses, and in atomic 1603// mode we don't mess with the write buffer (we just perform the 1604// writebacks atomically once the original request is complete). 1605CacheBlk* 1606Cache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks) 1607{ 1608 assert(pkt->isResponse() || pkt->cmd == MemCmd::WriteLineReq); 1609 Addr addr = pkt->getAddr(); 1610 bool is_secure = pkt->isSecure(); 1611#if TRACING_ON 1612 CacheBlk::State old_state = blk ? blk->status : 0; 1613#endif 1614 1615 // When handling a fill, discard any CleanEvicts for the 1616 // same address in write buffer. 1617 Addr M5_VAR_USED blk_addr = blockAlign(pkt->getAddr()); 1618 std::vector<MSHR *> M5_VAR_USED wbs; 1619 assert (!writeBuffer.findMatches(blk_addr, is_secure, wbs)); 1620 1621 if (blk == NULL) { 1622 // better have read new data... 1623 assert(pkt->hasData()); 1624 1625 // only read responses and write-line requests have data; 1626 // note that we don't write the data here for write-line - that 1627 // happens in the subsequent satisfyCpuSideRequest. 1628 assert(pkt->isRead() || pkt->cmd == MemCmd::WriteLineReq); 1629 1630 // need to do a replacement 1631 blk = allocateBlock(addr, is_secure, writebacks); 1632 if (blk == NULL) { 1633 // No replaceable block... just use temporary storage to 1634 // complete the current request and then get rid of it 1635 assert(!tempBlock->isValid()); 1636 blk = tempBlock; 1637 tempBlock->set = tags->extractSet(addr); 1638 tempBlock->tag = tags->extractTag(addr); 1639 // @todo: set security state as well... 1640 DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr, 1641 is_secure ? "s" : "ns"); 1642 } else { 1643 tags->insertBlock(pkt, blk); 1644 } 1645 1646 // we should never be overwriting a valid block 1647 assert(!blk->isValid()); 1648 } else { 1649 // existing block... 
probably an upgrade 1650 assert(blk->tag == tags->extractTag(addr)); 1651 // either we're getting new data or the block should already be valid 1652 assert(pkt->hasData() || blk->isValid()); 1653 // don't clear block status... if block is already dirty we 1654 // don't want to lose that 1655 } 1656 1657 if (is_secure) 1658 blk->status |= BlkSecure; 1659 blk->status |= BlkValid | BlkReadable; 1660 1661 if (!pkt->sharedAsserted()) { 1662 // we could get non-shared responses from memory (rather than 1663 // a cache) even in a read-only cache, note that we set this 1664 // bit even for a read-only cache as we use it to represent 1665 // the exclusive state 1666 blk->status |= BlkWritable; 1667 1668 // If we got this via cache-to-cache transfer (i.e., from a 1669 // cache that was an owner) and took away that owner's copy, 1670 // then we need to write it back. Normally this happens 1671 // anyway as a side effect of getting a copy to write it, but 1672 // there are cases (such as failed store conditionals or 1673 // compare-and-swaps) where we'll demand an exclusive copy but 1674 // end up not writing it. 1675 if (pkt->memInhibitAsserted()) { 1676 blk->status |= BlkDirty; 1677 1678 chatty_assert(!isReadOnly, "Should never see dirty snoop response " 1679 "in read-only cache %s\n", name()); 1680 } 1681 } 1682 1683 DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n", 1684 addr, is_secure ? "s" : "ns", old_state, blk->print()); 1685 1686 // if we got new data, copy it in (checking for a read response 1687 // and a response that has data is the same in the end) 1688 if (pkt->isRead()) { 1689 // sanity checks 1690 assert(pkt->hasData()); 1691 assert(pkt->getSize() == blkSize); 1692 1693 std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize); 1694 } 1695 // We pay for fillLatency here. 1696 blk->whenReady = clockEdge() + fillLatency * clockPeriod() + 1697 pkt->payloadDelay; 1698 1699 return blk; 1700} 1701 1702 1703///////////////////////////////////////////////////// 1704// 1705// Snoop path: requests coming in from the memory side 1706// 1707///////////////////////////////////////////////////// 1708 1709void 1710Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data, 1711 bool already_copied, bool pending_inval) 1712{ 1713 // sanity check 1714 assert(req_pkt->isRequest()); 1715 assert(req_pkt->needsResponse()); 1716 1717 DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__, 1718 req_pkt->cmdString(), req_pkt->getAddr(), req_pkt->getSize()); 1719 // timing-mode snoop responses require a new packet, unless we 1720 // already made a copy... 1721 PacketPtr pkt = req_pkt; 1722 if (!already_copied) 1723 // do not clear flags, and allocate space for data if the 1724 // packet needs it (the only packets that carry data are read 1725 // responses) 1726 pkt = new Packet(req_pkt, false, req_pkt->isRead()); 1727 1728 assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() || 1729 pkt->sharedAsserted()); 1730 pkt->makeTimingResponse(); 1731 if (pkt->isRead()) { 1732 pkt->setDataFromBlock(blk_data, blkSize); 1733 } 1734 if (pkt->cmd == MemCmd::ReadResp && pending_inval) { 1735 // Assume we defer a response to a read from a far-away cache 1736 // A, then later defer a ReadExcl from a cache B on the same 1737 // bus as us. We'll assert MemInhibit in both cases, but in 1738 // the latter case MemInhibit will keep the invalidation from 1739 // reaching cache A. 
This special response tells cache A that 1740 // it gets the block to satisfy its read, but must immediately 1741 // invalidate it. 1742 pkt->cmd = MemCmd::ReadRespWithInvalidate; 1743 } 1744 // Here we consider forward_time, paying for just forward latency and 1745 // also charging the delay provided by the xbar. 1746 // forward_time is used as send_time in next allocateWriteBuffer(). 1747 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay; 1748 // Here we reset the timing of the packet. 1749 pkt->headerDelay = pkt->payloadDelay = 0; 1750 DPRINTF(Cache, "%s created response: %s addr %#llx size %d tick: %lu\n", 1751 __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize(), 1752 forward_time); 1753 memSidePort->schedTimingSnoopResp(pkt, forward_time, true); 1754} 1755 1756uint32_t 1757Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing, 1758 bool is_deferred, bool pending_inval) 1759{ 1760 DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__, 1761 pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 1762 // deferred snoops can only happen in timing mode 1763 assert(!(is_deferred && !is_timing)); 1764 // pending_inval only makes sense on deferred snoops 1765 assert(!(pending_inval && !is_deferred)); 1766 assert(pkt->isRequest()); 1767 1768 // the packet may get modified if we or a forwarded snooper 1769 // responds in atomic mode, so remember a few things about the 1770 // original packet up front 1771 bool invalidate = pkt->isInvalidate(); 1772 bool M5_VAR_USED needs_exclusive = pkt->needsExclusive(); 1773 1774 uint32_t snoop_delay = 0; 1775 1776 if (forwardSnoops) { 1777 // first propagate snoop upward to see if anyone above us wants to 1778 // handle it. save & restore packet src since it will get 1779 // rewritten to be relative to cpu-side bus (if any) 1780 bool alreadyResponded = pkt->memInhibitAsserted(); 1781 if (is_timing) { 1782 // copy the packet so that we can clear any flags before 1783 // forwarding it upwards, we also allocate data (passing 1784 // the pointer along in case of static data), in case 1785 // there is a snoop hit in upper levels 1786 Packet snoopPkt(pkt, true, true); 1787 snoopPkt.setExpressSnoop(); 1788 snoopPkt.pushSenderState(new ForwardResponseRecord()); 1789 // the snoop packet does not need to wait any additional 1790 // time 1791 snoopPkt.headerDelay = snoopPkt.payloadDelay = 0; 1792 cpuSidePort->sendTimingSnoopReq(&snoopPkt); 1793 1794 // add the header delay (including crossbar and snoop 1795 // delays) of the upward snoop to the snoop delay for this 1796 // cache 1797 snoop_delay += snoopPkt.headerDelay; 1798 1799 if (snoopPkt.memInhibitAsserted()) { 1800 // cache-to-cache response from some upper cache 1801 assert(!alreadyResponded); 1802 pkt->assertMemInhibit(); 1803 } else { 1804 // no cache (or anyone else for that matter) will 1805 // respond, so delete the ForwardResponseRecord here 1806 delete snoopPkt.popSenderState(); 1807 } 1808 if (snoopPkt.sharedAsserted()) { 1809 pkt->assertShared(); 1810 } 1811 // If this request is a prefetch or clean evict and an upper level 1812 // signals block present, make sure to propagate the block 1813 // presence to the requester. 
1814             if (snoopPkt.isBlockCached()) { 1815                 pkt->setBlockCached(); 1816             } 1817         } else { 1818             cpuSidePort->sendAtomicSnoop(pkt); 1819             if (!alreadyResponded && pkt->memInhibitAsserted()) { 1820                 // cache-to-cache response from some upper cache: 1821                 // forward response to original requester 1822                 assert(pkt->isResponse()); 1823             } 1824         } 1825     } 1826 1827     if (!blk || !blk->isValid()) { 1828         DPRINTF(Cache, "%s snoop miss for %s addr %#llx size %d\n", 1829                 __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 1830         return snoop_delay; 1831     } else { 1832         DPRINTF(Cache, "%s snoop hit for %s for addr %#llx size %d, " 1833                 "old state is %s\n", __func__, pkt->cmdString(), 1834                 pkt->getAddr(), pkt->getSize(), blk->print()); 1835     } 1836 1837     chatty_assert(!(isReadOnly && blk->isDirty()), 1838                   "Should never have a dirty block in a read-only cache %s\n", 1839                   name()); 1840 1841     // We may end up modifying both the block state and the packet (if 1842     // we respond in atomic mode), so just figure out what to do now 1843     // and then do it later. If we find dirty data while snooping for 1844     // an invalidate, we don't need to send a response. The 1845     // invalidation itself is taken care of below. 1846     bool respond = blk->isDirty() && pkt->needsResponse() && 1847         pkt->cmd != MemCmd::InvalidateReq; 1848     bool have_exclusive = blk->isWritable(); 1849 1850     // Invalidate any prefetches from below that would strip write permissions. 1851     // MemCmd::HardPFReq is only observed by upstream caches. After missing 1852     // above and in its own cache, a new MemCmd::ReadReq is created that 1853     // downstream caches observe. 1854     if (pkt->mustCheckAbove()) { 1855         DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s from" 1856                 " lower cache\n", pkt->getAddr(), pkt->cmdString()); 1857         pkt->setBlockCached(); 1858         return snoop_delay; 1859     } 1860 1861     if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) { 1862         // reading non-exclusive shared data, note that we retain 1863         // the block in owned state if it is dirty, with the response 1864         // taken care of below, and otherwise simply downgrade to 1865         // shared 1866         assert(!needs_exclusive); 1867         pkt->assertShared(); 1868         blk->status &= ~BlkWritable; 1869     } 1870 1871     if (respond) { 1872         // prevent anyone else from responding, cache as well as 1873         // memory, and also prevent any memory from even seeing the 1874         // request (with current inhibited semantics), note that this 1875         // applies both to reads and writes and that for writes it 1876         // works thanks to the fact that we still have dirty data and 1877         // will write it back at a later point 1878         pkt->assertMemInhibit(); 1879         if (have_exclusive) { 1880             // in the case of an uncacheable request there is no point 1881             // in setting the exclusive flag, but since the recipient 1882             // does not care there is no harm in doing so, in any case 1883             // it is just a hint 1884             pkt->setSupplyExclusive(); 1885         } 1886         if (is_timing) { 1887             doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval); 1888         } else { 1889             pkt->makeAtomicResponse(); 1890             pkt->setDataFromBlock(blk->data, blkSize); 1891         } 1892     } 1893 1894     if (!respond && is_timing && is_deferred) { 1895         // if it's a deferred timing snoop then we've made a copy of 1896         // both the request and the packet, and since we're not using 1897         // those copies to respond we delete them here 1898         DPRINTF(Cache, "Deleting pkt %p and request %p for cmd %s addr: %p\n", 1899                 pkt, pkt->req, pkt->cmdString(), pkt->getAddr()); 1900 1901         // the packet needs a response
(just not from us), so we also 1902 // need to delete the request and not rely on the packet 1903 // destructor 1904 assert(pkt->needsResponse()); 1905 delete pkt->req; 1906 delete pkt; 1907 } 1908 1909 // Do this last in case it deallocates block data or something 1910 // like that 1911 if (invalidate) { 1912 if (blk != tempBlock) 1913 tags->invalidate(blk); 1914 blk->invalidate(); 1915 } 1916 1917 DPRINTF(Cache, "new state is %s\n", blk->print()); 1918 1919 return snoop_delay; 1920} 1921 1922 1923void 1924Cache::recvTimingSnoopReq(PacketPtr pkt) 1925{ 1926 DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__, 1927 pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 1928 1929 // Snoops shouldn't happen when bypassing caches 1930 assert(!system->bypassCaches()); 1931
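    // The handling below proceeds in order: ignore snoops outside our
    // address range, charge a tentative lookup latency on the packet's
    // snoopDelay, squash or defer the snoop against any matching
    // in-service MSHR, service any hit in the writeback buffer, and
    // finally fall through to handleSnoop() for the tag lookup proper.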
1932     // no need to snoop requests that are not in range
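    // (eviction packets, i.e. Writebacks and CleanEvicts, are
    // deliberately not filtered out here; they are matched against the
    // writeback buffer and the MSHRs below like any other snoop so
    // that the BLOCK_CACHED flag can be propagated)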
1933     if (!inRange(pkt->getAddr())) { 1934         return; 1935     } 1936 1937     bool is_secure = pkt->isSecure(); 1938     CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure); 1939 1940     Addr blk_addr = blockAlign(pkt->getAddr()); 1941     MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure); 1942 1943     // Update the latency cost of the snoop so that the crossbar can 1944     // account for it. Do not overwrite what other neighbouring caches 1945     // have already done, rather take the maximum. The update is 1946     // tentative, for cases where we return before an upward snoop 1947     // happens below. 1948     pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, 1949                                          lookupLatency * clockPeriod()); 1950 1951     // Inform requests (Prefetch, CleanEvict or Writeback) from below of 1952     // an MSHR hit by calling setBlockCached. 1953     if (mshr && pkt->mustCheckAbove()) { 1954         DPRINTF(Cache, "Setting block cached for %s from " 1955                 "lower cache on mshr hit %#x\n", 1956                 pkt->cmdString(), pkt->getAddr()); 1957         pkt->setBlockCached(); 1958         return; 1959     } 1960 1961     // Let the MSHR itself track the snoop and decide whether we want 1962     // to go ahead and do the regular cache snoop 1963     if (mshr && mshr->handleSnoop(pkt, order++)) { 1964         DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s). " 1965                 "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns", 1966                 mshr->print()); 1967 1968         if (mshr->getNumTargets() > numTarget) 1969             warn("allocating bonus target for snoop"); //handle later 1970         return; 1971     } 1972 1973     // We also need to check the writeback buffers and handle those 1974     std::vector<MSHR *> writebacks; 1975     if (writeBuffer.findMatches(blk_addr, is_secure, writebacks)) { 1976         DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n", 1977                 pkt->getAddr(), is_secure ? "s" : "ns"); 1978 1979         // Look through writebacks for any cacheable writes. 1980         // We should only ever find a single match 1981         assert(writebacks.size() == 1); 1982         MSHR *wb_entry = writebacks[0]; 1983         // Expect to see only Writebacks and/or CleanEvicts here, both of 1984         // which should not be generated for uncacheable data. 1985         assert(!wb_entry->isUncacheable()); 1986         // There should only be a single request responsible for generating 1987         // Writebacks/CleanEvicts. 1988         assert(wb_entry->getNumTargets() == 1); 1989         PacketPtr wb_pkt = wb_entry->getTarget()->pkt; 1990         assert(wb_pkt->evictingBlock()); 1991 1992         if (pkt->evictingBlock()) { 1993             // if the block is found in the write queue, set the BLOCK_CACHED 1994             // flag for Writeback/CleanEvict snoop. On return the snoop will 1995             // propagate the BLOCK_CACHED flag in Writeback packets and prevent 1996             // any CleanEvicts from travelling down the memory hierarchy. 1997             pkt->setBlockCached(); 1998             DPRINTF(Cache, "Squashing %s from lower cache on writequeue hit" 1999                     " %#x\n", pkt->cmdString(), pkt->getAddr()); 2000             return; 2001         } 2002 2003         if (wb_pkt->cmd == MemCmd::Writeback) { 2004             assert(!pkt->memInhibitAsserted()); 2005             pkt->assertMemInhibit(); 2006             if (!pkt->needsExclusive()) { 2007                 pkt->assertShared(); 2008                 // the writeback is no longer passing exclusivity (the 2009                 // receiving cache should consider the block owned 2010                 // rather than modified) 2011                 wb_pkt->assertShared(); 2012             } else { 2013                 // if we're not asserting the shared line, we need to 2014                 // invalidate our copy.  we'll do that below as long as 2015                 // the packet's invalidate flag is set...
2016 assert(pkt->isInvalidate()); 2017 } 2018 doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(), 2019 false, false); 2020 } else { 2021 assert(wb_pkt->cmd == MemCmd::CleanEvict); 2022 // The cache technically holds the block until the 2023 // corresponding CleanEvict message reaches the crossbar 2024 // below. Therefore when a snoop encounters a CleanEvict 2025 // message we must set assertShared (just like when it 2026 // encounters a Writeback) to avoid the snoop filter 2027 // prematurely clearing the holder bit in the crossbar 2028 // below 2029 if (!pkt->needsExclusive()) 2030 pkt->assertShared(); 2031 else 2032 assert(pkt->isInvalidate()); 2033 } 2034 2035 if (pkt->isInvalidate()) { 2036 // Invalidation trumps our writeback... discard here 2037 // Note: markInService will remove entry from writeback buffer. 2038 markInService(wb_entry, false); 2039 delete wb_pkt; 2040 } 2041 } 2042 2043 // If this was a shared writeback, there may still be 2044 // other shared copies above that require invalidation. 2045 // We could be more selective and return here if the 2046 // request is non-exclusive or if the writeback is 2047 // exclusive. 2048 uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false); 2049 2050 // Override what we did when we first saw the snoop, as we now 2051 // also have the cost of the upwards snoops to account for 2052 pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay + 2053 lookupLatency * clockPeriod()); 2054} 2055 2056bool 2057Cache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt) 2058{ 2059 // Express snoop responses from master to slave, e.g., from L1 to L2 2060 cache->recvTimingSnoopResp(pkt); 2061 return true; 2062} 2063 2064Tick 2065Cache::recvAtomicSnoop(PacketPtr pkt) 2066{ 2067 // Snoops shouldn't happen when bypassing caches 2068 assert(!system->bypassCaches()); 2069
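    // Unlike the timing path above, the atomic snoop does not consult
    // the MSHRs or the write buffer; it goes straight to handleSnoop()
    // with is_timing false, and the cost is handed back to the
    // crossbar in ticks (snoop_delay plus one lookup latency).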
2070     // no need to snoop requests that are not in range. 2071     if (!inRange(pkt->getAddr())) {
2072         return 0; 2073     } 2074 2075     CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure()); 2076     uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false); 2077     return snoop_delay + lookupLatency * clockPeriod(); 2078} 2079 2080 2081MSHR * 2082Cache::getNextMSHR() 2083{ 2084     // Check both MSHR queue and write buffer for potential requests, 2085     // note that null does not mean there is no request, it could 2086     // simply be that it is not ready 2087     MSHR *miss_mshr  = mshrQueue.getNextMSHR(); 2088     MSHR *write_mshr = writeBuffer.getNextMSHR(); 2089 2090     // If we got a write buffer request ready, first priority is a 2091     // full write buffer, otherwise we favour the miss requests 2092     if (write_mshr && 2093         ((writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) || 2094          !miss_mshr)) { 2095         // need to search MSHR queue for conflicting earlier miss. 2096         MSHR *conflict_mshr = 2097             mshrQueue.findPending(write_mshr->blkAddr, 2098                                   write_mshr->isSecure); 2099 2100         if (conflict_mshr && conflict_mshr->order < write_mshr->order) { 2101             // Service misses in order until conflict is cleared. 2102             return conflict_mshr; 2103 2104             // @todo Note that we ignore the ready time of the conflict here 2105         } 2106 2107         // No conflicts; issue write 2108         return write_mshr; 2109     } else if (miss_mshr) { 2110         // need to check for conflicting earlier writeback 2111         MSHR *conflict_mshr = 2112             writeBuffer.findPending(miss_mshr->blkAddr, 2113                                     miss_mshr->isSecure); 2114         if (conflict_mshr) { 2115             // not sure why we don't check order here... it was in the 2116             // original code but commented out. 2117 2118             // The only way this happens is if we are doing a write and 2119             // we didn't have permissions, then subsequently saw a 2120             // writeback (the owned copy got evicted). We need to make 2121             // sure to perform the writeback first to preserve the dirty 2122             // data, then we can issue the write 2123 2124             // should we return write_mshr here instead?  I.e. do we 2125             // have to flush writes in order?  I don't think so... not 2126             // for Alpha anyway.  Maybe for x86? 2127             return conflict_mshr; 2128 2129             // @todo Note that we ignore the ready time of the conflict here 2130         } 2131 2132         // No conflicts; issue read 2133         return miss_mshr; 2134     } 2135 2136     // fall through... no pending requests.  Try a prefetch. 2137     assert(!miss_mshr && !write_mshr); 2138     if (prefetcher && mshrQueue.canPrefetch()) { 2139         // If we have a miss queue slot, we can try a prefetch 2140         PacketPtr pkt = prefetcher->getPacket(); 2141         if (pkt) { 2142             Addr pf_addr = blockAlign(pkt->getAddr()); 2143             if (!tags->findBlock(pf_addr, pkt->isSecure()) && 2144                 !mshrQueue.findMatch(pf_addr, pkt->isSecure()) && 2145                 !writeBuffer.findMatch(pf_addr, pkt->isSecure())) { 2146                 // Update statistic on number of prefetches issued 2147                 // (hwpf_mshr_misses) 2148                 assert(pkt->req->masterId() < system->maxMasters()); 2149                 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++; 2150 2151                 // allocate an MSHR and return it, note 2152                 // that we send the packet straight away, so do not 2153                 // schedule the send 2154                 return allocateMissBuffer(pkt, curTick(), false); 2155             } else { 2156                 // free the request and packet 2157                 delete pkt->req; 2158                 delete pkt; 2159             } 2160         } 2161     } 2162 2163     return NULL; 2164} 2165 2166bool
2167Cache::isCachedAbove(PacketPtr pkt, bool is_timing) const
2168{ 2169     if (!forwardSnoops) 2170         return false; 2171     // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and 2172     // Writeback snoops into upper level caches to check for copies of the 2173     // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict 2174     // packet, the cache can inform the crossbar below of presence or absence 2175     // of the block.
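    // In timing mode the snoop below is sent with a throwaway copy of
    // the packet so that the flags on the original are left untouched,
    // whereas in atomic mode the packet itself is snooped upward and
    // the BLOCK_CACHED flag is read back off it.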
2176     if (is_timing) { 2177         Packet snoop_pkt(pkt, true, false); 2178         snoop_pkt.setExpressSnoop(); 2179         // Assert that packet is either Writeback or CleanEvict and not a 2180         // prefetch request because prefetch requests need an MSHR and may 2181         // generate a snoop response. 2182         assert(pkt->evictingBlock()); 2183         snoop_pkt.senderState = NULL; 2184         cpuSidePort->sendTimingSnoopReq(&snoop_pkt); 2185         // Writeback/CleanEvict snoops do not generate a snoop response. 2186         assert(!(snoop_pkt.memInhibitAsserted())); 2187         return snoop_pkt.isBlockCached(); 2188     } else { 2189         cpuSidePort->sendAtomicSnoop(pkt); 2190         return pkt->isBlockCached(); 2191     }
2192} 2193 2194PacketPtr 2195Cache::getTimingPacket() 2196{ 2197     MSHR *mshr = getNextMSHR(); 2198 2199     if (mshr == NULL) { 2200         return NULL; 2201     } 2202 2203     // use request from 1st target 2204     PacketPtr tgt_pkt = mshr->getTarget()->pkt; 2205     PacketPtr pkt = NULL; 2206 2207     DPRINTF(CachePort, "%s %s for addr %#llx size %d\n", __func__, 2208             tgt_pkt->cmdString(), tgt_pkt->getAddr(), tgt_pkt->getSize()); 2209 2210     CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure); 2211 2212     if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) { 2213         // We need to check the caches above us to verify that 2214         // they don't have a copy of this block in the dirty state 2215         // at the moment. Without this check we could get a stale 2216         // copy from memory that might get used in place of the 2217         // dirty one. 2218         Packet snoop_pkt(tgt_pkt, true, false); 2219         snoop_pkt.setExpressSnoop(); 2220         snoop_pkt.senderState = mshr; 2221         cpuSidePort->sendTimingSnoopReq(&snoop_pkt); 2222 2223         // Check to see if the prefetch was squashed by an upper cache (to 2224         // prevent us from grabbing the line), or if a writeback arrived 2225         // between the time the prefetch was placed in the MSHRs and when 2226         // it was selected to be sent. 2227 2228 2229         // It is important to check memInhibitAsserted before 2230         // prefetchSquashed. If another cache has asserted MEM_INHIBIT, it 2231         // will be sending a response which will arrive at the MSHR 2232         // allocated for this request. Checking the prefetchSquash first 2233         // may result in the MSHR being prematurely deallocated. 2234 2235         if (snoop_pkt.memInhibitAsserted()) { 2236             // If we are getting a non-shared response it is dirty 2237             bool pending_dirty_resp = !snoop_pkt.sharedAsserted(); 2238             markInService(mshr, pending_dirty_resp); 2239             DPRINTF(Cache, "Upward snoop of prefetch for addr" 2240                     " %#x (%s) hit\n", 2241                     tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns"); 2242             return NULL; 2243         } 2244 2245         if (snoop_pkt.isBlockCached() || blk != NULL) { 2246             DPRINTF(Cache, "Block present, prefetch squashed by cache.  " 2247                     "Deallocating mshr target %#x.\n", 2248                     mshr->blkAddr); 2249 2250             // Deallocate the mshr target 2251             if (tgt_pkt->cmd != MemCmd::Writeback) { 2252                 if (mshr->queue->forceDeallocateTarget(mshr)) { 2253                     // Unblock the cache if this deallocation freed an 2254                     // MSHR when all had previously been utilized 2255                     clearBlocked((BlockedCause)(mshr->queue->index)); 2256                 } 2257                 return NULL; 2258             } else { 2259                 // If this is a Writeback, and the snoops indicate that the blk 2260                 // is cached above, set the BLOCK_CACHED flag in the Writeback 2261                 // packet, so that it does not reset the bits corresponding to 2262                 // this block in the snoop filter below.
2263                 tgt_pkt->setBlockCached(); 2264             } 2265         } 2266     } 2267 2268     if (mshr->isForwardNoResponse()) { 2269         // no response expected, just forward packet as it is 2270         assert(tags->findBlock(mshr->blkAddr, mshr->isSecure) == NULL); 2271         pkt = tgt_pkt; 2272     } else { 2273         pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive()); 2274 2275         mshr->isForward = (pkt == NULL); 2276 2277         if (mshr->isForward) { 2278             // not a cache block request, but a response is expected 2279             // make copy of current packet to forward, keep current 2280             // copy for response handling 2281             pkt = new Packet(tgt_pkt, false, true); 2282             if (pkt->isWrite()) { 2283                 pkt->setData(tgt_pkt->getConstPtr<uint8_t>()); 2284             } 2285         } 2286     } 2287 2288     assert(pkt != NULL); 2289     pkt->senderState = mshr; 2290     return pkt; 2291} 2292 2293 2294Tick 2295Cache::nextMSHRReadyTime() const 2296{ 2297     Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(), 2298                               writeBuffer.nextMSHRReadyTime()); 2299 2300     // Don't signal prefetch ready time if no MSHRs available 2301     // Will signal once enough MSHRs are deallocated 2302     if (prefetcher && mshrQueue.canPrefetch()) { 2303         nextReady = std::min(nextReady, 2304                              prefetcher->nextPrefetchReadyTime()); 2305     } 2306 2307     return nextReady; 2308} 2309 2310void 2311Cache::serialize(CheckpointOut &cp) const 2312{ 2313     bool dirty(isDirty()); 2314 2315     if (dirty) { 2316         warn("*** The cache still contains dirty data. ***\n"); 2317         warn("    Make sure to drain the system using the correct flags.\n"); 2318         warn("    This checkpoint will not restore correctly and dirty data in " 2319              "the cache will be lost!\n"); 2320     } 2321 2322     // Since we don't checkpoint the data in the cache, any dirty data 2323     // will be lost when restoring from a checkpoint of a system that 2324     // wasn't drained properly. Flag the checkpoint as invalid if the 2325     // cache contains dirty data. 2326     bool bad_checkpoint(dirty); 2327     SERIALIZE_SCALAR(bad_checkpoint); 2328} 2329 2330void 2331Cache::unserialize(CheckpointIn &cp) 2332{ 2333     bool bad_checkpoint; 2334     UNSERIALIZE_SCALAR(bad_checkpoint); 2335     if (bad_checkpoint) { 2336         fatal("Restoring from checkpoints with dirty caches is not supported " 2337               "in the classic memory system.
Please remove any caches or " 2338               "drain them properly before taking checkpoints.\n"); 2339     } 2340} 2341 2342/////////////// 2343// 2344// CpuSidePort 2345// 2346/////////////// 2347 2348AddrRangeList 2349Cache::CpuSidePort::getAddrRanges() const 2350{ 2351     return cache->getAddrRanges(); 2352} 2353 2354bool 2355Cache::CpuSidePort::recvTimingReq(PacketPtr pkt) 2356{ 2357     assert(!cache->system->bypassCaches()); 2358 2359     bool success = false; 2360 2361     // always let inhibited requests through, even if blocked, 2362     // ultimately we should check if this is an express snoop, but at 2363     // the moment that flag is only set in the cache itself 2364     if (pkt->memInhibitAsserted()) { 2365         // do not change the current retry state 2366         bool M5_VAR_USED bypass_success = cache->recvTimingReq(pkt); 2367         assert(bypass_success); 2368         return true; 2369     } else if (blocked || mustSendRetry) { 2370         // either already committed to send a retry, or blocked 2371         success = false; 2372     } else { 2373         // pass it on to the cache, and let the cache decide if we 2374         // have to retry or not 2375         success = cache->recvTimingReq(pkt); 2376     } 2377 2378     // remember if we have to retry 2379     mustSendRetry = !success; 2380     return success; 2381} 2382 2383Tick 2384Cache::CpuSidePort::recvAtomic(PacketPtr pkt) 2385{ 2386     return cache->recvAtomic(pkt); 2387} 2388 2389void 2390Cache::CpuSidePort::recvFunctional(PacketPtr pkt) 2391{ 2392     // functional request 2393     cache->functionalAccess(pkt, true); 2394} 2395 2396Cache:: 2397CpuSidePort::CpuSidePort(const std::string &_name, Cache *_cache, 2398                          const std::string &_label) 2399     : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache) 2400{ 2401} 2402 2403Cache* 2404CacheParams::create() 2405{ 2406     assert(tags); 2407 2408     return new Cache(this); 2409} 2410/////////////// 2411// 2412// MemSidePort 2413// 2414/////////////// 2415 2416bool 2417Cache::MemSidePort::recvTimingResp(PacketPtr pkt) 2418{ 2419     cache->recvTimingResp(pkt); 2420     return true; 2421} 2422 2423// Express snooping requests to memside port 2424void 2425Cache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt) 2426{ 2427     // handle snooping requests 2428     cache->recvTimingSnoopReq(pkt); 2429} 2430 2431Tick 2432Cache::MemSidePort::recvAtomicSnoop(PacketPtr pkt) 2433{ 2434     return cache->recvAtomicSnoop(pkt); 2435} 2436 2437void 2438Cache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt) 2439{ 2440     // functional snoop (note that in contrast to atomic we don't have 2441     // a specific functionalSnoop method, as they have the same 2442     // behaviour regardless) 2443     cache->functionalAccess(pkt, false); 2444} 2445 2446void 2447Cache::CacheReqPacketQueue::sendDeferredPacket() 2448{ 2449     // sanity check 2450     assert(!waitingOnRetry); 2451 2452     // there should never be any deferred request packets in the 2453     // queue, instead we rely on the cache to provide the packets 2454     // from the MSHR queue or write queue 2455     assert(deferredPacketReadyTime() == MaxTick); 2456 2457     // check for request packets (requests & writebacks) 2458     PacketPtr pkt = cache.getTimingPacket(); 2459     if (pkt == NULL) { 2460         // can happen if e.g. we attempt a writeback and fail, but 2461         // before the retry, the writeback is eliminated because 2462         // we snoop another cache's ReadEx.
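        // In that case we simply fall through; the check at the end of
        // this function reschedules the next send based on
        // nextMSHRReadyTime().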
2463 } else { 2464 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState); 2465 // in most cases getTimingPacket allocates a new packet, and 2466 // we must delete it unless it is successfully sent 2467 bool delete_pkt = !mshr->isForwardNoResponse(); 2468 2469 // let our snoop responses go first if there are responses to 2470 // the same addresses we are about to writeback, note that 2471 // this creates a dependency between requests and snoop 2472 // responses, but that should not be a problem since there is 2473 // a chain already and the key is that the snoop responses can 2474 // sink unconditionally 2475 if (snoopRespQueue.hasAddr(pkt->getAddr())) { 2476 DPRINTF(CachePort, "Waiting for snoop response to be sent\n"); 2477 Tick when = snoopRespQueue.deferredPacketReadyTime(); 2478 schedSendEvent(when); 2479 2480 if (delete_pkt) 2481 delete pkt; 2482 2483 return; 2484 } 2485 2486 2487 waitingOnRetry = !masterPort.sendTimingReq(pkt); 2488 2489 if (waitingOnRetry) { 2490 DPRINTF(CachePort, "now waiting on a retry\n"); 2491 if (delete_pkt) { 2492 // we are awaiting a retry, but we 2493 // delete the packet and will be creating a new packet 2494 // when we get the opportunity 2495 delete pkt; 2496 } 2497 // note that we have now masked any requestBus and 2498 // schedSendEvent (we will wait for a retry before 2499 // doing anything), and this is so even if we do not 2500 // care about this packet and might override it before 2501 // it gets retried 2502 } else { 2503 // As part of the call to sendTimingReq the packet is 2504 // forwarded to all neighbouring caches (and any 2505 // caches above them) as a snoop. The packet is also 2506 // sent to any potential cache below as the 2507 // interconnect is not allowed to buffer the 2508 // packet. Thus at this point we know if any of the 2509 // neighbouring, or the downstream cache is 2510 // responding, and if so, if it is with a dirty line 2511 // or not. 2512 bool pending_dirty_resp = !pkt->sharedAsserted() && 2513 pkt->memInhibitAsserted(); 2514 2515 cache.markInService(mshr, pending_dirty_resp); 2516 } 2517 } 2518 2519 // if we succeeded and are not waiting for a retry, schedule the 2520 // next send considering when the next MSHR is ready, note that 2521 // snoop responses have their own packet queue and thus schedule 2522 // their own events 2523 if (!waitingOnRetry) { 2524 schedSendEvent(cache.nextMSHRReadyTime()); 2525 } 2526} 2527 2528Cache:: 2529MemSidePort::MemSidePort(const std::string &_name, Cache *_cache, 2530 const std::string &_label) 2531 : BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue), 2532 _reqQueue(*_cache, *this, _snoopRespQueue, _label), 2533 _snoopRespQueue(*_cache, *this, _label), cache(_cache) 2534{ 2535}
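// Note that the MemSidePort constructor above passes references to
// _reqQueue and _snoopRespQueue to the base-class constructor before
// those members have themselves been constructed; this is well-defined
// only as long as CacheMasterPort merely stores the references and does
// not use the queues during construction.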