/*
 * Copyright (c) 2010-2015 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Sandberg
 */

/**
 * @file
 * Cache definitions.
 */

#include "mem/cache/cache.hh"

#include "base/misc.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheTags.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "sim/sim_exit.hh"

Cache::Cache(const CacheParams *p)
    : BaseCache(p, p->system->cacheLineSize()),
      tags(p->tags),
      prefetcher(p->prefetcher),
      doFastWrites(true),
      prefetchOnAccess(p->prefetch_on_access),
      clusivity(p->clusivity),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent(this, false,
                                    EventBase::Delayed_Writeback_Pri)
{
    tempBlock = new CacheBlk();
    tempBlock->data = new uint8_t[blkSize];

    cpuSidePort = new CpuSidePort(p->name + ".cpu_side", this,
                                  "CpuSidePort");
    memSidePort = new MemSidePort(p->name + ".mem_side", this,
                                  "MemSidePort");

    tags->setCache(this);
    if (prefetcher)
        prefetcher->setCache(this);
}

Cache::~Cache()
{
    delete [] tempBlock->data;
    delete tempBlock;

    delete cpuSidePort;
    delete memSidePort;
}

void
Cache::regStats()
{
    BaseCache::regStats();
}

void
Cache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = tags->extractBlkOffset(pkt->getAddr());
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}
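
// In effect, the (conditional) swap above behaves as follows, with
// w = pkt->getSize() and extra = pkt->req->getExtraData():
//
//     old = blk_data[0 .. w)              // returned to the requester
//     if (!pkt->req->isCondSwap() || old == extra)
//         blk_data[0 .. w) = packet's write data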
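
// The comments in the coherence handling below use the MOESI naming
// convention: Modified = writable and dirty, Owned = dirty but not
// writable, Exclusive = writable and clean, Shared = clean and not
// writable.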

void
Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
                             bool deferred_response, bool pending_downgrade)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsWritable() || blk->isWritable());

    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        cmpAndSwap(blk, pkt);
    } else if (pkt->isWrite()) {
        // we have the block in a writable state and can go ahead,
        // note that the line may also be considered writable in
        // downstream caches along the path to memory, but always
        // Exclusive, and never Modified
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in writable state
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty (and thus transition to the
        // Modified state) even if we are a failed StoreCond so we
        // supply data to any snoops that have appended themselves to
        // this cache before knowing the store will fail.
        blk->status |= BlkDirty;
        DPRINTF(Cache, "%s for %s addr %#llx size %d (write)\n", __func__,
                pkt->cmdString(), pkt->getAddr(), pkt->getSize());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }
        pkt->setDataFromBlock(blk->data, blkSize);
        // determine if this read is from a (coherent) cache, or not
        // by looking at the command type; we could potentially add a
        // packet attribute such as 'FromCache' to make this check a
        // bit cleaner
        if (pkt->cmd == MemCmd::ReadExReq ||
            pkt->cmd == MemCmd::ReadSharedReq ||
            pkt->cmd == MemCmd::ReadCleanReq ||
            pkt->cmd == MemCmd::SCUpgradeFailReq) {
            assert(pkt->getSize() == blkSize);
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsWritable()) {
                // sanity check
                assert(pkt->cmd == MemCmd::ReadExReq ||
                       pkt->cmd == MemCmd::SCUpgradeFailReq);

                // if we have a dirty copy, make sure the recipient
                // keeps it marked dirty (in the modified state)
                if (blk->isDirty()) {
                    pkt->setCacheResponding();
                }
                // on ReadExReq we give up our copy unconditionally,
                // even if this cache is mostly inclusive, we may want
                // to revisit this
                invalidateBlock(blk);
            } else if (blk->isWritable() && !pending_downgrade &&
                       !pkt->hasSharers() &&
                       pkt->cmd != MemCmd::ReadCleanReq) {
                // we can give the requester a writable copy on a read
                // request if:
                // - we have a writable copy at this level (& below)
                // - we don't have a pending snoop from below
                //   signaling another read request
                // - no other cache above has a copy (otherwise it
                //   would have set the hasSharers flag when snooping
                //   the packet)
                // - the read has not explicitly asked for a clean
                //   copy of the line
                if (blk->isDirty()) {
                    // special considerations if we're owner:
                    if (!deferred_response) {
                        // respond with the line in Modified state
                        // (cacheResponding set, hasSharers not set)
                        pkt->setCacheResponding();

                        if (clusivity == Enums::mostly_excl) {
                            // if this cache is mostly exclusive with
                            // respect to the cache above, drop the
                            // block, no need to first unset the dirty
                            // bit
                            invalidateBlock(blk);
                        } else {
                            // if this cache is mostly inclusive, we
                            // keep the block in the Exclusive state,
                            // and pass it upwards as Modified
                            // (writable and dirty), hence we have
                            // multiple caches, all on the same path
                            // towards memory, all considering the
                            // same block writable, but only one
                            // considering it Modified

                            // we get away with multiple caches (on
                            // the same path to memory) considering
                            // the block writable as we always enter
                            // the cache hierarchy through a cache,
                            // and first snoop upwards in all other
                            // branches
                            blk->status &= ~BlkDirty;
                        }
                    } else {
                        // if we're responding after our own miss,
                        // there's a window where the recipient didn't
                        // know it was getting ownership and may not
                        // have responded to snoops correctly, so we
                        // have to respond with a shared line
                        pkt->setHasSharers();
                    }
                }
            } else {
                // otherwise only respond with a shared copy
                pkt->setHasSharers();
            }
        }
    } else {
        // Upgrade or Invalidate
        assert(pkt->isUpgrade() || pkt->isInvalidate());

        // for invalidations we could be looking at the temp block
        // (for upgrades we always allocate)
        invalidateBlock(blk);
        DPRINTF(Cache, "%s for %s addr %#llx size %d (invalidation)\n",
                __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
    }
}


/////////////////////////////////////////////////////
//
// MSHR helper functions
//
/////////////////////////////////////////////////////
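
// As the name suggests, pending_modified_resp indicates whether the
// response that this MSHR is still waiting for is expected to carry
// the line in Modified (dirty) state; the actual bookkeeping is done
// by markInServiceInternal() in the base class.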
void
Cache::markInService(MSHR *mshr, bool pending_modified_resp)
{
    markInServiceInternal(mshr, pending_modified_resp);
}
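
// In outline, the access path below works as follows: access()
// attempts to satisfy a request from the local tags and returns true
// when the packet is satisfied or sunk at this level (hits, as well
// as writebacks and CleanEvicts that are absorbed here), and false
// when the request has to be forwarded downstream; any evictions
// generated in the process are appended to the writebacks list.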

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

bool
Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
              PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());

    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    if (pkt->req->isUncacheable()) {
        DPRINTF(Cache, "%s%s addr %#llx uncacheable\n", pkt->cmdString(),
                pkt->req->isInstFetch() ? " (ifetch)" : "",
                pkt->getAddr());

        // flush and invalidate any existing block
        CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
        if (old_blk && old_blk->isValid()) {
            if (old_blk->isDirty() || writebackClean)
                writebacks.push_back(writebackBlk(old_blk));
            else
                writebacks.push_back(cleanEvictBlk(old_blk));
            tags->invalidate(old_blk);
            old_blk->invalidate();
        }

        blk = NULL;
        // lookupLatency is the latency in case the request is uncacheable.
        lat = lookupLatency;
        return false;
    }

    ContextID id = pkt->req->hasContextId() ?
        pkt->req->contextId() : InvalidContextID;
    // Note that lat is passed by reference here: the call to
    // accessBlock() can modify its value.
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat, id);

    DPRINTF(Cache, "%s%s addr %#llx size %d (%s) %s\n", pkt->cmdString(),
            pkt->req->isInstFetch() ? " (ifetch)" : "",
            pkt->getAddr(), pkt->getSize(), pkt->isSecure() ? "s" : "ns",
            blk ? "hit " + blk->print() : "miss");


    if (pkt->isEviction()) {
        // We check for presence of block in above caches before issuing
        // Writeback or CleanEvict to write buffer. Therefore the only
        // possible case is a CleanEvict packet coming from above and
        // encountering a Writeback generated in this cache that is
        // waiting in the write buffer. Cases of upper level peer caches
        // generating CleanEvict and Writeback or simply CleanEvict and
        // CleanEvict almost simultaneously will be caught by snoops sent
        // out by the crossbar.
        std::vector<MSHR *> outgoing;
        if (writeBuffer.findMatches(pkt->getAddr(), pkt->isSecure(),
                                    outgoing)) {
            assert(outgoing.size() == 1);
            MSHR *wb_entry = outgoing[0];
            assert(wb_entry->getNumTargets() == 1);
            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
            assert(wbPkt->isWriteback());

            if (pkt->isCleanEviction()) {
                // The CleanEvict and WritebackClean snoop into other
                // peer caches of the same level while traversing the
                // crossbar. If a copy of the block is found, the
                // packet is deleted in the crossbar. Hence, none of
                // the other upper level caches connected to this
                // cache have the block, so we can clear the
                // BLOCK_CACHED flag in the Writeback if set and
                // discard the CleanEvict by returning true.
                wbPkt->clearBlockCached();
                return true;
            } else {
                assert(pkt->cmd == MemCmd::WritebackDirty);
                // Dirty writeback from above trumps our clean
                // writeback... discard here
                // Note: markInService will remove entry from writeback
                // buffer.
                markInService(wb_entry, false);
                delete wbPkt;
            }
        }
    }

    // Writeback handling is a special case: we can write the block into
    // the cache without having a writable copy (or any copy at all).
    if (pkt->isWriteback()) {
        assert(blkSize == pkt->getSize());

        // we could get a clean writeback while we are having
        // outstanding accesses to a block, do the simple thing for
        // now and drop the clean writeback so that we do not upset
        // any ordering/decisions about ownership already taken
        if (pkt->cmd == MemCmd::WritebackClean &&
            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
                    "dropping\n", pkt->getAddr());
            return true;
        }

        if (blk == NULL) {
            // need to do a replacement
            blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks);
            if (blk == NULL) {
                // no replaceable block available: give up, fwd to next level.
                incMissCount(pkt);
                return false;
            }
            tags->insertBlock(pkt, blk);

            blk->status = (BlkValid | BlkReadable);
            if (pkt->isSecure()) {
                blk->status |= BlkSecure;
            }
        }
        // only mark the block dirty if we got a writeback command,
        // and leave it as is for a clean writeback
        if (pkt->cmd == MemCmd::WritebackDirty) {
            blk->status |= BlkDirty;
        }
        // if the packet does not have sharers, it is passing writable,
        // and we got the writeback in Modified or Exclusive state; if
        // it does have sharers, we are in the Owned or Shared state
        if (!pkt->hasSharers()) {
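            // the writeback carried the only copy from above, so this
            // cache now holds the line writable (Exclusive, or
            // Modified if the writeback was dirty)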
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
        incHitCount(pkt);
        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        if (blk != NULL) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and
            // delete it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the
        // CleanEvict like a Writeback which could not find a replaceable
        // block so has to go to the next level.
        return false;
    } else if ((blk != NULL) &&
               (pkt->needsWritable() ? blk->isWritable() : blk->isReadable())) {
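        // note that a block can be valid yet not readable while a write
        // miss to it is outstanding (the miss handling further below
        // marks it non-readable), in which case a read must miss here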
        // OK to satisfy access
        incHitCount(pkt);
        satisfyCpuSideRequest(pkt, blk);
        return true;
    }

    // Can't satisfy access normally... either no block (blk == NULL)
    // or have block but need writable

    incMissCount(pkt);

    if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}
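
// Send the queued writebacks/evictions down towards memory: in timing
// mode they are staged in the write buffer here, while the atomic
// variant (doWritebacksAtomic below) forwards them immediately via
// sendAtomic.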
void
Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // We use forwardLatency here because we are copying writebacks
        // to the write buffer. Call isCachedAbove for both Writebacks
        // and CleanEvicts. If isCachedAbove returns true we set the
        // BLOCK_CACHED flag in Writebacks and discard CleanEvicts.
        if (isCachedAbove(wbPkt)) {
            if (wbPkt->cmd == MemCmd::CleanEvict) {
                // Delete CleanEvict because cached copies exist above. The
                // packet destructor will delete the request object because
                // this is a non-snoop request packet which does not require
                // a response.
                delete wbPkt;
            } else if (wbPkt->cmd == MemCmd::WritebackClean) {
                // clean writeback, do not send since the block is
                // still cached above
                assert(writebackClean);
                delete wbPkt;
            } else {
                assert(wbPkt->cmd == MemCmd::WritebackDirty);
                // Set BLOCK_CACHED flag in Writeback and send below, so
                // that the Writeback does not reset the bit corresponding
                // to this address in the snoop filter below.
                wbPkt->setBlockCached();
                allocateWriteBuffer(wbPkt, forward_time);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop
            // filter below.
            allocateWriteBuffer(wbPkt, forward_time);
        }
        writebacks.pop_front();
    }
}

void
Cache::doWritebacksAtomic(PacketList& writebacks)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // Call isCachedAbove for both Writebacks and CleanEvicts. If
        // isCachedAbove returns true we set the BLOCK_CACHED flag in
        // Writebacks and discard CleanEvicts.
        if (isCachedAbove(wbPkt, false)) {
            if (wbPkt->cmd == MemCmd::WritebackDirty) {
                // Set BLOCK_CACHED flag in Writeback and send below,
                // so that the Writeback does not reset the bit
                // corresponding to this address in the snoop filter
                // below. We can discard CleanEvicts because cached
                // copies exist above. In atomic mode isCachedAbove
                // modifies the packet to set the BLOCK_CACHED flag.
                memSidePort->sendAtomic(wbPkt);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop
            // filter below.
            memSidePort->sendAtomic(wbPkt);
        }
        writebacks.pop_front();
        // In case of CleanEvicts, the packet destructor will delete the
        // request object because this is a non-snoop request packet
        // which does not require a response.
        delete wbPkt;
    }
}


void
Cache::recvTimingSnoopResp(PacketPtr pkt)
{
    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    assert(pkt->isResponse());
    assert(!system->bypassCaches());

    // determine if the response is from a snoop request we created
    // (in which case it should be in the outstandingSnoop), or if we
    // merely forwarded someone else's snoop request
    const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
        outstandingSnoop.end();

    if (!forwardAsSnoop) {
        // the packet came from this cache, so sink it here and do not
        // forward it
        assert(pkt->cmd == MemCmd::HardPFResp);

        outstandingSnoop.erase(pkt->req);

        DPRINTF(Cache, "Got prefetch response from above for addr "
                "%#llx (%s)\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
        recvTimingResp(pkt);
        return;
    }

    // forwardLatency is set here because there is a response from an
    // upper level cache.
    // To pay for the delay that occurs if the packet comes from the bus,
    // we also charge headerDelay.
    Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    memSidePort->schedTimingSnoopResp(pkt, snoop_resp_time);
}
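
// Promote a full-line write to a WriteLineReq: for example, assuming
// blkSize == 64, a 64-byte WriteReq whose address is 64-byte aligned
// is promoted, so the cache can complete the whole-line write without
// needing the line's previous contents.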
void
Cache::promoteWholeLineWrites(PacketPtr pkt)
{
    // Cache line clearing instructions
    if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) {
        pkt->cmd = MemCmd::WriteLineReq;
        DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
    }
}

bool
Cache::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(CacheTags, "%s tags: %s\n", __func__, tags->print());

    assert(pkt->isRequest());

    // Just forward the packet if caches are disabled.
    if (system->bypassCaches()) {
        // @todo This should really enqueue the packet rather
        bool M5_VAR_USED success = memSidePort->sendTimingReq(pkt);
        assert(success);
        return true;
    }

    promoteWholeLineWrites(pkt);

    if (pkt->cacheResponding()) {
        // a cache above us (but not where the packet came from) is
        // responding to the request, in other words it has the line
        // in Modified or Owned state
        DPRINTF(Cache, "Cache above responding to %#llx (%s): "
                "not responding\n",
                pkt->getAddr(), pkt->isSecure() ? "s" : "ns");

        // if the packet needs the block to be writable, and the cache
        // that has promised to respond (setting the cache responding
        // flag) is not providing writable (it is in Owned rather than
        // the Modified state), we know that there may be other Shared
        // copies in the system; go out and invalidate them all
        if (pkt->needsWritable() && !pkt->responderHadWritable()) {
            // an upstream cache that had the line in Owned state
            // (dirty, but not writable), is responding and thus
            // transferring the dirty line from one branch of the
            // cache hierarchy to another

            // send out an express snoop and invalidate all other
            // copies (snooping a packet that needs writable is the
            // same as an invalidation), thus turning the Owned line
            // into a Modified line, note that we don't invalidate the
            // block in the current cache or any other cache on the
            // path to memory

            // create a downstream express snoop with cleared packet
            // flags, there is no need to allocate any data as the
            // packet is merely used to co-ordinate state transitions
            Packet *snoop_pkt = new Packet(pkt, true, false);

            // also reset the bus time that the original packet has
            // not yet paid for
            snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;

            // make this an instantaneous express snoop, and let the
            // other caches in the system know that another cache is
            // responding, because we have found the authoritative
            // copy (Modified or Owned) that will supply the right
            // data
            snoop_pkt->setExpressSnoop();
            snoop_pkt->setCacheResponding();

            // this express snoop travels towards the memory, and at
            // every crossbar it is snooped upwards thus reaching
            // every cache in the system
            bool M5_VAR_USED success = memSidePort->sendTimingReq(snoop_pkt);
            // express snoops always succeed
            assert(success);

            // main memory will delete the snoop packet
        }

        // queue for deletion, as opposed to immediate deletion, as
        // the sending cache is still relying on the packet
        pendingDelete.reset(pkt);

        // no need to take any action in this particular cache as an
        // upstream cache has already committed to responding, and
        // either the packet does not need writable (and we can let
        // the cache that set the cache responding flag pass on the
        // line without any need for intervention), or if the packet
        // needs writable it is provided, or we have already sent out
        // any express snoops in the section above
        return true;
    }

    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    // We use lookupLatency here because it is used to specify the latency
    // to access.
    Cycles lat = lookupLatency;
    CacheBlk *blk = NULL;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() calls accessBlock() which can modify lat value.
        satisfied = access(pkt, blk, lat, writebacks);

        // copy writebacks to write buffer here to ensure they logically
        // precede anything happening below
        doWritebacks(writebacks, forward_time);
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is just lat, that is, the value of lookupLatency
    // modified by the access() function, or else just lookupLatency.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
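
    // In short: request_time is when this cache can respond on a hit,
    // and forward_time is when a miss or a writeback can be passed
    // downstream; both include the header delay accumulated so far in
    // the crossbar.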

    // track time of availability of next prefetch, if any
    Tick next_pf_time = MaxTick;

    bool needsResponse = pkt->needsResponse();

    if (satisfied) {
        // should never be satisfying an uncacheable access as we
        // flush and invalidate any existing block as part of the
        // lookup
        assert(!pkt->req->isUncacheable());

        // hit (for all other request types)

        if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
            if (blk)
                blk->status &= ~BlkHWPrefetched;

            // Don't notify on SWPrefetch
            if (!pkt->cmd.isSWPrefetch())
                next_pf_time = prefetcher->notify(pkt);
        }

        if (needsResponse) {
            pkt->makeTimingResponse();
            // @todo: Make someone pay for this
            pkt->headerDelay = pkt->payloadDelay = 0;

            // In this case we are considering request_time that takes
            // into account the delay of the xbar, if any, and just
            // lat, neglecting responseLatency, modelling hit latency
            // just as lookupLatency or the value of lat overridden
            // by access(), that calls the accessBlock() function.
            cpuSidePort->schedTimingResp(pkt, request_time, true);
        } else {
            DPRINTF(Cache, "%s satisfied %s addr %#llx, no response needed\n",
                    __func__, pkt->cmdString(), pkt->getAddr(),
                    pkt->getSize());

            // queue the packet for deletion, as the sending cache is
            // still relying on it; if the block is found in access(),
            // CleanEvict and Writeback messages will be deleted
            // here as well
            pendingDelete.reset(pkt);
        }
    } else {
        // miss

        Addr blk_addr = blockAlign(pkt->getAddr());

        // ignore any existing MSHR if we are dealing with an
        // uncacheable request
        MSHR *mshr = pkt->req->isUncacheable() ? nullptr :
            mshrQueue.findMatch(blk_addr, pkt->isSecure());

        // Software prefetch handling:
        // To keep the core from waiting on data it won't look at
        // anyway, send back a response with dummy data. Miss handling
        // will continue asynchronously. Unfortunately, the core will
        // insist upon freeing original Packet/Request, so we have to
        // create a new pair with a different lifecycle. Note that this
        // processing happens before any MSHR munging on the behalf of
        // this request because this new Request will be the one stored
        // into the MSHRs, not the original.
        if (pkt->cmd.isSWPrefetch()) {
            assert(needsResponse);
            assert(pkt->req->hasPaddr());
            assert(!pkt->req->isUncacheable());

            // There's no reason to add a prefetch as an additional target
            // to an existing MSHR. If an outstanding request is already
            // in progress, there is nothing for the prefetch to do.
            // If this is the case, we don't even create a request at all.
            PacketPtr pf = nullptr;

            if (!mshr) {
                // copy the request and create a new SoftPFReq packet
                RequestPtr req = new Request(pkt->req->getPaddr(),
                                             pkt->req->getSize(),
                                             pkt->req->getFlags(),
                                             pkt->req->masterId());
                pf = new Packet(req, pkt->cmd);
                pf->allocate();
                assert(pf->getAddr() == pkt->getAddr());
                assert(pf->getSize() == pkt->getSize());
            }

            pkt->makeTimingResponse();
            // for debugging, set all the bits in the response data
            // (also keeps valgrind from complaining when debugging settings
            //  print out instruction results)
            std::memset(pkt->getPtr<uint8_t>(), 0xFF, pkt->getSize());
            // request_time is used here, taking into account lat and the
            // delay charged if the packet comes from the xbar.
            cpuSidePort->schedTimingResp(pkt, request_time, true);

            // If an outstanding request is in progress (we found an
            // MSHR) this is set to null
            pkt = pf;
        }
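
        // note that pkt can be NULL at this point: for a software
        // prefetch that found an outstanding MSHR above, the response
        // has already been sent and pkt was replaced by a null pf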
        if (mshr) {
            /// MSHR hit
            /// @note writebacks will be checked in getNextMSHR()
            /// for any conflicting requests to the same block

            //@todo remove hw_pf here

            // Coalesce unless it was a software prefetch (see above).
            if (pkt) {
                assert(!pkt->isWriteback());
                // CleanEvicts corresponding to blocks which have
                // outstanding requests in MSHRs are simply sunk here
                if (pkt->cmd == MemCmd::CleanEvict) {
                    pendingDelete.reset(pkt);
                } else {
                    DPRINTF(Cache, "%s coalescing MSHR for %s addr %#llx "
                            "size %d\n", __func__, pkt->cmdString(),
                            pkt->getAddr(), pkt->getSize());

                    assert(pkt->req->masterId() < system->maxMasters());
                    mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
                    // We use forward_time here because it is the same
                    // considering new targets. We have multiple
                    // requests for the same address here. It
                    // specifies the latency to allocate an internal
                    // buffer and to schedule an event to the queued
                    // port and also takes into account the additional
                    // delay of the xbar.
                    mshr->allocateTarget(pkt, forward_time, order++,
                                         allocOnFill(pkt->cmd));
                    if (mshr->getNumTargets() == numTarget) {
                        noTargetMSHR = mshr;
                        setBlocked(Blocked_NoTargets);
                        // need to be careful with this... if this mshr isn't
                        // ready yet (i.e. time > curTick()), we don't want to
                        // move it ahead of mshrs that are ready
                        // mshrQueue.moveToFront(mshr);
                    }
                }
                // We should call the prefetcher regardless of whether
                // the request is satisfied or not, and regardless of
                // whether it is in the MSHR or not. The request could
                // be a ReadReq hit, but still not satisfied
                // (potentially because of a prior write to the same
                // cache line). So, even when not satisfied, there may
                // be an MSHR already allocated for it, and we need to
                // let the prefetcher know about the request.
                if (prefetcher) {
                    // Don't notify on SWPrefetch
                    if (!pkt->cmd.isSWPrefetch())
                        next_pf_time = prefetcher->notify(pkt);
                }
            }
        } else {
            // no MSHR
            assert(pkt->req->masterId() < system->maxMasters());
            if (pkt->req->isUncacheable()) {
                mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;
            } else {
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
            }

            if (pkt->isEviction() ||
                (pkt->req->isUncacheable() && pkt->isWrite())) {
                // We use forward_time here because there is an
                // uncached memory write, forwarded to WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                if (blk && blk->isValid()) {
                    // should have flushed and have no valid block
                    assert(!pkt->req->isUncacheable());

                    // If we have a write miss to a valid block, we
                    // need to mark the block non-readable. Otherwise
                    // if we allow reads while there's an outstanding
                    // write miss, the read could return stale data
                    // out of the cache block... a more aggressive
                    // system could detect the overlap (if any) and
                    // forward data out of the MSHRs, but we don't do
                    // that yet. Note that we do need to leave the
                    // block valid so that it stays in the cache, in
                    // case we get an upgrade response (and hence no
                    // new data) when the write miss completes.
                    // As long as CPUs do proper store/load forwarding
                    // internally, and have a sufficiently weak memory
                    // model, this is probably unnecessary, but at some
                    // point it must have seemed like we needed it...
        return true;
    }

    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    // We use lookupLatency here because it is used to specify the latency
    // to access.
    Cycles lat = lookupLatency;
    CacheBlk *blk = NULL;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() calls accessBlock() which can modify lat value.
        satisfied = access(pkt, blk, lat, writebacks);

        // copy writebacks to write buffer here to ensure they logically
        // precede anything happening below
        doWritebacks(writebacks, forward_time);
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is just lat, that is, the value of lookupLatency
    // modified by the access() function, or simply lookupLatency if not.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;

    // track time of availability of next prefetch, if any
    Tick next_pf_time = MaxTick;

    bool needsResponse = pkt->needsResponse();

    if (satisfied) {
        // should never be satisfying an uncacheable access as we
        // flush and invalidate any existing block as part of the
        // lookup
        assert(!pkt->req->isUncacheable());

        // hit (for all other request types)

        if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
            if (blk)
                blk->status &= ~BlkHWPrefetched;

            // Don't notify on SWPrefetch
            if (!pkt->cmd.isSWPrefetch())
                next_pf_time = prefetcher->notify(pkt);
        }

        if (needsResponse) {
            pkt->makeTimingResponse();
            // @todo: Make someone pay for this
            pkt->headerDelay = pkt->payloadDelay = 0;

            // In this case we are considering request_time that takes
            // into account the delay of the xbar, if any, and just
            // lat, neglecting responseLatency, modelling hit latency
            // just as lookupLatency or the value of lat overridden
            // by access(), which calls the accessBlock() function.
            cpuSidePort->schedTimingResp(pkt, request_time, true);
        } else {
            DPRINTF(Cache, "%s satisfied %s addr %#llx size %d, "
                    "no response needed\n",
                    __func__, pkt->cmdString(), pkt->getAddr(),
                    pkt->getSize());

            // queue the packet for deletion, as the sending cache is
            // still relying on it; if the block is found in access(),
            // CleanEvict and Writeback messages will be deleted
            // here as well
            pendingDelete.reset(pkt);
        }
    } else {
        // miss

        Addr blk_addr = blockAlign(pkt->getAddr());

        // ignore any existing MSHR if we are dealing with an
        // uncacheable request
        MSHR *mshr = pkt->req->isUncacheable() ? nullptr :
            mshrQueue.findMatch(blk_addr, pkt->isSecure());

        // Software prefetch handling:
        // To keep the core from waiting on data it won't look at
        // anyway, send back a response with dummy data. Miss handling
        // will continue asynchronously. Unfortunately, the core will
        // insist upon freeing original Packet/Request, so we have to
        // create a new pair with a different lifecycle. Note that this
        // processing happens before any MSHR munging on behalf of
        // this request because this new Request will be the one stored
        // into the MSHRs, not the original.
        if (pkt->cmd.isSWPrefetch()) {
            assert(needsResponse);
            assert(pkt->req->hasPaddr());
            assert(!pkt->req->isUncacheable());

            // There's no reason to add a prefetch as an additional target
            // to an existing MSHR. If an outstanding request is already
            // in progress, there is nothing for the prefetch to do.
            // If this is the case, we don't even create a request at all.
            PacketPtr pf = nullptr;

            if (!mshr) {
                // copy the request and create a new SoftPFReq packet
                RequestPtr req = new Request(pkt->req->getPaddr(),
                                             pkt->req->getSize(),
                                             pkt->req->getFlags(),
                                             pkt->req->masterId());
                pf = new Packet(req, pkt->cmd);
                pf->allocate();
                assert(pf->getAddr() == pkt->getAddr());
                assert(pf->getSize() == pkt->getSize());
            }

            pkt->makeTimingResponse();
            // for debugging, set all the bits in the response data
            // (also keeps valgrind from complaining when debugging settings
            // print out instruction results)
            std::memset(pkt->getPtr<uint8_t>(), 0xFF, pkt->getSize());
            // request_time is used here, taking into account lat and the delay
            // charged if the packet comes from the xbar.
            cpuSidePort->schedTimingResp(pkt, request_time, true);

            // If an outstanding request is in progress (we found an
            // MSHR) this is set to null
            pkt = pf;
        }

        if (mshr) {
            /// MSHR hit
            /// @note writebacks will be checked in getNextMSHR()
            /// for any conflicting requests to the same block

            //@todo remove hw_pf here

            // Coalesce unless it was a software prefetch (see above).
            if (pkt) {
                assert(!pkt->isWriteback());
                // CleanEvicts corresponding to blocks which have
                // outstanding requests in MSHRs are simply sunk here
                if (pkt->cmd == MemCmd::CleanEvict) {
                    pendingDelete.reset(pkt);
                } else {
                    DPRINTF(Cache, "%s coalescing MSHR for %s addr %#llx "
                            "size %d\n", __func__, pkt->cmdString(),
                            pkt->getAddr(), pkt->getSize());

                    assert(pkt->req->masterId() < system->maxMasters());
                    mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
                    // We use forward_time here because it is the same
                    // considering new targets. We have multiple
                    // requests for the same address here. It
                    // specifies the latency to allocate an internal
                    // buffer and to schedule an event to the queued
                    // port and also takes into account the additional
                    // delay of the xbar.
                    mshr->allocateTarget(pkt, forward_time, order++,
                                         allocOnFill(pkt->cmd));
                    if (mshr->getNumTargets() == numTarget) {
                        noTargetMSHR = mshr;
                        setBlocked(Blocked_NoTargets);
                        // need to be careful with this... if this mshr isn't
                        // ready yet (i.e. time > curTick()), we don't want to
                        // move it ahead of mshrs that are ready
                        // mshrQueue.moveToFront(mshr);
                    }
                }
                // We should call the prefetcher regardless of whether the
                // request is satisfied, and regardless of whether it is in
                // the MSHR or not. The request could be a ReadReq hit, but
                // still not satisfied (potentially because of a prior write
                // to the same cache line). So even when not satisfied, and
                // with an MSHR already allocated for this block, we need to
                // let the prefetcher know about the request.
                if (prefetcher) {
                    // Don't notify on SWPrefetch
                    if (!pkt->cmd.isSWPrefetch())
                        next_pf_time = prefetcher->notify(pkt);
                }
            }
        } else {
            // no MSHR
            assert(pkt->req->masterId() < system->maxMasters());
            if (pkt->req->isUncacheable()) {
                mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;
            } else {
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
            }

            if (pkt->isEviction() ||
                (pkt->req->isUncacheable() && pkt->isWrite())) {
                // We use forward_time here because there is an
                // uncached memory write, forwarded to WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                if (blk && blk->isValid()) {
                    // should have flushed and have no valid block
                    assert(!pkt->req->isUncacheable());

                    // If we have a write miss to a valid block, we
                    // need to mark the block non-readable. Otherwise
                    // if we allow reads while there's an outstanding
                    // write miss, the read could return stale data
                    // out of the cache block... a more aggressive
                    // system could detect the overlap (if any) and
                    // forward data out of the MSHRs, but we don't do
                    // that yet. Note that we do need to leave the
                    // block valid so that it stays in the cache, in
                    // case we get an upgrade response (and hence no
                    // new data) when the write miss completes.
                    // As long as CPUs do proper store/load forwarding
                    // internally, and have a sufficiently weak memory
                    // model, this is probably unnecessary, but at some
                    // point it must have seemed like we needed it...
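                    // Put differently, the only way to miss on a
                    // write while still holding a valid copy is to
                    // hold it read-only; the asserts below encode
                    // exactly that invariant.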
                    assert(pkt->needsWritable());
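                    // Clearing BlkReadable (below) is what forces any
                    // later read to miss and queue up behind this
                    // write miss in the MSHR instead of reading stale
                    // data out of the block.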
                    assert(!blk->isWritable());
                    blk->status &= ~BlkReadable;
                }
                // Here we are using forward_time, modelling the latency of
                // a miss (outbound) just as forwardLatency, neglecting the
                // lookupLatency component.
                allocateMissBuffer(pkt, forward_time);
            }

            if (prefetcher) {
                // Don't notify on SWPrefetch
                if (!pkt->cmd.isSWPrefetch())
                    next_pf_time = prefetcher->notify(pkt);
            }
        }
    }

    if (next_pf_time != MaxTick)
        schedMemSideSendEvent(next_pf_time);

    return true;
}


// See comment in cache.hh.
PacketPtr
Cache::getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                    bool needsWritable) const
{
    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable()) {
        // note that at the point we see the uncacheable request we
        // flush any block, but there could be an outstanding MSHR,
        // and the cache could have filled again before we actually
        // send out the forwarded uncacheable request (blk could thus
        // be non-null)
        return NULL;
    }

    if (!blkValid &&
        (cpu_pkt->isUpgrade() ||
         cpu_pkt->isEviction())) {
        // Writebacks that weren't allocated in access() and upgrades
        // from upper-level caches that missed completely just go
        // through.
        return NULL;
    }

    assert(cpu_pkt->needsResponse());

    MemCmd cmd;
    // @TODO make useUpgrades a parameter.
    // Note that ownership protocols require upgrade, otherwise a
    // write miss on a shared owned block will generate a ReadExcl,
    // which will clobber the owned copy.
    const bool useUpgrades = true;
    if (blkValid && useUpgrades) {
        // only reason to be here is that blk is read only and we need
        // it to be writable
        assert(needsWritable);
        assert(!blk->isWritable());
        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
               cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
        // Even though this SC will fail, we still need to send out the
        // request and get the data to supply it to other snoopers in the
        // case where the determination that the StoreCond fails is delayed
        // due to all caches not being on the same local bus.
        cmd = MemCmd::SCUpgradeFailReq;
    } else if (cpu_pkt->cmd == MemCmd::WriteLineReq) {
        // forward as invalidate to all other caches, this gives us
        // the line in Exclusive state, and invalidates all other
        // copies
        cmd = MemCmd::InvalidateReq;
    } else {
        // block is invalid
        cmd = needsWritable ? MemCmd::ReadExReq :
            (isReadOnly ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
    }
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);

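    // To summarise the choice of command above: a valid read-only
    // copy is upgraded in place ((SC)UpgradeReq), a failed
    // store-conditional still goes out as SCUpgradeFailReq for the
    // benefit of other snoopers, a whole-line write only needs to
    // invalidate (InvalidateReq), and an invalid block fetches data
    // with ReadExReq if it must end up writable, or with
    // ReadCleanReq/ReadSharedReq (depending on isReadOnly) if not.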
    // if there are upstream caches that have already marked the
    // packet as having sharers (not passing writable), pass that info
    // downstream
    if (cpu_pkt->hasSharers()) {
        // note that cpu_pkt may have spent a considerable time in the
        // MSHR queue and that the information could possibly be out
        // of date, however, there is no harm in conservatively
        // assuming the block has sharers
        pkt->setHasSharers();
        DPRINTF(Cache, "%s passing hasSharers from %s to %s addr %#llx "
                "size %d\n",
                __func__, cpu_pkt->cmdString(), pkt->cmdString(),
                pkt->getAddr(), pkt->getSize());
    }

    // the packet should be block aligned
    assert(pkt->getAddr() == blockAlign(pkt->getAddr()));

    pkt->allocate();
    DPRINTF(Cache, "%s created %s from %s for addr %#llx size %d\n",
            __func__, pkt->cmdString(), cpu_pkt->cmdString(), pkt->getAddr(),
            pkt->getSize());
    return pkt;
}


Tick
Cache::recvAtomic(PacketPtr pkt)
{
    // We are in atomic mode so we pay just for lookupLatency here.
    Cycles lat = lookupLatency;
    // @TODO: make this a parameter
    bool last_level_cache = false;

    // Forward the request if the system is in cache bypass mode.
    if (system->bypassCaches())
        return ticksToCycles(memSidePort->sendAtomic(pkt));

    promoteWholeLineWrites(pkt);

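    // What follows mirrors the timing-mode path in recvTimingReq,
    // except that all latencies are simply accumulated into lat and
    // returned synchronously as lat * clockPeriod() instead of being
    // modelled with scheduled events.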
    if (pkt->cacheResponding()) {
        // have to invalidate ourselves and any lower caches even if
        // upper cache will be responding
        if (pkt->isInvalidate()) {
            CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
            if (blk && blk->isValid()) {
                tags->invalidate(blk);
                blk->invalidate();
                DPRINTF(Cache, "Other cache responding to %s on %#llx (%s):"
1004 " invalidating\n", 1005 pkt->cmdString(), pkt->getAddr(), 1006 pkt->isSecure() ? "s" : "ns"); 1007 } 1008 if (!last_level_cache) {
| 1030 " invalidating\n", 1031 pkt->cmdString(), pkt->getAddr(), 1032 pkt->isSecure() ? "s" : "ns"); 1033 } 1034 if (!last_level_cache) {
|
                DPRINTF(Cache, "Other cache responding to %s on %#llx (%s):"
                        " forwarding\n",
                        pkt->cmdString(), pkt->getAddr(),
                        pkt->isSecure() ? "s" : "ns");
                lat += ticksToCycles(memSidePort->sendAtomic(pkt));
            }
        } else {
            DPRINTF(Cache, "Other cache responding to %s on %#llx: "
                    "not responding\n",
                    pkt->cmdString(), pkt->getAddr());
        }

        return lat * clockPeriod();
    }

    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    CacheBlk *blk = NULL;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);

    if (!satisfied) {
        // MISS

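        // Translate the CPU-side packet into the one we will send
        // downstream; getBusPacket() returns NULL when the request
        // should simply be forwarded unchanged (see is_forward below).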
        PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsWritable());

        bool is_forward = (bus_pkt == NULL);

        if (is_forward) {
            // just forwarding the same request to the next level
            // no local cache operation involved
            bus_pkt = pkt;
        }

        DPRINTF(Cache, "Sending an atomic %s for %#llx (%s)\n",
                bus_pkt->cmdString(), bus_pkt->getAddr(),
                bus_pkt->isSecure() ? "s" : "ns");

#if TRACING_ON
        CacheBlk::State old_state = blk ? blk->status : 0;
#endif

        lat += ticksToCycles(memSidePort->sendAtomic(bus_pkt));

        // We are now dealing with the response handling
        DPRINTF(Cache, "Receive response: %s for addr %#llx (%s) in state %i\n",
                bus_pkt->cmdString(), bus_pkt->getAddr(),
                bus_pkt->isSecure() ? "s" : "ns",
                old_state);

        // If packet was a forward, the response (if any) is already
        // in place in the bus_pkt == pkt structure, so we don't need
        // to do anything. Otherwise, use the separate bus_pkt to
        // generate response to pkt and then delete it.
        if (!is_forward) {
            if (pkt->needsResponse()) {
                assert(bus_pkt->isResponse());
                if (bus_pkt->isError()) {
                    pkt->makeAtomicResponse();
                    pkt->copyError(bus_pkt);
                } else if (pkt->cmd == MemCmd::InvalidateReq) {
                    if (blk) {
                        // invalidate response to a cache that received
                        // an invalidate request
                        satisfyCpuSideRequest(pkt, blk);
                    }
                } else if (pkt->cmd == MemCmd::WriteLineReq) {
                    // note the use of pkt, not bus_pkt here.

                    // write-line request to the cache that promoted
                    // the write to a whole line
                    blk = handleFill(pkt, blk, writebacks,
                                     allocOnFill(pkt->cmd));
                    satisfyCpuSideRequest(pkt, blk);
                } else if (bus_pkt->isRead() ||
                           bus_pkt->cmd == MemCmd::UpgradeResp) {
                    // we're updating cache state to allow us to
                    // satisfy the upstream request from the cache
                    blk = handleFill(bus_pkt, blk, writebacks,
                                     allocOnFill(pkt->cmd));
                    satisfyCpuSideRequest(pkt, blk);
                } else {
                    // we're satisfying the upstream request without
                    // modifying cache state, e.g., a write-through
                    pkt->makeAtomicResponse();
                }
            }
            delete bus_pkt;
        }
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention. If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used the temp block, check to see if it is valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter) first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = (blk->isDirty() || writebackClean) ?
            writebackBlk(blk) : cleanEvictBlk(blk);
        blk->invalidate();
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat * clockPeriod();
}


void
Cache::functionalAccess(PacketPtr pkt, bool fromCpuSide)
{
    if (system->bypassCaches()) {
        // Packets from the memory side are snoop requests and
        // shouldn't happen in bypass mode.
        assert(fromCpuSide);

        // The cache should be flushed if we are in cache bypass mode,
        // so we don't need to check if we need to update anything.
        memSidePort->sendFunctional(pkt);
        return;
    }

    Addr blk_addr = blockAlign(pkt->getAddr());
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found. As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.

    // see if we have data at all (owned or otherwise)
    bool have_data = blk && blk->isValid()
        && pkt->checkFunctional(&cbpw, blk_addr, is_secure, blkSize,
                                blk->data);

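    // have_data only says that we hold some copy of the line; whether
    // we are entitled to answer the access ourselves is decided by
    // have_dirty below.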
    // data we have is dirty if marked as such or if we have an
    // in-service MSHR that is pending a modified line
    bool have_dirty =
        have_data && (blk->isDirty() ||
                      (mshr && mshr->inService && mshr->isPendingModified()));
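
    // The access counts as done once one of the checks below
    // satisfies it: our own dirty copy, an upstream cache, an
    // in-flight MSHR or write buffer entry, or something downstream.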
    bool done = have_dirty
        || cpuSidePort->checkFunctional(pkt)
        || mshrQueue.checkFunctional(pkt, blk_addr)
        || writeBuffer.checkFunctional(pkt, blk_addr)
        || memSidePort->checkFunctional(pkt);

    DPRINTF(Cache, "functional %s %#llx (%s) %s%s%s\n",
            pkt->cmdString(), pkt->getAddr(), is_secure ? "s" : "ns",
            (blk && blk->isValid()) ? "valid " : "",
            have_data ? "data " : "", done ? "done " : "");

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (done) {
        pkt->makeResponse();
    } else {
        // if it came as a request from the CPU side then make sure it
        // continues towards the memory side
        if (fromCpuSide) {
            memSidePort->sendFunctional(pkt);
        } else if (forwardSnoops && cpuSidePort->isSnooping()) {
            // if it came from the memory side, it must be a snoop request
            // and we should only forward it if we are forwarding snoops
            cpuSidePort->sendFunctionalSnoop(pkt);
        }
    }
}


/////////////////////////////////////////////////////
//
// Response handling: responses from the memory side
//
/////////////////////////////////////////////////////


void
Cache::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    // all header delay should be paid for by the crossbar, unless
    // this is a prefetch response from above
    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
             "%s saw a non-zero packet delay\n", name());

    MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
    bool is_error = pkt->isError();

    assert(mshr);

    if (is_error) {
        DPRINTF(Cache, "Cache received packet with error for addr %#llx (%s), "
                "cmd: %s\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns",
                pkt->cmdString());
    }

    DPRINTF(Cache, "Handling response %s for addr %#llx size %d (%s)\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize(),
            pkt->isSecure() ? "s" : "ns");

    MSHRQueue *mq = mshr->queue;
    bool wasFull = mq->isFull();

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = NULL;
    }

    // Initial target is used just for stats
    MSHR::Target *initial_tgt = mshr->getTarget();
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick() - initial_tgt->recvTime;
    PacketList writebacks;
    // We need forward_time here because we have a call of
    // allocateWriteBuffer() that needs this parameter to specify the
    // time to request the bus. In this case we use forward latency
    // because there is a writeback. We also pay here for the
    // headerDelay that is charged for bus latencies if the packet
    // comes from the bus.
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    if (pkt->req->isUncacheable()) {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    } else {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    }

    // upgrade deferred targets if the response has no sharers, and is
    // thus passing writable
    if (!pkt->hasSharers()) {
        mshr->promoteWritable();
    }

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                pkt->getAddr());

        blk = handleFill(pkt, blk, writebacks, mshr->allocOnFill);
        assert(blk != NULL);
    }

    // allow invalidation responses originating from write-line
    // requests to be discarded
    bool is_invalidate = pkt->isInvalidate();

    // First offset for critical word first calculations
    int initial_offset = initial_tgt->pkt->getOffset(blkSize);

    while (mshr->hasTargets()) {
        MSHR::Target *target = mshr->getTarget();
        Packet *tgt_pkt = target->pkt;

        switch (target->source) {
          case MSHR::Target::FromCPU:
            Tick completion_time;
            // Here we charge on completion_time the delay of the xbar if the
            // packet comes from it, charged on headerDelay.
            completion_time = pkt->headerDelay;

            // Software prefetch handling for cache closest to core
            if (tgt_pkt->cmd.isSWPrefetch()) {
                // a software prefetch would have already been ack'd
                // immediately with dummy data so the core would be able to
                // retire it. This request completes right here, so we
                // deallocate it.
                delete tgt_pkt->req;
                delete tgt_pkt;
                break; // skip response
            }

            // unlike the other packet flows, where data is found in other
            // caches or memory and brought back, write-line requests always
            // have the data right away, so the above check for "is fill?"
            // cannot actually be determined until examining the stored MSHR
            // state. We "catch up" with that logic here, which is duplicated
            // from above.
            if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
                assert(!is_error);
                // we got the block in a writable state, so promote
                // any deferred targets if possible
                mshr->promoteWritable();
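                // a WriteLineReq carries its own data, so unlike the
                // fills above the block contents come from the
                // original request packet, not from the response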
                // NB: we use the original packet here and not the response!
                blk = handleFill(tgt_pkt, blk, writebacks, mshr->allocOnFill);
                assert(blk != NULL);

                // treat as a fill, and discard the invalidation
                // response
                is_fill = true;
                is_invalidate = false;
            }

            if (is_fill) {
                satisfyCpuSideRequest(tgt_pkt, blk,
                                      true, mshr->hasPostDowngrade());

                // How many bytes past the first request is this one
                int transfer_offset =
                    tgt_pkt->getOffset(blkSize) - initial_offset;
                if (transfer_offset < 0) {
                    transfer_offset += blkSize;
                }

                // If not critical word (offset) return payloadDelay.
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache or
                // the core.
                completion_time += clockEdge(responseLatency) +
                    (transfer_offset ? pkt->payloadDelay : 0);

                assert(!tgt_pkt->req->isUncacheable());

                assert(tgt_pkt->req->masterId() < system->maxMasters());
                missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] +=
                    completion_time - target->recvTime;
            } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
                // failed StoreCond upgrade
                assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
                       tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
                       tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache or
                // the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                tgt_pkt->req->setExtraData(0);
            } else {
                // not a cache fill, just forwarding response
                // responseLatency is the latency of the return path
                // from lower level caches/memory to the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                if (pkt->isRead() && !is_error) {
                    // sanity check
                    assert(pkt->getAddr() == tgt_pkt->getAddr());
                    assert(pkt->getSize() >= tgt_pkt->getSize());

                    tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
                }
            }
            tgt_pkt->makeTimingResponse();
            // if this packet is an error copy that to the new packet
            if (is_error)
                tgt_pkt->copyError(pkt);
            if (tgt_pkt->cmd == MemCmd::ReadResp &&
                (is_invalidate || mshr->hasPostInvalidate())) {
                // If intermediate cache got ReadRespWithInvalidate,
                // propagate that. Response should not have
                // isInvalidate() set otherwise.
                tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
                DPRINTF(Cache, "%s updated cmd to %s for addr %#llx\n",
                        __func__, tgt_pkt->cmdString(), tgt_pkt->getAddr());
            }
            // Reset the bus additional time as it is now accounted for
            tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
            cpuSidePort->schedTimingResp(tgt_pkt, completion_time, true);
            break;

          case MSHR::Target::FromPrefetcher:
            assert(tgt_pkt->cmd == MemCmd::HardPFReq);
            if (blk)
                blk->status |= BlkHWPrefetched;
            delete tgt_pkt->req;
            delete tgt_pkt;
            break;

          case MSHR::Target::FromSnoop:
            // I don't believe that a snoop can be in an error state
            assert(!is_error);
            // response to snoop request
            DPRINTF(Cache, "processing deferred snoop...\n");
            assert(!(is_invalidate && !mshr->hasPostInvalidate()));
            handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
            break;

          default:
            panic("Illegal target->source enum %d\n", target->source);
        }

        mshr->popTarget();
    }

    if (blk && blk->isValid()) {
        // an invalidate response stemming from a write line request
        // should not invalidate the block, so check if the
        // invalidation should be discarded
        if (is_invalidate || mshr->hasPostInvalidate()) {
            invalidateBlock(blk);
        } else if (mshr->hasPostDowngrade()) {
            blk->status &= ~BlkWritable;
        }
    }

    if (mshr->promoteDeferredTargets()) {
        // avoid later read getting stale data while write miss is
        // outstanding... see comment in timingAccess()
        if (blk) {
            blk->status &= ~BlkReadable;
        }
        mq = mshr->queue;
        mq->markPending(mshr);
        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
    } else {
        mq->deallocate(mshr);
        if (wasFull && !mq->isFull()) {
            clearBlocked((BlockedCause)mq->index);
        }

        // Request the bus for a prefetch if this deallocation freed enough
        // MSHRs for a prefetch to take place
        if (prefetcher && mq == &mshrQueue && mshrQueue.canPrefetch()) {
            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
                                         clockEdge());
            if (next_pf_time != MaxTick)
                schedMemSideSendEvent(next_pf_time);
        }
    }
    // reset the xbar additional timing as it is now accounted for
    pkt->headerDelay = pkt->payloadDelay = 0;

    // copy writebacks to write buffer
    doWritebacks(writebacks, forward_time);

    // if we used the temp block, check to see if it is valid and then
    // clear it out
    if (blk == tempBlock && tempBlock->isValid()) {
        // We use forwardLatency here because we are copying
        // Writebacks/CleanEvicts to write buffer. It specifies the latency to
        // allocate an internal buffer and to schedule an event to the
        // queued port.
        if (blk->isDirty() || writebackClean) {
            PacketPtr wbPkt = writebackBlk(blk);
            allocateWriteBuffer(wbPkt, forward_time);
            // Set BLOCK_CACHED flag if cached above.
            if (isCachedAbove(wbPkt))
                wbPkt->setBlockCached();
        } else {
            PacketPtr wcPkt = cleanEvictBlk(blk);
            // Check to see if the block is cached above. If not,
            // allocate a write buffer entry.
            if (isCachedAbove(wcPkt))
                delete wcPkt;
            else
                allocateWriteBuffer(wcPkt, forward_time);
        }
        blk->invalidate();
    }

    DPRINTF(Cache, "Leaving %s with %s for addr %#llx\n", __func__,
            pkt->cmdString(), pkt->getAddr());
    delete pkt;
}

PacketPtr
Cache::writebackBlk(CacheBlk *blk)
{
    chatty_assert(!isReadOnly || writebackClean,
                  "Writeback from read-only cache");
    assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));

    writebacks[Request::wbMasterId]++;

    Request *req = new Request(tags->regenerateBlkAddr(blk->tag, blk->set),
                               blkSize, 0, Request::wbMasterId);
    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);
    blk->task_id = ContextSwitchTaskId::Unknown;
    blk->tickInserted = curTick();

    PacketPtr pkt =
        new Packet(req, blk->isDirty() ?
                   MemCmd::WritebackDirty : MemCmd::WritebackClean);

    DPRINTF(Cache, "Create Writeback %#llx writable: %d, dirty: %d\n",
            pkt->getAddr(), blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not reporting sharers means we pass the block in Modified
        // state; mark our own block non-writable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    std::memcpy(pkt->getPtr<uint8_t>(), blk->data, blkSize);

    return pkt;
}

PacketPtr
Cache::cleanEvictBlk(CacheBlk *blk)
{
    assert(!writebackClean);
    assert(blk && blk->isValid() && !blk->isDirty());
    // Creating a zero sized write, a message to the snoop filter
    Request *req =
        new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0,
                    Request::wbMasterId);
    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);
    blk->task_id = ContextSwitchTaskId::Unknown;
    blk->tickInserted = curTick();

    PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
    pkt->allocate();
    DPRINTF(Cache, "%s%s %x Create CleanEvict\n", pkt->cmdString(),
            pkt->req->isInstFetch() ? " (ifetch)" : "",
            pkt->getAddr());

    return pkt;
}

void
Cache::memWriteback()
{
    CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor);
    tags->forEachBlk(visitor);
}

void
Cache::memInvalidate()
{
    CacheBlkVisitorWrapper visitor(*this, &Cache::invalidateVisitor);
    tags->forEachBlk(visitor);
}

bool
Cache::isDirty() const
{
    CacheBlkIsDirtyVisitor visitor;
    tags->forEachBlk(visitor);

    return visitor.isDirty();
}

bool
Cache::writebackVisitor(CacheBlk &blk)
{
    if (blk.isDirty()) {
        assert(blk.isValid());

        Request request(tags->regenerateBlkAddr(blk.tag, blk.set),
                        blkSize, 0, Request::funcMasterId);
        request.taskId(blk.task_id);

        Packet packet(&request, MemCmd::WriteReq);
        packet.dataStatic(blk.data);

        memSidePort->sendFunctional(&packet);

        blk.status &= ~BlkDirty;
    }

    return true;
}

bool
Cache::invalidateVisitor(CacheBlk &blk)
{
    if (blk.isDirty())
        warn_once("Invalidating dirty cache lines. Expect things to break.\n");

    if (blk.isValid()) {
        assert(!blk.isDirty());
        tags->invalidate(&blk);
        blk.invalidate();
    }

    return true;
}

CacheBlk*
Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks)
{
    CacheBlk *blk = tags->findVictim(addr);

    // It is valid to return NULL if there is no victim
    if (!blk)
        return nullptr;

    if (blk->isValid()) {
        Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
        MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
        if (repl_mshr) {
            // must be an outstanding upgrade request
            // on a block we're about to replace...
            assert(!blk->isWritable() || blk->isDirty());
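            // illustrative example (one possible scenario): we hold
            // the block in Shared state and have an in-flight
            // UpgradeReq/SCUpgradeReq for it in the MSHR queue;
            // evicting the block now would leave that upgrade with
            // nothing to upgrade, so we give up on the replacement
            // instead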
            assert(repl_mshr->needsWritable());
            // too hard to replace block with transient state
            // allocation failed, block not inserted
            return NULL;
        } else {
            DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx "
                    "(%s): %s\n", repl_addr, blk->isSecure() ? "s" : "ns",
                    addr, is_secure ? "s" : "ns",
                    blk->isDirty() ? "writeback" : "clean");

            // Will send up Writeback/CleanEvict snoops via isCachedAbove
            // when pushing this writeback list into the write buffer.
            if (blk->isDirty() || writebackClean) {
                // Save writeback packet for handling by caller
                writebacks.push_back(writebackBlk(blk));
            } else {
                writebacks.push_back(cleanEvictBlk(blk));
            }
        }
    }

    return blk;
}

void
Cache::invalidateBlock(CacheBlk *blk)
{
    if (blk != tempBlock)
        tags->invalidate(blk);
    blk->invalidate();
}

// Note that the reason we return a list of writebacks rather than
// inserting them directly in the write buffer is that this function
// is called by both atomic and timing-mode accesses, and in atomic
// mode we don't mess with the write buffer (we just perform the
// writebacks atomically once the original request is complete).
CacheBlk*
Cache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
                  bool allocate)
{
    assert(pkt->isResponse() || pkt->cmd == MemCmd::WriteLineReq);
    Addr addr = pkt->getAddr();
    bool is_secure = pkt->isSecure();
#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    // When handling a fill, discard any CleanEvicts for the
    // same address in the write buffer.
    Addr M5_VAR_USED blk_addr = blockAlign(pkt->getAddr());
    std::vector<MSHR *> M5_VAR_USED wbs;
    assert(!writeBuffer.findMatches(blk_addr, is_secure, wbs));

    if (blk == NULL) {
        // better have read new data...
        assert(pkt->hasData());

        // only read responses and write-line requests have data;
        // note that we don't write the data here for write-line - that
        // happens in the subsequent satisfyCpuSideRequest.
        assert(pkt->isRead() || pkt->cmd == MemCmd::WriteLineReq);

        // need to do a replacement if allocating, otherwise we stick
        // with the temporary storage
        blk = allocate ? allocateBlock(addr, is_secure, writebacks) : NULL;

        if (blk == NULL) {
            // No replaceable block or a mostly exclusive
            // cache... just use temporary storage to complete the
            // current request and then get rid of it
            assert(!tempBlock->isValid());
            blk = tempBlock;
            tempBlock->set = tags->extractSet(addr);
            tempBlock->tag = tags->extractTag(addr);
            // @todo: set security state as well...
            DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
                    is_secure ? "s" : "ns");
        } else {
            tags->insertBlock(pkt, blk);
        }

        // we should never be overwriting a valid block
        assert(!blk->isValid());
    } else {
        // existing block... probably an upgrade
        assert(blk->tag == tags->extractTag(addr));
        // either we're getting new data or the block should already be valid
        assert(pkt->hasData() || blk->isValid());
        // don't clear block status... if block is already dirty we
        // don't want to lose that
    }

    if (is_secure)
        blk->status |= BlkSecure;
    blk->status |= BlkValid | BlkReadable;

    // sanity check for whole-line writes, which should always be
    // marked as writable as part of the fill, and then later marked
    // dirty as part of satisfyCpuSideRequest
    if (pkt->cmd == MemCmd::WriteLineReq) {
        assert(!pkt->hasSharers());
        // at the moment other caches do not respond to the
        // invalidation requests corresponding to a whole-line write
        assert(!pkt->cacheResponding());
    }

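    // As an illustrative summary (the authoritative rules live in
    // Packet::setCacheResponding and the code below), the two
    // response flags map onto the allocated state roughly as:
    //
    //   hasSharers | cacheResponding | resulting block state
    //   -----------+-----------------+------------------------------
    //       0      |        0        | Exclusive (writable, clean)
    //       0      |        1        | Modified  (writable, dirty)
    //       1      |       0/1       | Shared    (not writable, clean)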
    // Here we deal with setting the appropriate state of the line.
    // We start by looking at the hasSharers flag, and ignore the
    // cacheResponding flag (which normally signals dirty data) if
    // the packet has sharers. Thus the line is never allocated as
    // Owned (dirty but not writable), and always ends up being
    // either Shared, Exclusive or Modified; see
    // Packet::setCacheResponding for more details
    if (!pkt->hasSharers()) {
        // we could get a writable line from memory (rather than a
        // cache) even in a read-only cache, note that we set this bit
        // even for a read-only cache, possibly revisit this decision
        blk->status |= BlkWritable;

        // check if we got this via cache-to-cache transfer (i.e., from a
        // cache that had the block in Modified or Owned state)
        if (pkt->cacheResponding()) {
            // we got the block in Modified state, and invalidated the
            // owner's copy
            blk->status |= BlkDirty;

            chatty_assert(!isReadOnly, "Should never see dirty snoop response "
                          "in read-only cache %s\n", name());
        }
    }

    DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
            addr, is_secure ? "s" : "ns", old_state, blk->print());

    // if we got new data, copy it in (checking for a read response
    // and a response that has data is the same in the end)
    if (pkt->isRead()) {
        // sanity checks
        assert(pkt->hasData());
        assert(pkt->getSize() == blkSize);

        std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
    }
    // We pay for fillLatency here.
    blk->whenReady = clockEdge() + fillLatency * clockPeriod() +
        pkt->payloadDelay;

    return blk;
}


/////////////////////////////////////////////////////
//
// Snoop path: requests coming in from the memory side
//
/////////////////////////////////////////////////////

void
Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
                              bool already_copied, bool pending_inval)
{
    // sanity check
    assert(req_pkt->isRequest());
    assert(req_pkt->needsResponse());

    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
            req_pkt->cmdString(), req_pkt->getAddr(), req_pkt->getSize());
    // timing-mode snoop responses require a new packet, unless we
    // already made a copy...
    PacketPtr pkt = req_pkt;
    if (!already_copied)
        // do not clear flags, and allocate space for data if the
        // packet needs it (the only packets that carry data are read
        // responses)
        pkt = new Packet(req_pkt, false, req_pkt->isRead());

    assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
           pkt->hasSharers());
    pkt->makeTimingResponse();
    if (pkt->isRead()) {
        pkt->setDataFromBlock(blk_data, blkSize);
    }
    if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
        // Assume we defer a response to a read from a far-away cache
        // A, then later defer a ReadExcl from a cache B on the same
        // bus as us. We'll assert cacheResponding in both cases, but
        // in the latter case cacheResponding will keep the
        // invalidation from reaching cache A. This special response
        // tells cache A that it gets the block to satisfy its read,
        // but must immediately invalidate it.
        pkt->cmd = MemCmd::ReadRespWithInvalidate;
    }
    // Here we consider forward_time, paying for just forward latency and
    // also charging the delay provided by the xbar.
    // forward_time is used as send_time in next allocateWriteBuffer().
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    DPRINTF(Cache, "%s created response: %s addr %#llx size %d tick: %lu\n",
            __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize(),
            forward_time);
    memSidePort->schedTimingSnoopResp(pkt, forward_time, true);
}

uint32_t
Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
                   bool is_deferred, bool pending_inval)
{
    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());
    // deferred snoops can only happen in timing mode
    assert(!(is_deferred && !is_timing));
    // pending_inval only makes sense on deferred snoops
    assert(!(pending_inval && !is_deferred));
    assert(pkt->isRequest());

    // the packet may get modified if we or a forwarded snooper
    // responds in atomic mode, so remember a few things about the
    // original packet up front
    bool invalidate = pkt->isInvalidate();
    bool M5_VAR_USED needs_writable = pkt->needsWritable();
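    // (M5_VAR_USED merely silences unused-variable warnings in
    // builds where the assert consuming needs_writable below is
    // compiled out)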

    uint32_t snoop_delay = 0;

    if (forwardSnoops) {
        // first propagate snoop upward to see if anyone above us wants to
        // handle it. save & restore packet src since it will get
        // rewritten to be relative to cpu-side bus (if any)
        bool alreadyResponded = pkt->cacheResponding();
        if (is_timing) {
            // copy the packet so that we can clear any flags before
            // forwarding it upwards, we also allocate data (passing
            // the pointer along in case of static data), in case
            // there is a snoop hit in upper levels
            Packet snoopPkt(pkt, true, true);
            snoopPkt.setExpressSnoop();
            // the snoop packet does not need to wait any additional
            // time
            snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
            cpuSidePort->sendTimingSnoopReq(&snoopPkt);

            // add the header delay (including crossbar and snoop
            // delays) of the upward snoop to the snoop delay for this
            // cache
            snoop_delay += snoopPkt.headerDelay;

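            // illustrative arithmetic (numbers made up): if the
            // upward express snoop picked up two cycles' worth of
            // crossbar header delay, those ticks accumulate into
            // snoop_delay here; recvTimingSnoopReq and
            // recvAtomicSnoop later add this cache's own lookup
            // latency on top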
            if (snoopPkt.cacheResponding()) {
                // cache-to-cache response from some upper cache
                assert(!alreadyResponded);
                pkt->setCacheResponding();
            }
            // upstream cache has the block, or has an outstanding
            // MSHR, pass the flag on
            if (snoopPkt.hasSharers()) {
                pkt->setHasSharers();
            }
            // If this request is a prefetch or clean evict and an upper level
            // signals block present, make sure to propagate the block
            // presence to the requester.
            if (snoopPkt.isBlockCached()) {
                pkt->setBlockCached();
            }
        } else {
            cpuSidePort->sendAtomicSnoop(pkt);
            if (!alreadyResponded && pkt->cacheResponding()) {
                // cache-to-cache response from some upper cache:
                // forward response to original requester
                assert(pkt->isResponse());
            }
        }
    }

    if (!blk || !blk->isValid()) {
        DPRINTF(Cache, "%s snoop miss for %s addr %#llx size %d\n",
                __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
        return snoop_delay;
    } else {
        DPRINTF(Cache, "%s snoop hit for %s for addr %#llx size %d, "
                "old state is %s\n", __func__, pkt->cmdString(),
                pkt->getAddr(), pkt->getSize(), blk->print());
    }

    chatty_assert(!(isReadOnly && blk->isDirty()),
                  "Should never have a dirty block in a read-only cache %s\n",
                  name());

    // We may end up modifying both the block state and the packet (if
    // we respond in atomic mode), so just figure out what to do now
    // and then do it later. If we find dirty data while snooping for
    // an invalidate, we don't need to send a response. The
    // invalidation itself is taken care of below.
    bool respond = blk->isDirty() && pkt->needsResponse() &&
        pkt->cmd != MemCmd::InvalidateReq;
    bool have_writable = blk->isWritable();
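    // a worked example of the decision above (illustrative only): a
    // ReadSharedReq snoop hitting a Modified line yields respond =
    // true and have_writable = true; further down we then supply the
    // dirty data (setCacheResponding/setResponderHadWritable), set
    // hasSharers and clear BlkWritable, leaving our copy Owned while
    // the requester fills in Shared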

    // Invalidate any prefetches from below that would strip write permissions
    // MemCmd::HardPFReq is only observed by upstream caches. After missing
    // above and in its own cache, a new MemCmd::ReadReq is created that
    // downstream caches observe.
    if (pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
                "from lower cache\n", pkt->getAddr(), pkt->cmdString());
        pkt->setBlockCached();
        return snoop_delay;
    }

    if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
        // reading without requiring the line in a writable state,
        // note that we retain the block as Owned if it is Modified
        // (dirty data), with the response taken care of below, and
        // otherwise simply downgrade from Exclusive to Shared (or
        // remain in Shared)
        assert(!needs_writable);
        pkt->setHasSharers();
        blk->status &= ~BlkWritable;
    }

    if (respond) {
        // prevent anyone else from responding, cache as well as
        // memory, and also prevent any memory from even seeing the
        // request
        pkt->setCacheResponding();
        if (have_writable) {
            // inform the cache hierarchy that this cache had the line
            // in the Modified state so that we avoid unnecessary
            // invalidations (see Packet::setResponderHadWritable)
            pkt->setResponderHadWritable();

            // in the case of an uncacheable request there is no point
            // in setting the responderHadWritable flag, but since the
            // recipient does not care there is no harm in doing so
        } else {
            // if the packet has needsWritable set we invalidate our
            // copy below and all other copies will be invalidated
            // through express snoops, and if needsWritable is not set
            // we already called setHasSharers above
        }

        if (is_timing) {
            doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
        } else {
            pkt->makeAtomicResponse();
            pkt->setDataFromBlock(blk->data, blkSize);
        }
    }

    if (!respond && is_timing && is_deferred) {
        // if it's a deferred timing snoop to which we are not
        // responding, then we've made a copy of both the request and
        // the packet, delete them here
        assert(pkt->needsResponse());
        delete pkt->req;
        delete pkt;
    }

    // Do this last in case it deallocates block data or something
    // like that
    if (invalidate) {
        invalidateBlock(blk);
    }

    DPRINTF(Cache, "new state is %s\n", blk->print());

    return snoop_delay;
}


void
Cache::recvTimingSnoopReq(PacketPtr pkt)
{
    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    // Snoops shouldn't happen when bypassing caches
    assert(!system->bypassCaches());

    // no need to snoop requests that are not in range
    if (!inRange(pkt->getAddr())) {
        return;
    }

    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);

    Addr blk_addr = blockAlign(pkt->getAddr());
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    // Update the latency cost of the snoop so that the crossbar can
    // account for it. Do not overwrite what other neighbouring caches
    // have already done, rather take the maximum. The update is
    // tentative, for cases where we return before an upward snoop
    // happens below.
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
                                         lookupLatency * clockPeriod());

    // Inform request (Prefetch, CleanEvict or Writeback) from below of
    // MSHR hit via setBlockCached.
    if (mshr && pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Setting block cached for %s from "
                "lower cache on mshr hit %#x\n",
                pkt->cmdString(), pkt->getAddr());
        pkt->setBlockCached();
        return;
    }

    // Let the MSHR itself track the snoop and decide whether we want
    // to go ahead and do the regular cache snoop
    if (mshr && mshr->handleSnoop(pkt, order++)) {
        DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s). "
                "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
                mshr->print());

        if (mshr->getNumTargets() > numTarget)
            warn("allocating bonus target for snoop"); //handle later
        return;
    }

    // We also need to check the writeback buffers and handle those
    std::vector<MSHR *> writebacks;
    if (writeBuffer.findMatches(blk_addr, is_secure, writebacks)) {
        DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
                pkt->getAddr(), is_secure ? "s" : "ns");

        // Look through writebacks for any cacheable writes.
        // We should only ever find a single match
        assert(writebacks.size() == 1);
        MSHR *wb_entry = writebacks[0];
        // Expect to see only Writebacks and/or CleanEvicts here, both of
        // which should not be generated for uncacheable data.
        assert(!wb_entry->isUncacheable());
        // There should only be a single request responsible for generating
        // Writebacks/CleanEvicts.
        assert(wb_entry->getNumTargets() == 1);
        PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
        assert(wb_pkt->isEviction());

        if (pkt->isEviction()) {
            // if the block is found in the write queue, set the BLOCK_CACHED
            // flag for Writeback/CleanEvict snoop. On return the snoop will
            // propagate the BLOCK_CACHED flag in Writeback packets and prevent
            // any CleanEvicts from travelling down the memory hierarchy.
            pkt->setBlockCached();
            DPRINTF(Cache, "Squashing %s from lower cache on writequeue hit"
                    " %#x\n", pkt->cmdString(), pkt->getAddr());
            return;
        }

        if (wb_pkt->cmd == MemCmd::WritebackDirty) {
            // we have dirty data, and so will proceed to respond
            pkt->setCacheResponding();
            if (!pkt->needsWritable()) {
                // the packet should end up in the Shared state (non
                // writable) on the completion of the fill
                pkt->setHasSharers();
                // similarly, the writeback is no longer passing
                // writeable (the receiving cache should consider the
                // block Owned rather than Modified)
                wb_pkt->setHasSharers();
            } else {
                // we need to invalidate our copy. we do that
                // below.
                assert(pkt->isInvalidate());
            }
            doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
                                   false, false);
        } else {
            // on hitting a clean writeback we play it safe and do not
            // provide a response, the block may be dirty somewhere
            // else
            assert(wb_pkt->isCleanEviction());
            // The cache technically holds the block until the
            // corresponding message reaches the crossbar
            // below. Therefore when a snoop encounters a CleanEvict
            // or WritebackClean message we must call
            // setHasSharers (just like when it encounters a
            // Writeback) to avoid the snoop filter prematurely
            // clearing the holder bit in the crossbar below
            if (!pkt->needsWritable()) {
                pkt->setHasSharers();
                // the writeback is no longer passing writeable (the
                // receiving cache should consider the block Owned
                // rather than Modified)
                wb_pkt->setHasSharers();
            } else {
                assert(pkt->isInvalidate());
            }
        }

        if (pkt->isInvalidate()) {
            // Invalidation trumps our writeback... discard here
            // Note: markInService will remove entry from writeback buffer.
            markInService(wb_entry, false);
            delete wb_pkt;
        }
    }

    // If this was a shared writeback, there may still be
    // other shared copies above that require invalidation.
    // We could be more selective and return here if the
    // request is non-exclusive or if the writeback is
    // exclusive.
    uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false);

    // Override what we did when we first saw the snoop, as we now
    // also have the cost of the upwards snoops to account for
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay +
                                         lookupLatency * clockPeriod());
}

bool
Cache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
{
    // Express snoop responses from master to slave, e.g., from L1 to L2
    cache->recvTimingSnoopResp(pkt);
    return true;
}

Tick
Cache::recvAtomicSnoop(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!system->bypassCaches());

    // no need to snoop requests that are not in range.
    if (!inRange(pkt->getAddr())) {
        return 0;
    }

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
    uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
    return snoop_delay + lookupLatency * clockPeriod();
}


MSHR *
Cache::getNextMSHR()
{
    // Check both MSHR queue and write buffer for potential requests,
    // note that null does not mean there is no request, it could
    // simply be that it is not ready
    MSHR *miss_mshr = mshrQueue.getNextMSHR();
    MSHR *write_mshr = writeBuffer.getNextMSHR();

    // If we got a write buffer request ready, first priority is a
    // full write buffer, otherwise we favour the miss requests
    if (write_mshr &&
        ((writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) ||
         !miss_mshr)) {
        // need to search MSHR queue for conflicting earlier miss.
        MSHR *conflict_mshr =
            mshrQueue.findPending(write_mshr->blkAddr,
                                  write_mshr->isSecure);

        if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
            // Service misses in order until conflict is cleared.
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue write
        return write_mshr;
    } else if (miss_mshr) {
        // need to check for conflicting earlier writeback
        MSHR *conflict_mshr =
            writeBuffer.findPending(miss_mshr->blkAddr,
                                    miss_mshr->isSecure);
        if (conflict_mshr) {
            // not sure why we don't check order here... it was in the
            // original code but commented out.

            // The only way this happens is if we are
            // doing a write and we didn't have permissions
            // then subsequently saw a writeback (owned got evicted)
            // We need to make sure to perform the writeback first
            // To preserve the dirty data, then we can issue the write

            // should we return write_mshr here instead? I.e. do we
            // have to flush writes in order? I don't think so... not
            // for Alpha anyway. Maybe for x86?
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue read
        return miss_mshr;
    }

    // fall through... no pending requests. Try a prefetch.
    assert(!miss_mshr && !write_mshr);
    if (prefetcher && mshrQueue.canPrefetch()) {
        // If we have a miss queue slot, we can try a prefetch
        PacketPtr pkt = prefetcher->getPacket();
        if (pkt) {
            Addr pf_addr = blockAlign(pkt->getAddr());
            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
                // Update statistic on number of prefetches issued
                // (hwpf_mshr_misses)
                assert(pkt->req->masterId() < system->maxMasters());
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // allocate an MSHR and return it, note
                // that we send the packet straight away, so do not
                // schedule the send
                return allocateMissBuffer(pkt, curTick(), false);
            } else {
                // free the request and packet
                delete pkt->req;
                delete pkt;
            }
        }
    }

    return NULL;
}

bool
Cache::isCachedAbove(PacketPtr pkt, bool is_timing) const
{
    if (!forwardSnoops)
        return false;
    // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
    // Writeback snoops into upper level caches to check for copies of the
    // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict
    // packet, the cache can inform the crossbar below of presence or absence
    // of the block.
    if (is_timing) {
        Packet snoop_pkt(pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // Assert that packet is either Writeback or CleanEvict and not a
        // prefetch request because prefetch requests need an MSHR and may
        // generate a snoop response.
        assert(pkt->isEviction());
        snoop_pkt.senderState = NULL;
        cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
        // Writeback/CleanEvict snoops do not generate a snoop response.
        assert(!(snoop_pkt.cacheResponding()));
        return snoop_pkt.isBlockCached();
    } else {
        cpuSidePort->sendAtomicSnoop(pkt);
        return pkt->isBlockCached();
    }
}

PacketPtr
Cache::getTimingPacket()
{
    MSHR *mshr = getNextMSHR();

    if (mshr == NULL) {
        return NULL;
    }

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;
    PacketPtr pkt = NULL;

    DPRINTF(CachePort, "%s %s for addr %#llx size %d\n", __func__,
            tgt_pkt->cmdString(), tgt_pkt->getAddr(), tgt_pkt->getSize());

    CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);

    if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
        // We need to check the caches above us to verify that
        // they don't have a copy of this block in the dirty state
        // at the moment. Without this check we could get a stale
        // copy from memory that might get used in place of the
        // dirty one.
        Packet snoop_pkt(tgt_pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // We are sending this packet upwards, but if it hits we will
        // get a snoop response that we end up treating just like a
        // normal response, hence it needs the MSHR as its sender
        // state
        snoop_pkt.senderState = mshr;
        cpuSidePort->sendTimingSnoopReq(&snoop_pkt);

        // Check to see if the prefetch was squashed by an upper cache
        // (to prevent us from grabbing the line), or if a writeback
        // arrived between the time the prefetch was placed in the
        // MSHRs and when it was selected to be sent.

        // It is important to check cacheResponding before
        // prefetchSquashed. If another cache has committed to
        // responding, it will be sending a dirty response which will
        // arrive at the MSHR allocated for this request. Checking the
        // prefetchSquash first may result in the MSHR being
        // prematurely deallocated.
        if (snoop_pkt.cacheResponding()) {
            auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
            assert(r.second);
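            // bookkeeping sketch: outstandingSnoop remembers that
            // the eventual snoop response for this request is really
            // our fill response; recvTimingSnoopResp (earlier in
            // this file) consults this set to tell the two apart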

            // if we are getting a snoop response with no sharers it
            // will be allocated as Modified
            bool pending_modified_resp = !snoop_pkt.hasSharers();
            markInService(mshr, pending_modified_resp);

            DPRINTF(Cache, "Upward snoop of prefetch for addr"
                    " %#x (%s) hit\n",
                    tgt_pkt->getAddr(), tgt_pkt->isSecure() ? "s" : "ns");
            return NULL;
        }

        if (snoop_pkt.isBlockCached() || blk != NULL) {
            DPRINTF(Cache, "Block present, prefetch squashed by cache. "
                    "Deallocating mshr target %#x.\n",
                    mshr->blkAddr);
            // Deallocate the mshr target
            if (mshr->queue->forceDeallocateTarget(mshr)) {
                // Clear the blocked flag if this deallocation freed an
                // MSHR when all had previously been utilized
                clearBlocked((BlockedCause)(mshr->queue->index));
            }
            return NULL;
        }
    }

    if (mshr->isForwardNoResponse()) {
        // no response expected, just forward packet as it is
        assert(tags->findBlock(mshr->blkAddr, mshr->isSecure) == NULL);
        pkt = tgt_pkt;
    } else {
        pkt = getBusPacket(tgt_pkt, blk, mshr->needsWritable());
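        // note: getBusPacket may return NULL if the target is not a
        // cache block request; the forwarding logic below then makes
        // a copy of the target packet to send, while the original is
        // kept for response handling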

        mshr->isForward = (pkt == NULL);

        if (mshr->isForward) {
            // not a cache block request, but a response is expected
            // make copy of current packet to forward, keep current
            // copy for response handling
            pkt = new Packet(tgt_pkt, false, true);
            if (pkt->isWrite()) {
                pkt->setData(tgt_pkt->getConstPtr<uint8_t>());
            }
        }
    }

    assert(pkt != NULL);
    // play it safe and append (rather than set) the sender state, as
    // forwarded packets may already have existing state
    pkt->pushSenderState(mshr);
    return pkt;
}


Tick
Cache::nextMSHRReadyTime() const
{
    Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
                              writeBuffer.nextMSHRReadyTime());

    // Don't signal prefetch ready time if no MSHRs available
    // Will signal once enough MSHRs are deallocated
    if (prefetcher && mshrQueue.canPrefetch()) {
        nextReady = std::min(nextReady,
                             prefetcher->nextPrefetchReadyTime());
    }

    return nextReady;
}

void
Cache::serialize(CheckpointOut &cp) const
{
    bool dirty(isDirty());

    if (dirty) {
        warn("*** The cache still contains dirty data. ***\n");
        warn("    Make sure to drain the system using the correct flags.\n");
        warn("    This checkpoint will not restore correctly and dirty data "
             "in the cache will be lost!\n");
    }

    // Since we don't checkpoint the data in the cache, any dirty data
    // will be lost when restoring from a checkpoint of a system that
    // wasn't drained properly. Flag the checkpoint as invalid if the
    // cache contains dirty data.
    bool bad_checkpoint(dirty);
    SERIALIZE_SCALAR(bad_checkpoint);
}

void
Cache::unserialize(CheckpointIn &cp)
{
    bool bad_checkpoint;
    UNSERIALIZE_SCALAR(bad_checkpoint);
    if (bad_checkpoint) {
        fatal("Restoring from checkpoints with dirty caches is not supported "
              "in the classic memory system. Please remove any caches or "
              "drain them properly before taking checkpoints.\n");
    }
}

///////////////
//
// CpuSidePort
//
///////////////

AddrRangeList
Cache::CpuSidePort::getAddrRanges() const
{
    return cache->getAddrRanges();
}

bool
Cache::CpuSidePort::recvTimingReq(PacketPtr pkt)
{
    assert(!cache->system->bypassCaches());

    bool success = false;

    // always let packets through if an upstream cache has committed
    // to responding, even if blocked (we should technically look at
    // the isExpressSnoop flag, but it is set by the cache itself, and
    // consequently we have to rely on the cacheResponding flag)
    if (pkt->cacheResponding()) {
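        // such packets must make forward progress: the upstream
        // cache has already committed to responding, so we cannot
        // afford to stall the request here, hence the assert on the
        // bypass outcome below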
        // do not change the current retry state
        bool M5_VAR_USED bypass_success = cache->recvTimingReq(pkt);
        assert(bypass_success);
        return true;
    } else if (blocked || mustSendRetry) {
        // either already committed to send a retry, or blocked
        success = false;
    } else {
        // pass it on to the cache, and let the cache decide if we
        // have to retry or not
        success = cache->recvTimingReq(pkt);
    }

    // remember if we have to retry
    mustSendRetry = !success;
    return success;
}

Tick
Cache::CpuSidePort::recvAtomic(PacketPtr pkt)
{
    return cache->recvAtomic(pkt);
}

void
Cache::CpuSidePort::recvFunctional(PacketPtr pkt)
{
    // functional request
    cache->functionalAccess(pkt, true);
}

Cache::
CpuSidePort::CpuSidePort(const std::string &_name, Cache *_cache,
                         const std::string &_label)
    : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache)
{
}

Cache*
CacheParams::create()
{
    assert(tags);

    return new Cache(this);
}

///////////////
//
// MemSidePort
//
///////////////

bool
Cache::MemSidePort::recvTimingResp(PacketPtr pkt)
{
    cache->recvTimingResp(pkt);
    return true;
}

// Express snooping requests to memside port
void
Cache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
{
    // handle snooping requests
    cache->recvTimingSnoopReq(pkt);
}

Tick
Cache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
{
    return cache->recvAtomicSnoop(pkt);
}

void
Cache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
{
    // functional snoop (note that in contrast to atomic we don't have
    // a specific functionalSnoop method, as they have the same
    // behaviour regardless)
    cache->functionalAccess(pkt, false);
}

void
Cache::CacheReqPacketQueue::sendDeferredPacket()
{
    // sanity check
    assert(!waitingOnRetry);

    // there should never be any deferred request packets in the
    // queue, instead we rely on the cache to provide the packets
    // from the MSHR queue or write queue
    assert(deferredPacketReadyTime() == MaxTick);

    // check for request packets (requests & writebacks)
    PacketPtr pkt = cache.getTimingPacket();
    if (pkt == NULL) {
        // can happen if e.g. we attempt a writeback and fail, but
        // before the retry, the writeback is eliminated because
        // we snoop another cache's ReadEx.
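        // in that case we deliberately send nothing now; a new send
        // is scheduled below as soon as the next MSHR is ready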
    } else {
        MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
        // in most cases getTimingPacket allocates a new packet, and
        // we must delete it unless it is successfully sent
        bool delete_pkt = !mshr->isForwardNoResponse();

        // let our snoop responses go first if there are responses to
        // the same address we are about to write back; note that
        // this creates a dependency between requests and snoop
        // responses, but that should not be a problem since there is
        // a chain already and the key is that the snoop responses can
        // sink unconditionally
        if (snoopRespQueue.hasAddr(pkt->getAddr())) {
            DPRINTF(CachePort, "Waiting for snoop response to be sent\n");
            Tick when = snoopRespQueue.deferredPacketReadyTime();
            schedSendEvent(when);

            if (delete_pkt)
                delete pkt;

            return;
        }


        waitingOnRetry = !masterPort.sendTimingReq(pkt);

        if (waitingOnRetry) {
            DPRINTF(CachePort, "now waiting on a retry\n");
            if (delete_pkt) {
                // we are awaiting a retry, so delete the packet; a
                // new one will be created when we get the
                // opportunity to retry
                delete pkt;
            }
            // note that we have now masked any requestBus and
            // schedSendEvent (we will wait for a retry before doing
            // anything), and this holds even if we do not care about
            // this packet and might override it before it gets
            // retried
        } else {
            // As part of the call to sendTimingReq the packet is
            // forwarded to all neighbouring caches (and any caches
            // above them) as a snoop. Thus at this point we know if
            // any of the neighbouring caches are responding, and if
            // so, we know it is dirty, and we can determine if it is
            // being passed as Modified, making our MSHR the ordering
            // point
            bool pending_modified_resp = !pkt->hasSharers() &&
                pkt->cacheResponding();

            cache.markInService(mshr, pending_modified_resp);
        }
    }

    // if we succeeded and are not waiting for a retry, schedule the
    // next send considering when the next MSHR is ready, note that
    // snoop responses have their own packet queue and thus schedule
    // their own events
    if (!waitingOnRetry) {
        schedSendEvent(cache.nextMSHRReadyTime());
    }
}

Cache::
MemSidePort::MemSidePort(const std::string &_name, Cache *_cache,
                         const std::string &_label)
    : BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
      _reqQueue(*_cache, *this, _snoopRespQueue, _label),
      _snoopRespQueue(*_cache, *this, _label), cache(_cache)
{
}