noncoherent_cache.cc revision 13948:f8666d4d5855
/*
 * Copyright (c) 2010-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Sandberg
 *          Nikos Nikoleris
 */

/**
 * @file
 * Non-coherent cache definitions.
 */

#include "mem/cache/noncoherent_cache.hh"

#include <cassert>

#include "base/logging.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/mshr.hh"
#include "params/NoncoherentCache.hh"

NoncoherentCache::NoncoherentCache(const NoncoherentCacheParams *p)
    : BaseCache(p, p->system->cacheLineSize())
{
}

void
NoncoherentCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
{
    // As this is a non-coherent cache located below the point of
    // coherency, we do not expect requests that are typically used to
    // keep caches coherent (e.g., InvalidateReq or UpdateReq).
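    // The assert below therefore only checks that the packet is a read
    // or a write; BaseCache::satisfyRequest then performs the actual
    // access on the block.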
    assert(pkt->isRead() || pkt->isWrite());
    BaseCache::satisfyRequest(pkt, blk);
}

bool
NoncoherentCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat)
{
    bool success = BaseCache::access(pkt, blk, lat);

    if (pkt->isWriteback() || pkt->cmd == MemCmd::WriteClean) {
        assert(blk && blk->isValid());
        // Writeback and WriteClean can allocate and fill even if the
        // referenced block was not present or was invalid. If that is
        // the case, make sure that the new block is marked as writable.
        blk->status |= BlkWritable;
    }

    return success;
}

void
NoncoherentCache::doWritebacks(PacketPtr pkt, Tick forward_time)
{
    allocateWriteBuffer(pkt, forward_time);
}

void
NoncoherentCache::doWritebacksAtomic(PacketPtr pkt)
{
    memSidePort.sendAtomic(pkt);
    delete pkt;
}

void
NoncoherentCache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk,
                                      Tick forward_time, Tick request_time)
{
    // miss
    Addr blk_addr = pkt->getBlockAddr(blkSize);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, pkt->isSecure(), false);

    // We can always write to a non-coherent cache if the block is
    // present; therefore, if we have reached this point, the block
    // should not be in the cache.
    assert(mshr || !blk || !blk->isValid());

    BaseCache::handleTimingReqMiss(pkt, mshr, blk, forward_time, request_time);
}

void
NoncoherentCache::recvTimingReq(PacketPtr pkt)
{
    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "Should only see reads and writes at non-coherent cache\n");

    BaseCache::recvTimingReq(pkt);
}

PacketPtr
NoncoherentCache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                                   bool needs_writable,
                                   bool is_whole_line_write) const
{
    // We also fill for writebacks from the coherent caches above us,
    // and they do not need responses
    assert(cpu_pkt->needsResponse());

    // A miss can happen only due to a missing block
    assert(!blk || !blk->isValid());

    PacketPtr pkt = new Packet(cpu_pkt->req, MemCmd::ReadReq, blkSize);

    // the packet should be block aligned
    assert(pkt->getAddr() == pkt->getBlockAddr(blkSize));

    pkt->allocate();
    DPRINTF(Cache, "%s created %s from %s\n", __func__, pkt->print(),
            cpu_pkt->print());
    return pkt;
}


Cycles
NoncoherentCache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk)
{
    PacketPtr bus_pkt = createMissPacket(pkt, blk, true,
                                         pkt->isWholeLineWrite(blkSize));
    DPRINTF(Cache, "Sending an atomic %s\n", bus_pkt->print());

    Cycles latency = ticksToCycles(memSidePort.sendAtomic(bus_pkt));

    assert(bus_pkt->isResponse());
    // At the moment the only supported downstream requests we issue
    // are ReadReqs, and therefore we should only see the corresponding
    // responses here
    assert(bus_pkt->isRead());
    assert(pkt->cmd != MemCmd::UpgradeResp);
    assert(!bus_pkt->isInvalidate());
    assert(!bus_pkt->hasSharers());

    // We are now dealing with the response handling
    DPRINTF(Cache, "Receive response: %s\n", bus_pkt->print());

    if (!bus_pkt->isError()) {
        // Any response that does not have an error should be filling;
        // after all, it is a read response
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                bus_pkt->getAddr());
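        // handleFill (in BaseCache) copies the response data into the
        // block, allocating a new one if needed; allocOnFill() decides
        // whether this command should allocate on fill at all.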
        blk = handleFill(bus_pkt, blk, allocOnFill(bus_pkt->cmd));
        assert(blk);
    }
    satisfyRequest(pkt, blk);

    maintainClusivity(true, blk);

    // Use the separate bus_pkt to generate the response to pkt and
    // then delete it.
    if (!pkt->isWriteback() && pkt->cmd != MemCmd::WriteClean) {
        assert(pkt->needsResponse());
        pkt->makeAtomicResponse();
        if (bus_pkt->isError()) {
            pkt->copyError(bus_pkt);
        }
    }

    delete bus_pkt;

    return latency;
}

Tick
NoncoherentCache::recvAtomic(PacketPtr pkt)
{
    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "Should only see reads and writes at non-coherent cache\n");

    return BaseCache::recvAtomic(pkt);
}


void
NoncoherentCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
{
    panic_if(!from_cpu_side, "Non-coherent cache received functional snoop"
             " request\n");

    BaseCache::functionalAccess(pkt, from_cpu_side);
}

void
NoncoherentCache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt,
                                     CacheBlk *blk)
{
    // First offset for critical word first calculations
    const int initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);

    MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
    for (auto &target: targets) {
        Packet *tgt_pkt = target.pkt;

        switch (target.source) {
          case MSHR::Target::FromCPU:
            // handle deferred requests coming from a cache or core
            // above

            Tick completion_time;
            // Here we charge completion_time with the delay of the xbar
            // if the packet came through it, which is recorded in
            // headerDelay.
            completion_time = pkt->headerDelay;

            satisfyRequest(tgt_pkt, blk);

            // How many bytes past the first request is this one
            int transfer_offset;
            transfer_offset = tgt_pkt->getOffset(blkSize) - initial_offset;
            if (transfer_offset < 0) {
                transfer_offset += blkSize;
            }
            // If this is not the critical word, also charge payloadDelay.
            // responseLatency is the latency of the return path from
            // lower-level caches/memory to an upper-level cache or
            // the core.
            completion_time += clockEdge(responseLatency) +
                (transfer_offset ? pkt->payloadDelay : 0);

            assert(tgt_pkt->req->masterId() < system->maxMasters());
            missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] +=
                completion_time - target.recvTime;

            tgt_pkt->makeTimingResponse();
            if (pkt->isError())
                tgt_pkt->copyError(pkt);

            // Reset the bus additional time as it is now accounted for
            tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
            cpuSidePort.schedTimingResp(tgt_pkt, completion_time);
            break;

          case MSHR::Target::FromPrefetcher:
            // handle deferred requests coming from a prefetcher
            // attached to this cache
            assert(tgt_pkt->cmd == MemCmd::HardPFReq);

            if (blk)
                blk->status |= BlkHWPrefetched;

            // We have filled the block and the prefetcher does not
            // require responses.
            delete tgt_pkt;
            break;

          default:
            // we should never see FromSnoop targets as this is a
            // non-coherent cache
            panic("Illegal target->source enum %d\n", target.source);
        }
    }

    // Responses are filling and bring in writable blocks; therefore
    // there should be no deferred targets, and all the non-deferred
    // targets have now been serviced.
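    // (A target is deferred only when the current response cannot
    // satisfy it, e.g. because a writable copy is still missing; with
    // writable fills and no snoops that situation never arises here.)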
    assert(mshr->getNumTargets() == 0);
}

void
NoncoherentCache::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());
    // At the moment the only supported downstream requests we issue
    // are ReadReqs, and therefore we should only see the corresponding
    // responses here
    assert(pkt->isRead());
    assert(pkt->cmd != MemCmd::UpgradeResp);
    assert(!pkt->isInvalidate());
    // This cache is non-coherent and any memories below are
    // non-coherent too (non-coherent caches or the main memory),
    // therefore the fetched block can be marked as writable.
    assert(!pkt->hasSharers());

    BaseCache::recvTimingResp(pkt);
}

PacketPtr
NoncoherentCache::evictBlock(CacheBlk *blk)
{
    // A dirty block is always written back.

    // A clean block can be written back if writebacks for clean blocks
    // are enabled. This can be useful if there is a cache below and we
    // want to make sure the block is cached, but if the memory below is
    // the main memory WritebackCleans are unnecessary.

    // If clean writebacks are not enabled, we take no further action
    // for evictions of clean blocks (i.e., CleanEvicts are
    // unnecessary).
    PacketPtr pkt = (blk->isDirty() || writebackClean) ?
        writebackBlk(blk) : nullptr;

    invalidateBlock(blk);

    return pkt;
}

NoncoherentCache*
NoncoherentCacheParams::create()
{
    assert(tags);
    assert(replacement_policy);

    return new NoncoherentCache(this);
}