// mshr.hh revision 13351:1d456a63bfbc
1/* 2 * Copyright (c) 2012-2013, 2015-2016, 2018 ARM Limited 3 * All rights reserved. 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2002-2005 The Regents of The University of Michigan 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Erik Hallnor 41 * Nikos Nikoleris 42 */ 43 44/** 45 * @file 46 * Miss Status and Handling Register (MSHR) declaration. 47 */ 48 49#ifndef __MEM_CACHE_MSHR_HH__ 50#define __MEM_CACHE_MSHR_HH__ 51 52#include <cassert> 53#include <iosfwd> 54#include <list> 55#include <string> 56#include <vector> 57 58#include "base/printable.hh" 59#include "base/types.hh" 60#include "mem/cache/queue_entry.hh" 61#include "mem/packet.hh" 62#include "mem/request.hh" 63#include "sim/core.hh" 64 65class BaseCache; 66 67/** 68 * Miss Status and handling Register. This class keeps all the information 69 * needed to handle a cache miss including a list of target requests. 70 * @sa \ref gem5MemorySystem "gem5 Memory System" 71 */ 72class MSHR : public QueueEntry, public Printable 73{ 74 75 /** 76 * Consider the queues friends to avoid making everything public. 77 */ 78 template<typename Entry> 79 friend class Queue; 80 friend class MSHRQueue; 81 82 private: 83 84 /** Flag set by downstream caches */ 85 bool downstreamPending; 86 87 /** 88 * Here we use one flag to track both if: 89 * 90 * 1. We are going to become owner or not, i.e., we will get the 91 * block in an ownership state (Owned or Modified) with BlkDirty 92 * set. This determines whether or not we are going to become the 93 * responder and ordering point for future requests that we snoop. 94 * 95 * 2. We know that we are going to get a writable block, i.e. 
we 96 * will get the block in writable state (Exclusive or Modified 97 * state) with BlkWritable set. That determines whether additional 98 * targets with needsWritable set will be able to be satisfied, or 99 * if not should be put on the deferred list to possibly wait for 100 * another request that does give us writable access. 101 * 102 * Condition 2 is actually just a shortcut that saves us from 103 * possibly building a deferred target list and calling 104 * promoteWritable() every time we get a writable block. Condition 105 * 1, tracking ownership, is what is important. However, we never 106 * receive ownership without marking the block dirty, and 107 * consequently use pendingModified to track both ownership and 108 * writability rather than having separate pendingDirty and 109 * pendingWritable flags. 110 */ 111 bool pendingModified; 112 113 /** Did we snoop an invalidate while waiting for data? */ 114 bool postInvalidate; 115 116 /** Did we snoop a read while waiting for data? */ 117 bool postDowngrade; 118 119 public: 120 121 /** Track if we sent this as a whole line write or not */ 122 bool wasWholeLineWrite; 123 124 /** True if the entry is just a simple forward from an upper level */ 125 bool isForward; 126 127 class Target { 128 public: 129 130 enum Source { 131 FromCPU, 132 FromSnoop, 133 FromPrefetcher 134 }; 135 136 const Tick recvTime; //!< Time when request was received (for stats) 137 const Tick readyTime; //!< Time when request is ready to be serviced 138 const Counter order; //!< Global order (for memory consistency mgmt) 139 const PacketPtr pkt; //!< Pending request packet. 140 const Source source; //!< Request from cpu, memory, or prefetcher? 141 142 /** 143 * We use this flag to track whether we have cleared the 144 * downstreamPending flag for the MSHR of the cache above 145 * where this packet originates from and guard noninitial 146 * attempts to clear it. 
147 * 148 * The flag markedPending needs to be updated when the 149 * TargetList is in service which can be: 150 * 1) during the Target instantiation if the MSHR is in 151 * service and the target is not deferred, 152 * 2) when the MSHR becomes in service if the target is not 153 * deferred, 154 * 3) or when the TargetList is promoted (deferredTargets -> 155 * targets). 156 */ 157 bool markedPending; 158 159 const bool allocOnFill; //!< Should the response servicing this 160 //!< target list allocate in the cache? 161 162 Target(PacketPtr _pkt, Tick _readyTime, Counter _order, 163 Source _source, bool _markedPending, bool alloc_on_fill) 164 : recvTime(curTick()), readyTime(_readyTime), order(_order), 165 pkt(_pkt), source(_source), markedPending(_markedPending), 166 allocOnFill(alloc_on_fill) 167 {} 168 }; 169 170 class TargetList : public std::list<Target> { 171 172 public: 173 bool needsWritable; 174 bool hasUpgrade; 175 /** Set when the response should allocate on fill */ 176 bool allocOnFill; 177 /** 178 * Determine whether there was at least one non-snooping 179 * target coming from another cache. 180 */ 181 bool hasFromCache; 182 183 TargetList(); 184 185 /** 186 * Use the provided packet and the source to update the 187 * flags of this TargetList. 
188 * 189 * @param pkt Packet considered for the flag update 190 * @param source Indicates the source of the packet 191 * @param alloc_on_fill Whether the pkt would allocate on a fill 192 */ 193 void updateFlags(PacketPtr pkt, Target::Source source, 194 bool alloc_on_fill); 195 196 /** 197 * Reset state 198 * 199 * @param blk_addr Address of the cache block 200 * @param blk_size Size of the cache block 201 */ 202 void init(Addr blk_addr, Addr blk_size) { 203 blkAddr = blk_addr; 204 blkSize = blk_size; 205 writesBitmap.resize(blk_size); 206 207 resetFlags(); 208 } 209 210 void resetFlags() { 211 onlyWrites = true; 212 std::fill(writesBitmap.begin(), writesBitmap.end(), false); 213 214 needsWritable = false; 215 hasUpgrade = false; 216 allocOnFill = false; 217 hasFromCache = false; 218 } 219 220 /** 221 * Goes through the list of targets and uses them to populate 222 * the flags of this TargetList. When the function returns the 223 * flags are consistent with the properties of packets in the 224 * list. 225 */ 226 void populateFlags(); 227 228 /** 229 * Add the specified packet in the TargetList. This function 230 * stores information related to the added packet and updates 231 * accordingly the flags. 
232 * 233 * @param pkt Packet considered for adding 234 */ 235 void updateWriteFlags(PacketPtr pkt) { 236 const Request::FlagsType noMergeFlags = 237 Request::UNCACHEABLE | 238 Request::STRICT_ORDER | Request::MMAPPED_IPR | 239 Request::PRIVILEGED | Request::LLSC | 240 Request::MEM_SWAP | Request::MEM_SWAP_COND | 241 Request::SECURE; 242 243 // if we have already seen writes for the full block stop 244 // here, this might be a full line write followed by 245 // other compatible requests (e.g., reads) 246 if (!isWholeLineWrite()) { 247 bool can_merge_write = pkt->isWrite() && 248 ((pkt->req->getFlags() & noMergeFlags) == 0); 249 onlyWrites &= can_merge_write; 250 if (onlyWrites) { 251 auto offset = pkt->getOffset(blkSize); 252 auto begin = writesBitmap.begin() + offset; 253 std::fill(begin, begin + pkt->getSize(), true); 254 } 255 } 256 } 257 258 /** 259 * Tests if the flags of this TargetList have their default 260 * values. 261 * 262 * @return True if the TargetList are reset, false otherwise. 263 */ 264 bool isReset() const { 265 return !needsWritable && !hasUpgrade && !allocOnFill && 266 !hasFromCache && onlyWrites; 267 } 268 269 /** 270 * Add the specified packet in the TargetList. This function 271 * stores information related to the added packet and updates 272 * accordingly the flags. 273 * 274 * @param pkt Packet considered for adding 275 * @param readTime Tick at which the packet is processed by this cache 276 * @param order A counter giving a unique id to each target 277 * @param source Indicates the source agent of the packet 278 * @param markPending Set for deferred targets or pending MSHRs 279 * @param alloc_on_fill Whether it should allocate on a fill 280 */ 281 void add(PacketPtr pkt, Tick readyTime, Counter order, 282 Target::Source source, bool markPending, bool alloc_on_fill); 283 284 /** 285 * Convert upgrades to the equivalent request if the cache line they 286 * refer to would have been invalid (Upgrade -> ReadEx, SC* -> Fail). 
287 * Used to rejig ordering between targets waiting on an MSHR. */ 288 void replaceUpgrades(); 289 290 void clearDownstreamPending(); 291 void clearDownstreamPending(iterator begin, iterator end); 292 bool trySatisfyFunctional(PacketPtr pkt); 293 void print(std::ostream &os, int verbosity, 294 const std::string &prefix) const; 295 296 /** 297 * Check if this list contains only compatible writes, and if they 298 * span the entire cache line. This is used as part of the 299 * miss-packet creation. Note that new requests may arrive after a 300 * miss-packet has been created, and for the fill we therefore use 301 * the wasWholeLineWrite field. 302 */ 303 bool isWholeLineWrite() const 304 { 305 return onlyWrites && 306 std::all_of(writesBitmap.begin(), 307 writesBitmap.end(), [](bool i) { return i; }); 308 } 309 310 private: 311 /** Address of the cache block for this list of targets. */ 312 Addr blkAddr; 313 314 /** Size of the cache block. */ 315 Addr blkSize; 316 317 /** Are we only dealing with writes. */ 318 bool onlyWrites; 319 320 // NOTE: std::vector<bool> might not meet satisfy the 321 // ForwardIterator requirement and therefore cannot be used 322 // for writesBitmap. 323 /** 324 * Track which bytes are written by requests in this target 325 * list. 326 */ 327 std::vector<char> writesBitmap; 328 }; 329 330 /** A list of MSHRs. */ 331 typedef std::list<MSHR *> List; 332 /** MSHR list iterator. */ 333 typedef List::iterator Iterator; 334 335 /** The pending* and post* flags are only valid if inService is 336 * true. Using the accessor functions lets us detect if these 337 * flags are accessed improperly. 338 */ 339 340 /** True if we need to get a writable copy of the block. 
*/ 341 bool needsWritable() const { return targets.needsWritable; } 342 343 bool isCleaning() const { 344 PacketPtr pkt = targets.front().pkt; 345 return pkt->isClean(); 346 } 347 348 bool isPendingModified() const { 349 assert(inService); return pendingModified; 350 } 351 352 bool hasPostInvalidate() const { 353 assert(inService); return postInvalidate; 354 } 355 356 bool hasPostDowngrade() const { 357 assert(inService); return postDowngrade; 358 } 359 360 bool sendPacket(BaseCache &cache); 361 362 bool allocOnFill() const { 363 return targets.allocOnFill; 364 } 365 366 /** 367 * Determine if there are non-deferred requests from other caches 368 * 369 * @return true if any of the targets is from another cache 370 */ 371 bool hasFromCache() const { 372 return targets.hasFromCache; 373 } 374 375 private: 376 /** 377 * Promotes deferred targets that satisfy a predicate 378 * 379 * Deferred targets are promoted to the target list if they 380 * satisfy a given condition. The operation stops at the first 381 * deferred target that doesn't satisfy the condition. 382 * 383 * @param pred A condition on a Target 384 */ 385 void promoteIf(const std::function<bool (Target &)>& pred); 386 387 /** 388 * Pointer to this MSHR on the ready list. 389 * @sa MissQueue, MSHRQueue::readyList 390 */ 391 Iterator readyIter; 392 393 /** 394 * Pointer to this MSHR on the allocated list. 395 * @sa MissQueue, MSHRQueue::allocatedList 396 */ 397 Iterator allocIter; 398 399 /** List of all requests that match the address */ 400 TargetList targets; 401 402 TargetList deferredTargets; 403 404 public: 405 /** 406 * Check if this MSHR contains only compatible writes, and if they 407 * span the entire cache line. This is used as part of the 408 * miss-packet creation. Note that new requests may arrive after a 409 * miss-packet has been created, and for the fill we therefore use 410 * the wasWholeLineWrite field. 
411 */ 412 bool isWholeLineWrite() const { 413 return targets.isWholeLineWrite(); 414 } 415 416 /** 417 * Allocate a miss to this MSHR. 418 * @param blk_addr The address of the block. 419 * @param blk_size The number of bytes to request. 420 * @param pkt The original miss. 421 * @param when_ready When should the MSHR be ready to act upon. 422 * @param _order The logical order of this MSHR 423 * @param alloc_on_fill Should the cache allocate a block on fill 424 */ 425 void allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt, 426 Tick when_ready, Counter _order, bool alloc_on_fill); 427 428 void markInService(bool pending_modified_resp); 429 430 void clearDownstreamPending(); 431 432 /** 433 * Mark this MSHR as free. 434 */ 435 void deallocate(); 436 437 /** 438 * Add a request to the list of targets. 439 * @param target The target. 440 */ 441 void allocateTarget(PacketPtr target, Tick when, Counter order, 442 bool alloc_on_fill); 443 bool handleSnoop(PacketPtr target, Counter order); 444 445 /** A simple constructor. */ 446 MSHR(); 447 448 /** 449 * Returns the current number of allocated targets. 450 * @return The current number of allocated targets. 451 */ 452 int getNumTargets() const 453 { return targets.size() + deferredTargets.size(); } 454 455 /** 456 * Extracts the subset of the targets that can be serviced given a 457 * received response. This function returns the targets list 458 * unless the response is a ReadRespWithInvalidate. The 459 * ReadRespWithInvalidate is only invalidating response that its 460 * invalidation was not expected when the request (a 461 * ReadSharedReq) was sent out. For ReadRespWithInvalidate we can 462 * safely service only the first FromCPU target and all FromSnoop 463 * targets (inform all snoopers that we no longer have the block). 464 * 465 * @param pkt The response from the downstream memory 466 */ 467 TargetList extractServiceableTargets(PacketPtr pkt); 468 469 /** 470 * Returns true if there are targets left. 
471 * @return true if there are targets 472 */ 473 bool hasTargets() const { return !targets.empty(); } 474 475 /** 476 * Returns a reference to the first target. 477 * @return A pointer to the first target. 478 */ 479 Target *getTarget() 480 { 481 assert(hasTargets()); 482 return &targets.front(); 483 } 484 485 /** 486 * Pop first target. 487 */ 488 void popTarget() 489 { 490 targets.pop_front(); 491 } 492 493 bool promoteDeferredTargets(); 494 495 /** 496 * Promotes deferred targets that do not require writable 497 * 498 * Move targets from the deferred targets list to the target list 499 * starting from the first deferred target until the first target 500 * that is a cache maintenance operation or needs a writable copy 501 * of the block 502 */ 503 void promoteReadable(); 504 505 /** 506 * Promotes deferred targets that do not require writable 507 * 508 * Requests in the deferred target list are moved to the target 509 * list up until the first target that is a cache maintenance 510 * operation or needs a writable copy of the block 511 */ 512 void promoteWritable(); 513 514 bool trySatisfyFunctional(PacketPtr pkt); 515 516 /** 517 * Adds a delay relative to the current tick to the current MSHR 518 * @param delay_ticks the desired delay in ticks 519 */ 520 void delay(Tick delay_ticks) 521 { 522 assert(readyTime <= curTick()); 523 readyTime = curTick() + delay_ticks; 524 } 525 526 /** 527 * Prints the contents of this MSHR for debugging. 528 */ 529 void print(std::ostream &os, 530 int verbosity = 0, 531 const std::string &prefix = "") const; 532 /** 533 * A no-args wrapper of print(std::ostream...) meant to be 534 * invoked from DPRINTFs avoiding string overheads in fast mode 535 * 536 * @return string with mshr fields + [deferred]targets 537 */ 538 std::string print() const; 539}; 540 541#endif // __MEM_CACHE_MSHR_HH__ 542