base.hh revision 13717
/*
 * Copyright (c) 2012-2013, 2015-2016, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Hansson
 *          Nikos Nikoleris
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __MEM_CACHE_BASE_HH__
#define __MEM_CACHE_BASE_HH__

#include <cassert>
#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>

#include "base/addr_range.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "enums/Clusivity.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/mshr_queue.hh"
#include "mem/cache/tags/base.hh"
#include "mem/cache/write_queue.hh"
#include "mem/cache/write_queue_entry.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/packet_queue.hh"
#include "mem/qport.hh"
#include "mem/request.hh"
#include "params/WriteAllocator.hh"
#include "sim/eventq.hh"
#include "sim/probe/probe.hh"
#include "sim/serialize.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

class BaseMasterPort;
class BasePrefetcher;
class BaseSlavePort;
class MSHR;
class MasterPort;
class QueueEntry;
struct BaseCacheParams;

/**
 * A basic cache interface.
 * Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
  protected:
    /**
     * Indexes to enumerate the MSHR queues.
     */
    enum MSHRQueueIndex {
        MSHRQueue_MSHRs,
        MSHRQueue_WriteBuffer
    };

  public:
    /**
     * Reasons for caches to be blocked.
     */
    enum BlockedCause {
        Blocked_NoMSHRs = MSHRQueue_MSHRs,
        Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
        Blocked_NoTargets,
        NUM_BLOCKED_CAUSES
    };

  protected:

    /**
     * A cache master port is used for the memory-side port of the
     * cache, and in addition to the basic timing port that only sends
     * response packets through a transmit list, it also offers the
     * ability to schedule and send request packets (requests &
     * writebacks). The send event is scheduled through schedSendEvent,
     * and the sendDeferredPacket of the timing port is modified to
     * consider both the transmit list and the requests from the MSHR.
     */
    class CacheMasterPort : public QueuedMasterPort
    {

      public:

        /**
         * Schedule a send of a request packet (from the MSHR). Note
         * that we could already have a retry outstanding.
         */
        void schedSendEvent(Tick time)
        {
            DPRINTF(CachePort, "Scheduling send event at %llu\n", time);
            reqQueue.schedSendEvent(time);
        }

      protected:

        CacheMasterPort(const std::string &_name, BaseCache *_cache,
                        ReqPacketQueue &_reqQueue,
                        SnoopRespPacketQueue &_snoopRespQueue) :
            QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue)
        { }

        /**
         * Memory-side port always snoops.
         *
         * @return always true
         */
        virtual bool isSnooping() const { return true; }
    };

    /**
     * Override the default behaviour of sendDeferredPacket to enable
     * the memory-side cache port to also send requests based on the
     * current MSHR status. This queue has a pointer to our specific
     * cache implementation and is used by the MemSidePort.
     */
    class CacheReqPacketQueue : public ReqPacketQueue
    {

      protected:

        BaseCache &cache;
        SnoopRespPacketQueue &snoopRespQueue;

      public:

        CacheReqPacketQueue(BaseCache &cache, MasterPort &port,
                            SnoopRespPacketQueue &snoop_resp_queue,
                            const std::string &label) :
            ReqPacketQueue(cache, port, label), cache(cache),
            snoopRespQueue(snoop_resp_queue) { }

        /**
         * Override the normal sendDeferredPacket and do not only
         * consider the transmit list (used for responses), but also
         * requests.
         */
        virtual void sendDeferredPacket();

        /**
         * Check if there is a conflicting snoop response about to be
         * sent out, and if so simply stall any requests, and schedule
         * a send event at the same time as the next snoop response is
         * being sent out.
         */
        bool checkConflictingSnoop(Addr addr)
        {
            if (snoopRespQueue.hasAddr(addr)) {
                DPRINTF(CachePort, "Waiting for snoop response to be "
                        "sent\n");
                Tick when = snoopRespQueue.deferredPacketReadyTime();
                schedSendEvent(when);
                return true;
            }
            return false;
        }
    };
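
    // A sketch of what the overridden sendDeferredPacket is documented
    // to do (the actual definition lives in base.cc; this outline is
    // illustrative, not a verbatim copy): in addition to the queued
    // responses handled by the base class, it asks the cache for the
    // next MSHR or write-buffer entry and sends it, unless a
    // conflicting snoop response stalls it first:
    //
    //     QueueEntry *entry = cache.getNextQueueEntry();
    //     if (entry && !checkConflictingSnoop(entry->blkAddr))
    //         waitingOnRetry = entry->sendPacket(cache);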

    /**
     * The memory-side port extends the base cache master port with
     * access functions for functional, atomic and timing snoops.
     */
    class MemSidePort : public CacheMasterPort
    {
      private:

        /** The cache-specific queue. */
        CacheReqPacketQueue _reqQueue;

        SnoopRespPacketQueue _snoopRespQueue;

        // a pointer to our specific cache implementation
        BaseCache *cache;

      protected:

        virtual void recvTimingSnoopReq(PacketPtr pkt);

        virtual bool recvTimingResp(PacketPtr pkt);

        virtual Tick recvAtomicSnoop(PacketPtr pkt);

        virtual void recvFunctionalSnoop(PacketPtr pkt);

      public:

        MemSidePort(const std::string &_name, BaseCache *_cache,
                    const std::string &_label);
    };

    /**
     * A cache slave port is used for the CPU-side port of the cache,
     * and it is basically a simple timing port that uses a transmit
     * list for responses to the CPU (or connected master). In
     * addition, it has the functionality to block the port for
     * incoming requests. If blocked, the port will issue a retry once
     * unblocked.
     */
    class CacheSlavePort : public QueuedSlavePort
    {

      public:

        /** Do not accept any new requests. */
        void setBlocked();

        /** Return to normal operation and accept new requests. */
        void clearBlocked();

        bool isBlocked() const { return blocked; }

      protected:

        CacheSlavePort(const std::string &_name, BaseCache *_cache,
                       const std::string &_label);

        /** A normal packet queue used to store responses. */
        RespPacketQueue queue;

        bool blocked;

        bool mustSendRetry;

      private:

        void processSendRetry();

        EventFunctionWrapper sendRetryEvent;

    };

    /**
     * The CPU-side port extends the base cache slave port with access
     * functions for functional, atomic and timing requests.
     */
    class CpuSidePort : public CacheSlavePort
    {
      private:

        // a pointer to our specific cache implementation
        BaseCache *cache;

      protected:
        virtual bool recvTimingSnoopResp(PacketPtr pkt) override;

        virtual bool tryTiming(PacketPtr pkt) override;

        virtual bool recvTimingReq(PacketPtr pkt) override;

        virtual Tick recvAtomic(PacketPtr pkt) override;

        virtual void recvFunctional(PacketPtr pkt) override;

        virtual AddrRangeList getAddrRanges() const override;

      public:

        CpuSidePort(const std::string &_name, BaseCache *_cache,
                    const std::string &_label);

    };

    CpuSidePort cpuSidePort;
    MemSidePort memSidePort;

  protected:

    /** Miss status registers */
    MSHRQueue mshrQueue;

    /** Write/writeback buffer */
    WriteQueue writeBuffer;

    /** Tag and data storage */
    BaseTags *tags;

    /** Prefetcher */
    BasePrefetcher *prefetcher;

    /** To probe when a cache hit occurs */
    ProbePointArg<PacketPtr> *ppHit;

    /** To probe when a cache miss occurs */
    ProbePointArg<PacketPtr> *ppMiss;

    /** To probe when a cache fill occurs */
    ProbePointArg<PacketPtr> *ppFill;
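
    // The three probe points above are created in regProbePoints()
    // and notified from the access and fill paths; a sketch of the
    // expected notification sites (not verbatim call sites):
    //
    //     ppHit->notify(pkt);    // packet hit in the cache
    //     ppMiss->notify(pkt);   // packet missed in the cache
    //     ppFill->notify(pkt);   // a response filled a block
    //
    // Observers such as prefetchers can attach probe listeners to
    // these points by name.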

    /**
     * The writeAllocator drives optimizations for streaming writes.
     * It first determines whether a WriteReq MSHR should be delayed,
     * thus ensuring that we wait longer in cases when we are write
     * coalescing and allowing all the bytes of the line to be written
     * before the MSHR packet is sent downstream. This works in unison
     * with the tracking in the MSHR to check if the entire line is
     * written. The write mode also affects the behaviour on filling
     * any whole-line writes. Normally the cache allocates the line
     * when receiving the InvalidateResp, but after seeing enough
     * consecutive lines we switch to using the tempBlock, and thus
     * end up not allocating the line, and instead turning the
     * whole-line write into a writeback straight away.
     */
    WriteAllocator * const writeAllocator;

    /**
     * Temporary cache block for occasional transitory use. We use
     * the tempBlock to fill when allocation fails (e.g., when there
     * is an outstanding request that accesses the victim block) or
     * when we want to avoid allocation (e.g., exclusive caches).
     */
    TempCacheBlk *tempBlock;

    /**
     * Upstream caches need this packet until true is returned, so
     * hold it for deletion until a subsequent call.
     */
    std::unique_ptr<Packet> pendingDelete;

    /**
     * Mark a request as in service (sent downstream in the memory
     * system), effectively making this MSHR the ordering point.
     */
    void markInService(MSHR *mshr, bool pending_modified_resp)
    {
        bool wasFull = mshrQueue.isFull();
        mshrQueue.markInService(mshr, pending_modified_resp);

        if (wasFull && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }
    }

    void markInService(WriteQueueEntry *entry)
    {
        bool wasFull = writeBuffer.isFull();
        writeBuffer.markInService(entry);

        if (wasFull && !writeBuffer.isFull()) {
            clearBlocked(Blocked_NoWBBuffers);
        }
    }

    /**
     * Determine whether we should allocate on a fill or not. If this
     * cache is mostly inclusive with regards to the upstream cache(s)
     * we always allocate (for any non-forwarded and cacheable
     * requests). In the case of a mostly exclusive cache, we allocate
     * on fill if the packet did not come from a cache, thus if we:
     * are dealing with a whole-line write (the latter behaves much
     * like a writeback), the original target packet came from a
     * non-caching source, or if we are performing a prefetch or LLSC.
     *
     * @param cmd Command of the incoming requesting packet
     * @return Whether we should allocate on the fill
     */
    inline bool allocOnFill(MemCmd cmd) const
    {
        return clusivity == Enums::mostly_incl ||
            cmd == MemCmd::WriteLineReq ||
            cmd == MemCmd::ReadReq ||
            cmd == MemCmd::WriteReq ||
            cmd.isPrefetch() ||
            cmd.isLLSC();
    }
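
    // Worked example for the predicate above: a mostly-inclusive
    // cache fills on every cacheable miss. A mostly-exclusive cache
    // fills only for whole-line writes (WriteLineReq), requests from
    // non-caching sources (ReadReq/WriteReq), prefetches, and LLSC
    // operations; a request issued by an upstream cache (e.g. a
    // ReadSharedReq or ReadExReq) returns false here, keeping this
    // cache's contents largely exclusive of the cache(s) above.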

    /**
     * Regenerate block address using tags.
     * Block address regeneration depends on whether we're using a
     * temporary block or not.
     *
     * @param blk The block whose address to regenerate.
     * @return The block's address.
     */
    Addr regenerateBlkAddr(CacheBlk* blk);

    /**
     * Calculate access latency in ticks given a tag lookup latency,
     * and whether the access was a hit or a miss.
     *
     * @param blk The cache block that was accessed.
     * @param lookup_lat Latency of the respective tag lookup.
     * @return The number of ticks that pass due to a block access.
     */
    Cycles calculateAccessLatency(const CacheBlk* blk,
                                  const Cycles lookup_lat) const;

    /**
     * Does all the processing necessary to perform the provided request.
     * @param pkt The memory request to perform.
     * @param blk The cache block to be updated.
     * @param lat The latency of the access.
     * @param writebacks List for any writebacks that need to be performed.
     * @return Boolean indicating whether the request was satisfied.
     */
    virtual bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
                        PacketList &writebacks);

    /*
     * Handle a timing request that hit in the cache
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param request_time The tick at which the block lookup is complete
     */
    virtual void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk,
                                    Tick request_time);

    /*
     * Handle a timing request that missed in the cache
     *
     * Implementation specific handling for different cache
     * implementations
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param forward_time The tick at which we can process dependent requests
     * @param request_time The tick at which the block lookup is complete
     */
    virtual void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk,
                                     Tick forward_time,
                                     Tick request_time) = 0;

    /*
     * Handle a timing request that missed in the cache
     *
     * Common functionality across different cache implementations
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param mshr Any existing mshr for the referenced cache block
     * @param forward_time The tick at which we can process dependent requests
     * @param request_time The tick at which the block lookup is complete
     */
    void handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                             Tick forward_time, Tick request_time);

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     */
    virtual void recvTimingReq(PacketPtr pkt);

    /**
     * Handling the special case of uncacheable write responses to
     * make recvTimingResp less cluttered.
     */
    void handleUncacheableWriteResp(PacketPtr pkt);

    /**
     * Service non-deferred MSHR targets using the received response
     *
     * Iterates through the list of targets that can be serviced with
     * the current response.
     *
     * @param mshr The MSHR that corresponds to the response
     * @param pkt The response packet
     * @param blk The reference block
     */
    virtual void serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt,
                                    CacheBlk *blk) = 0;

    /**
     * Handles a response (cache line fill/write ack) from the bus.
     * @param pkt The response packet
     */
    virtual void recvTimingResp(PacketPtr pkt);

    /**
     * Snoops bus transactions to maintain coherence.
     * @param pkt The current bus transaction.
     */
    virtual void recvTimingSnoopReq(PacketPtr pkt) = 0;

    /**
     * Handle a snoop response.
     * @param pkt Snoop response packet
     */
    virtual void recvTimingSnoopResp(PacketPtr pkt) = 0;

    /**
     * Handle a request in atomic mode that missed in this cache
     *
     * Creates a downstream request, sends it to the memory below and
     * handles the response. As we are in atomic mode all operations
     * are performed immediately.
     *
     * @param pkt The packet with the request
     * @param blk The referenced block
     * @param writebacks A list with packets for any performed writebacks
     * @return Cycles for handling the request
     */
    virtual Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
                                       PacketList &writebacks) = 0;
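
    // A sketch of how the timing hooks above compose (the actual
    // sequencing lives in BaseCache::recvTimingReq in base.cc; the
    // local names are illustrative):
    //
    //     CacheBlk *blk = nullptr;
    //     Cycles lat;
    //     PacketList writebacks;
    //     bool satisfied = access(pkt, blk, lat, writebacks);
    //     if (satisfied)
    //         handleTimingReqHit(pkt, blk, request_time);
    //     else
    //         handleTimingReqMiss(pkt, blk, forward_time, request_time);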

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @return The number of ticks required for the access.
     */
    virtual Tick recvAtomic(PacketPtr pkt);

    /**
     * Snoop for the provided request in the cache and return the estimated
     * time taken.
     * @param pkt The memory request to snoop
     * @return The number of ticks required for the snoop.
     */
    virtual Tick recvAtomicSnoop(PacketPtr pkt) = 0;

    /**
     * Performs the access specified by the request.
     *
     * @param pkt The request to perform.
     * @param from_cpu_side Whether the access comes from the CPU side
     * port rather than the memory side port
     */
    virtual void functionalAccess(PacketPtr pkt, bool from_cpu_side);

    /**
     * Handle doing the Compare and Swap function for SPARC.
     */
    void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);

    /**
     * Return the next queue entry to service, either a pending miss
     * from the MSHR queue, a buffered write from the write buffer, or
     * something from the prefetcher. This function is responsible
     * for prioritizing among those sources on the fly.
     */
    QueueEntry* getNextQueueEntry();

    /**
     * Insert writebacks into the write buffer
     */
    virtual void doWritebacks(PacketList& writebacks, Tick forward_time) = 0;

    /**
     * Send writebacks down the memory hierarchy in atomic mode
     */
    virtual void doWritebacksAtomic(PacketList& writebacks) = 0;

    /**
     * Create an appropriate downstream bus request packet.
     *
     * Creates a new packet with the request to be sent to the memory
     * below, or nullptr if the current request in cpu_pkt should just
     * be forwarded on.
     *
     * @param cpu_pkt The miss packet that needs to be satisfied.
     * @param blk The referenced block, can be nullptr.
     * @param needs_writable Indicates that the block must be writable
     * even if the request in cpu_pkt doesn't indicate that.
     * @param is_whole_line_write True if there are writes for the
     * whole line
     * @return A packet to send to the memory below
     */
    virtual PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                                       bool needs_writable,
                                       bool is_whole_line_write) const = 0;

    /**
     * Determine if clean lines should be written back or not. In
     * cases where a downstream cache is mostly inclusive we likely
     * want it to act as a victim cache also for lines that have not
     * been modified. Hence, we cannot simply drop the line (or send a
     * clean evict), but rather need to send the actual data.
     */
    const bool writebackClean;

    /**
     * Writebacks from the tempBlock, resulting on the response path
     * in atomic mode, must happen after the call to recvAtomic has
     * finished (for the right ordering of the packets). We therefore
     * need to hold on to the packets, and have a method and an event
     * to send them.
     */
    PacketPtr tempBlockWriteback;

    /**
     * Send the outstanding tempBlock writeback. To be called after
     * recvAtomic finishes in cases where the block we filled is in
     * fact the tempBlock, and now needs to be written back.
     */
    void writebackTempBlockAtomic() {
        assert(tempBlockWriteback != nullptr);
        PacketList writebacks{tempBlockWriteback};
        doWritebacksAtomic(writebacks);
        tempBlockWriteback = nullptr;
    }
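
    // Illustrative call site (a sketch; the real one is in
    // BaseCache::recvAtomic in base.cc): when an atomic-mode fill
    // lands in the tempBlock, the eviction is deferred until
    // recvAtomic has returned by stashing the packet and scheduling
    // the high-priority event declared below:
    //
    //     tempBlockWriteback = evictBlock(blk);
    //     schedule(writebackTempBlockAtomicEvent, curTick());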

    /**
     * An event to writeback the tempBlock after recvAtomic
     * finishes. To avoid other calls to recvAtomic getting in
     * between, we create this event with a higher priority.
     */
    EventFunctionWrapper writebackTempBlockAtomicEvent;

    /**
     * Perform any necessary updates to the block and perform any data
     * exchange between the packet and the block. The flags of the
     * packet are also set accordingly.
     *
     * @param pkt Request packet from upstream that hit a block
     * @param blk Cache block that the packet hit
     * @param deferred_response Whether this request originally missed
     * @param pending_downgrade Whether the writable flag is to be removed
     */
    virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                                bool deferred_response = false,
                                bool pending_downgrade = false);

    /**
     * Maintain the clusivity of this cache by potentially
     * invalidating a block. This method works in conjunction with
     * satisfyRequest, but is separate to allow us to handle all MSHR
     * targets before potentially dropping a block.
     *
     * @param from_cache Whether we have dealt with a packet from a cache
     * @param blk The block that should potentially be dropped
     */
    void maintainClusivity(bool from_cache, CacheBlk *blk);

    /**
     * Handle a fill operation caused by a received packet.
     *
     * Populates a cache block and handles all outstanding requests for the
     * satisfied fill request. This version takes two memory requests. One
     * contains the fill data, the other is an optional target to satisfy.
     * Note that the reason we return a list of writebacks rather than
     * inserting them directly in the write buffer is that this function
     * is called by both atomic and timing-mode accesses, and in atomic
     * mode we don't mess with the write buffer (we just perform the
     * writebacks atomically once the original request is complete).
     *
     * @param pkt The memory request with the fill data.
     * @param blk The cache block if it already exists.
     * @param writebacks List for any writebacks that need to be performed.
     * @param allocate Whether to allocate a block or use the temp block
     * @return Pointer to the new cache block.
     */
    CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
                         PacketList &writebacks, bool allocate);

    /**
     * Allocate a new block and perform any necessary writebacks
     *
     * Find a victim block and if necessary prepare writebacks for any
     * existing data. May return nullptr if there are no replaceable
     * blocks. If a replaceable block is found, it inserts the new block in
     * its place. The new block, however, is not set as valid yet.
     *
     * @param pkt Packet holding the address to update
     * @param writebacks A list of writeback packets for the evicted blocks
     * @return the allocated block
     */
    CacheBlk *allocateBlock(const PacketPtr pkt, PacketList &writebacks);

    /**
     * Evict a cache block.
     *
     * Performs a writeback if necessary and invalidates the block
     *
     * @param blk Block to invalidate
     * @return A packet with the writeback, can be nullptr
     */
    M5_NODISCARD virtual PacketPtr evictBlock(CacheBlk *blk) = 0;

    /**
     * Evict a cache block.
     *
     * Performs a writeback if necessary and invalidates the block
     *
     * @param blk Block to invalidate
     * @param writebacks Return a list of packets with writebacks
     */
    void evictBlock(CacheBlk *blk, PacketList &writebacks);
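
    // How the pieces above fit together on a fill (a sketch of the
    // logic in BaseCache::handleFill in base.cc): allocateBlock()
    // asks the replacement policy for a victim and, via evictBlock(),
    // appends any required writeback packet to the writebacks list;
    // if allocation fails, or allocate is false, the fill falls back
    // to the tempBlock and the line is written back once the original
    // request completes.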

    /**
     * Invalidate a cache block.
     *
     * @param blk Block to invalidate
     */
    void invalidateBlock(CacheBlk *blk);

    /**
     * Create a writeback request for the given block.
     *
     * @param blk The block to writeback.
     * @return The writeback request for the block.
     */
    PacketPtr writebackBlk(CacheBlk *blk);

    /**
     * Create a writeclean request for the given block.
     *
     * Creates a request that writes the block to the cache below
     * without evicting the block from the current cache.
     *
     * @param blk The block to write clean.
     * @param dest The destination of the write clean operation.
     * @param id Use the given packet id for the write clean operation.
     * @return The generated write clean packet.
     */
    PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id);

    /**
     * Write back dirty blocks in the cache using functional accesses.
     */
    virtual void memWriteback() override;

    /**
     * Invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to
     * memory. Make sure to call memWriteback() first if you want
     * them written to memory.
     */
    virtual void memInvalidate() override;

    /**
     * Determine if there are any dirty blocks in the cache.
     *
     * @return true if at least one block is dirty, false otherwise.
     */
    bool isDirty() const;

    /**
     * Determine if an address is in the ranges covered by this
     * cache. This is useful to filter snoops.
     *
     * @param addr Address to check against
     *
     * @return If the address in question is in range
     */
    bool inRange(Addr addr) const;

    /**
     * Find next request ready time from among possible sources.
     */
    Tick nextQueueReadyTime() const;

    /** Block size of this cache */
    const unsigned blkSize;

    /**
     * The latency of tag lookup of a cache. It occurs when there is
     * an access to the cache.
     */
    const Cycles lookupLatency;

    /**
     * The latency of data access of a cache. It occurs when there is
     * an access to the cache.
     */
    const Cycles dataLatency;

    /**
     * This is the forward latency of the cache. It occurs when there
     * is a cache miss and a request is forwarded downstream, in
     * particular an outbound miss.
     */
    const Cycles forwardLatency;

    /** The latency to fill a cache block */
    const Cycles fillLatency;

    /**
     * The latency of sending a response to the upper-level cache or
     * core on a linefill. The responseLatency parameter captures this
     * latency.
     */
    const Cycles responseLatency;

    /**
     * Whether tags and data are accessed sequentially.
     */
    const bool sequentialAccess;
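
    // A sketch of how the hit latencies presumably combine in
    // calculateAccessLatency(): with sequentialAccess the tag and
    // data accesses serialize (roughly lookupLatency + dataLatency),
    // while a parallel-access cache pays only the slower of the two.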

    /** The number of targets for each MSHR. */
    const int numTarget;

    /** Do we forward snoops from mem side port through to cpu side port? */
    bool forwardSnoops;

    /**
     * Clusivity with respect to the upstream cache, determining if we
     * fill into both this cache and the cache above on a miss. Note
     * that we currently do not support strict clusivity policies.
     */
    const Enums::Clusivity clusivity;

    /**
     * Is this cache read only, for example the instruction cache, or
     * table-walker cache. A cache that is read only should never see
     * any writes, and should never get any dirty data (and hence
     * never have to do any writebacks).
     */
    const bool isReadOnly;

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /** Increasing order number assigned to each incoming request. */
    uint64_t order;

    /** Stores the cycle when the cache blocked, for statistics. */
    Cycles blockedCycle;

    /** Pointer to the MSHR that has no targets. */
    MSHR *noTargetMSHR;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /**
     * The address range to which the cache responds on the CPU side.
     * Normally this is all possible memory addresses. */
    const AddrRangeList addrRanges;

  public:
    /** System we are currently operating in. */
    System *system;

    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command.
        @sa Packet::Command */
    Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command.
        @sa Packet::Command */
    Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;
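
    // How the derived formulas relate (a sketch of what regStats()
    // sets up, per command and per thread):
    //
    //     accesses = hits + misses
    //     missRate = misses / accesses
    //     avgMissLatency = missLatency / misses
    //
    // with the demand* variants aggregating reads and writes, and the
    // overall* variants aggregating all commands.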

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of times a HW-prefetched block is evicted w/o reference. */
    Stats::Scalar unusedPrefetches;

    /** Number of blocks written back per thread. */
    Stats::Vector writebacks;

    /** Number of misses that hit in the MSHRs per command and thread. */
    Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that hit in the MSHRs. */
    Stats::Formula demandMshrHits;
    /** Total number of misses that hit in the MSHRs. */
    Stats::Formula overallMshrHits;

    /** Number of misses that miss in the MSHRs, per command and thread. */
    Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that miss in the MSHRs. */
    Stats::Formula demandMshrMisses;
    /** Total number of misses that miss in the MSHRs. */
    Stats::Formula overallMshrMisses;

    /** Number of uncacheable misses in the MSHRs, per command and thread. */
    Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
    /** Total number of uncacheable misses in the MSHRs. */
    Stats::Formula overallMshrUncacheable;

    /** Total cycle latency of each MSHR miss, per command and thread. */
    Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of demand MSHR misses. */
    Stats::Formula demandMshrMissLatency;
    /** Total cycle latency of overall MSHR misses. */
    Stats::Formula overallMshrMissLatency;

    /** Total cycle latency of each uncacheable MSHR miss, per command
        and thread. */
    Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of overall uncacheable MSHR misses. */
    Stats::Formula overallMshrUncacheableLatency;

#if 0
    /** The total number of MSHR accesses per command and thread. */
    Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
    /** The total number of demand MSHR accesses. */
    Stats::Formula demandMshrAccesses;
    /** The total number of MSHR accesses. */
    Stats::Formula overallMshrAccesses;
#endif

    /** The miss rate in the MSHRs per command and thread. */
    Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
    /** The demand miss rate in the MSHRs. */
    Stats::Formula demandMshrMissRate;
    /** The overall miss rate in the MSHRs. */
    Stats::Formula overallMshrMissRate;

    /** The average latency of an MSHR miss, per command and thread. */
    Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average latency of a demand MSHR miss. */
    Stats::Formula demandAvgMshrMissLatency;
    /** The average overall latency of an MSHR miss. */
    Stats::Formula overallAvgMshrMissLatency;

    /** The average latency of an uncacheable MSHR miss, per command
        and thread. */
    Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
    /** The average overall latency of an uncacheable MSHR miss. */
    Stats::Formula overallAvgMshrUncacheableLatency;

    /** Number of replacements of valid blocks. */
    Stats::Scalar replacements;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    void regStats() override;

    /** Registers probes. */
    void regProbePoints() override;

  public:
    BaseCache(const BaseCacheParams *p, unsigned blk_size);
    ~BaseCache();

    void init() override;

    BaseMasterPort &getMasterPort(const std::string &if_name,
                                  PortID idx = InvalidPortID) override;
    BaseSlavePort &getSlavePort(const std::string &if_name,
                                PortID idx = InvalidPortID) override;
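
    // The port names visible to the configuration scripts are
    // "cpu_side" and "mem_side"; a sketch of the expected behaviour
    // of the accessors above:
    //
    //     getSlavePort("cpu_side");   // returns cpuSidePort
    //     getMasterPort("mem_side");  // returns memSidePort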

    /**
     * Query block size of a cache.
     * @return The block size
     */
    unsigned
    getBlockSize() const
    {
        return blkSize;
    }

    const AddrRangeList &getAddrRanges() const { return addrRanges; }

    MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send = true)
    {
        MSHR *mshr = mshrQueue.allocate(pkt->getBlockAddr(blkSize), blkSize,
                                        pkt, time, order++,
                                        allocOnFill(pkt->cmd));

        if (mshrQueue.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_MSHRs);
        }

        if (sched_send) {
            // schedule the send
            schedMemSideSendEvent(time);
        }

        return mshr;
    }

    void allocateWriteBuffer(PacketPtr pkt, Tick time)
    {
        // should only see writes or clean evicts here
        assert(pkt->isWrite() || pkt->cmd == MemCmd::CleanEvict);

        Addr blk_addr = pkt->getBlockAddr(blkSize);

        WriteQueueEntry *wq_entry =
            writeBuffer.findMatch(blk_addr, pkt->isSecure());
        if (wq_entry && !wq_entry->inService) {
            DPRINTF(Cache, "Potential to merge writeback %s", pkt->print());
        }

        writeBuffer.allocate(blk_addr, blkSize, pkt, time, order++);

        if (writeBuffer.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_WriteBuffer);
        }

        // schedule the send
        schedMemSideSendEvent(time);
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked() const
    {
        return blocked != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curCycle();
            cpuSidePort.setBlocked();
        }
        blocked |= flag;
        DPRINTF(Cache, "Blocking for cause %d, mask=%d\n", cause, blocked);
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        blocked &= ~flag;
        DPRINTF(Cache, "Unblocking for cause %d, mask=%d\n", cause, blocked);
        if (blocked == 0) {
            blocked_cycles[cause] += curCycle() - blockedCycle;
            cpuSidePort.clearBlocked();
        }
    }
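
    // Example of the blocking protocol (a sketch drawn from
    // allocateMissBuffer() above and markInService() earlier):
    // filling the last MSHR blocks the CPU-side port, and draining
    // one entry unblocks it again:
    //
    //     if (mshrQueue.isFull())
    //         setBlocked(Blocked_NoMSHRs);   // port rejects requests
    //     ...
    //     clearBlocked(Blocked_NoMSHRs);     // retry sent upstream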

    /**
     * Schedule a send event for the memory-side port. If already
     * scheduled, this may reschedule the event at an earlier
     * time. When the specified time is reached, the port is free to
     * send either a response, a request, or a prefetch request.
     *
     * @param time The time when to attempt sending a packet.
     */
    void schedMemSideSendEvent(Tick time)
    {
        memSidePort.schedSendEvent(time);
    }

    bool inCache(Addr addr, bool is_secure) const {
        return tags->findBlock(addr, is_secure);
    }

    bool hasBeenPrefetched(Addr addr, bool is_secure) const {
        CacheBlk *block = tags->findBlock(addr, is_secure);
        if (block) {
            return block->wasPrefetched();
        } else {
            return false;
        }
    }

    bool inMissQueue(Addr addr, bool is_secure) const {
        return mshrQueue.findMatch(addr, is_secure);
    }

    void incMissCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
        pkt->req->incAccessDepth();
        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }
    void incHitCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
    }

    /**
     * Checks if the cache is coalescing writes
     *
     * @return True if the cache is coalescing writes
     */
    bool coalesce() const;

    /**
     * Cache block visitor that writes back dirty cache blocks using
     * functional writes.
     */
    void writebackVisitor(CacheBlk &blk);

    /**
     * Cache block visitor that invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to memory.
     */
    void invalidateVisitor(CacheBlk &blk);

    /**
     * Take an MSHR, turn it into a suitable downstream packet, and
     * send it out. This construct allows a queue entry to choose a suitable
     * approach based on its type.
     *
     * @param mshr The MSHR to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    virtual bool sendMSHRQueuePacket(MSHR* mshr);

    /**
     * Similar to sendMSHRQueuePacket, but for a write-queue entry
     * instead. Create the packet, send it, and if successful also
     * mark the entry in service.
     *
     * @param wq_entry The write-queue entry to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    bool sendWriteQueuePacket(WriteQueueEntry* wq_entry);

    /**
     * Serialize the state of the caches
     *
     * We currently don't support checkpointing cache state, so this panics.
     */
    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;
};
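
// Worked example for the WriteAllocator below (a sketch assuming a
// 64-byte block size and hypothetical parameters coalesce_limit = 2
// and no_allocate_limit = 12, i.e. thresholds of 128 and 768 bytes):
// a stream of consecutive 16-byte writes starts in ALLOCATE mode;
// once byteCount passes 128 the allocator switches to COALESCE and
// WriteReq MSHRs are delayed so whole lines can be merged; once
// byteCount passes 768 it switches to NO_ALLOCATE and whole-line
// writes bypass allocation, going straight to the memory below.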

/**
 * The write allocator inspects write packets and detects streaming
 * patterns. The write allocator supports a single stream where writes
 * are expected to access consecutive locations, and keeps track of the
 * size of the area covered by the consecutive writes in byteCount.
 *
 * 1) When byteCount has surpassed the coalesceLimit, the mode
 * switches from ALLOCATE to COALESCE, where writes should be delayed
 * until the whole block is written, at which point a single packet
 * (whole-line write) can service them.
 *
 * 2) When byteCount has also exceeded the noAllocateLimit (whole
 * line), we switch to NO_ALLOCATE, where writes should not allocate in
 * the cache but rather send a whole-line write to the memory below.
 */
class WriteAllocator : public SimObject {
  public:
    WriteAllocator(const WriteAllocatorParams *p) :
        SimObject(p),
        coalesceLimit(p->coalesce_limit * p->block_size),
        noAllocateLimit(p->no_allocate_limit * p->block_size),
        delayThreshold(p->delay_threshold)
    {
        reset();
    }

    /**
     * Should writes be coalesced? This is true if the mode is
     * COALESCE or NO_ALLOCATE.
     *
     * @return true if the cache should coalesce writes.
     */
    bool coalesce() const {
        return mode != WriteMode::ALLOCATE;
    }

    /**
     * Should writes allocate?
     *
     * @return true if the cache should allocate for writes.
     */
    bool allocate() const {
        return mode != WriteMode::NO_ALLOCATE;
    }

    /**
     * Reset the write allocator state, meaning that it allocates for
     * writes and has not recorded any information about qualifying
     * writes that might trigger a switch to coalescing and later no
     * allocation.
     */
    void reset() {
        mode = WriteMode::ALLOCATE;
        byteCount = 0;
        nextAddr = 0;
    }

    /**
     * Access whether we need to delay the current write.
     *
     * @param blk_addr The block address the packet writes to
     * @return true if the current packet should be delayed
     */
    bool delay(Addr blk_addr) {
        if (delayCtr[blk_addr] > 0) {
            --delayCtr[blk_addr];
            return true;
        } else {
            return false;
        }
    }

    /**
     * Clear the delay counter for the input block
     *
     * @param blk_addr The accessed cache block
     */
    void resetDelay(Addr blk_addr) {
        delayCtr.erase(blk_addr);
    }

    /**
     * Update the write mode based on the current write
     * packet. This method compares the packet's address with any
     * current stream, and updates the tracking and the mode
     * accordingly.
     *
     * @param write_addr Start address of the write request
     * @param write_size Size of the write request
     * @param blk_addr The block address that this packet writes to
     */
    void updateMode(Addr write_addr, unsigned write_size, Addr blk_addr);

  private:
    /**
     * The current mode for write coalescing and allocation, either
     * normal operation (ALLOCATE), write coalescing (COALESCE), or
     * write coalescing without allocation (NO_ALLOCATE).
     */
    enum class WriteMode : char {
        ALLOCATE,
        COALESCE,
        NO_ALLOCATE,
    };
    WriteMode mode;

    /** Address to match writes against to detect streams. */
    Addr nextAddr;

    /**
     * Bytes written contiguously. Saturating once we no longer
     * allocate.
     */
    uint32_t byteCount;

    /**
     * Limits for when to switch between the different write modes.
     */
    const uint32_t coalesceLimit;
    const uint32_t noAllocateLimit;
    /**
     * The number of times the allocator will delay a WriteReq MSHR.
     */
    const uint32_t delayThreshold;

    /**
     * Keep track of the number of times the allocator has delayed a
     * WriteReq MSHR.
     */
    std::unordered_map<Addr, Counter> delayCtr;
};

#endif //__MEM_CACHE_BASE_HH__