// base.hh revision 13746:723109f11d56
/*
 * Copyright (c) 2012-2013, 2015-2016, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Hansson
 *          Nikos Nikoleris
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __MEM_CACHE_BASE_HH__
#define __MEM_CACHE_BASE_HH__

#include <cassert>
#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>

#include "base/addr_range.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "enums/Clusivity.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/mshr_queue.hh"
#include "mem/cache/tags/base.hh"
#include "mem/cache/write_queue.hh"
#include "mem/cache/write_queue_entry.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/packet_queue.hh"
#include "mem/qport.hh"
#include "mem/request.hh"
#include "params/WriteAllocator.hh"
#include "sim/eventq.hh"
#include "sim/probe/probe.hh"
#include "sim/serialize.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

class BaseMasterPort;
class BasePrefetcher;
class BaseSlavePort;
class MSHR;
class MasterPort;
class QueueEntry;
struct BaseCacheParams;

/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
  protected:
    /**
     * Indexes to enumerate the MSHR queues.
     */
    enum MSHRQueueIndex {
        MSHRQueue_MSHRs,
        MSHRQueue_WriteBuffer
    };

  public:
    /**
     * Reasons for caches to be blocked.
     */
    enum BlockedCause {
        Blocked_NoMSHRs = MSHRQueue_MSHRs,
        Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
        Blocked_NoTargets,
        NUM_BLOCKED_CAUSES
    };

  protected:

    /**
     * A cache master port is used for the memory-side port of the
     * cache, and in addition to the basic timing port that only sends
     * response packets through a transmit list, it also offers the
     * ability to schedule and send request packets (requests &
     * writebacks). The send event is scheduled through schedSendEvent,
     * and the sendDeferredPacket of the timing port is modified to
     * consider both the transmit list and the requests from the MSHR.
     */
    class CacheMasterPort : public QueuedMasterPort
    {

      public:

        /**
         * Schedule a send of a request packet (from the MSHR). Note
         * that we could already have a retry outstanding.
         */
        void schedSendEvent(Tick time)
        {
            DPRINTF(CachePort, "Scheduling send event at %llu\n", time);
            reqQueue.schedSendEvent(time);
        }

      protected:

        CacheMasterPort(const std::string &_name, BaseCache *_cache,
                        ReqPacketQueue &_reqQueue,
                        SnoopRespPacketQueue &_snoopRespQueue) :
            QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue)
        { }

        /**
         * Memory-side port always snoops.
         *
         * @return always true
         */
        virtual bool isSnooping() const { return true; }
    };

    /**
     * Override the default behaviour of sendDeferredPacket to enable
     * the memory-side cache port to also send requests based on the
     * current MSHR status. This queue has a pointer to our specific
     * cache implementation and is used by the MemSidePort.
     */
    class CacheReqPacketQueue : public ReqPacketQueue
    {

      protected:

        BaseCache &cache;
        SnoopRespPacketQueue &snoopRespQueue;

      public:

        CacheReqPacketQueue(BaseCache &cache, MasterPort &port,
                            SnoopRespPacketQueue &snoop_resp_queue,
                            const std::string &label) :
            ReqPacketQueue(cache, port, label), cache(cache),
            snoopRespQueue(snoop_resp_queue) { }

        /**
         * Override the normal sendDeferredPacket and do not only
         * consider the transmit list (used for responses), but also
         * requests.
         */
        virtual void sendDeferredPacket();

        /**
         * Check if there is a conflicting snoop response about to be
         * sent out, and if so simply stall any requests, and schedule
         * a send event at the same time as the next snoop response is
         * being sent out.
         */
        bool checkConflictingSnoop(Addr addr)
        {
            if (snoopRespQueue.hasAddr(addr)) {
                DPRINTF(CachePort, "Waiting for snoop response to be "
                        "sent\n");
                Tick when = snoopRespQueue.deferredPacketReadyTime();
                schedSendEvent(when);
                return true;
            }
            return false;
        }
    };

    /**
     * The memory-side port extends the base cache master port with
     * access functions for functional, atomic and timing snoops.
     */
    class MemSidePort : public CacheMasterPort
    {
      private:

        /** The cache-specific queue. */
        CacheReqPacketQueue _reqQueue;

        /** The snoop response queue paired with the request queue. */
        SnoopRespPacketQueue _snoopRespQueue;

        // a pointer to our specific cache implementation
        BaseCache *cache;

      protected:

        virtual void recvTimingSnoopReq(PacketPtr pkt);

        virtual bool recvTimingResp(PacketPtr pkt);

        virtual Tick recvAtomicSnoop(PacketPtr pkt);

        virtual void recvFunctionalSnoop(PacketPtr pkt);

      public:

        MemSidePort(const std::string &_name, BaseCache *_cache,
                    const std::string &_label);
    };

    /**
     * A cache slave port is used for the CPU-side port of the cache,
     * and it is basically a simple timing port that uses a transmit
     * list for responses to the CPU (or connected master). In
     * addition, it has the functionality to block the port for
     * incoming requests. If blocked, the port will issue a retry once
     * unblocked.
     */
    class CacheSlavePort : public QueuedSlavePort
    {

      public:

        /** Do not accept any new requests. */
        void setBlocked();

        /** Return to normal operation and accept new requests. */
        void clearBlocked();

        bool isBlocked() const { return blocked; }

      protected:

        CacheSlavePort(const std::string &_name, BaseCache *_cache,
                       const std::string &_label);

        /** A normal packet queue used to store responses. */
        RespPacketQueue queue;

        /** Whether the port is currently blocked for incoming requests. */
        bool blocked;

        /** Whether a retry needs to be sent once the port is unblocked. */
        bool mustSendRetry;

      private:

        void processSendRetry();

        EventFunctionWrapper sendRetryEvent;

    };

    /**
     * The CPU-side port extends the base cache slave port with access
     * functions for functional, atomic and timing requests.
     */
    class CpuSidePort : public CacheSlavePort
    {
      private:

        // a pointer to our specific cache implementation
        BaseCache *cache;

      protected:
        virtual bool recvTimingSnoopResp(PacketPtr pkt) override;

        virtual bool tryTiming(PacketPtr pkt) override;

        virtual bool recvTimingReq(PacketPtr pkt) override;

        virtual Tick recvAtomic(PacketPtr pkt) override;

        virtual void recvFunctional(PacketPtr pkt) override;

        virtual AddrRangeList getAddrRanges() const override;

      public:

        CpuSidePort(const std::string &_name, BaseCache *_cache,
                    const std::string &_label);

    };

    CpuSidePort cpuSidePort;
    MemSidePort memSidePort;

  protected:

    /** Miss status registers */
    MSHRQueue mshrQueue;

    /** Write/writeback buffer */
    WriteQueue writeBuffer;

    /** Tag and data storage */
    BaseTags *tags;

    /** Prefetcher */
    BasePrefetcher *prefetcher;

    /** To probe when a cache hit occurs */
    ProbePointArg<PacketPtr> *ppHit;

    /** To probe when a cache miss occurs */
    ProbePointArg<PacketPtr> *ppMiss;

    /** To probe when a cache fill occurs */
    ProbePointArg<PacketPtr> *ppFill;

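    /*
     * Note (illustrative, not part of the original interface docs): the
     * probe points above are created in regProbePoints(), and the cache
     * implementation is expected to notify the matching point when the
     * corresponding event happens, along the lines of:
     *
     *   if (satisfied)
     *       ppHit->notify(pkt);   // lookup found the block
     *   else
     *       ppMiss->notify(pkt);  // request goes to an MSHR / downstream
     */
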
    /**
     * The writeAllocator drives optimizations for streaming writes.
     * It first determines whether a WriteReq MSHR should be delayed,
     * thus ensuring that we wait longer in cases when we are write
     * coalescing and allowing all the bytes of the line to be written
     * before the MSHR packet is sent downstream. This works in unison
     * with the tracking in the MSHR to check if the entire line is
     * written. The write mode also affects the behaviour on filling
     * any whole-line writes. Normally the cache allocates the line
     * when receiving the InvalidateResp, but after seeing enough
     * consecutive lines we switch to using the tempBlock, and thus
     * end up not allocating the line, and instead turning the
     * whole-line write into a writeback straight away.
     */
    WriteAllocator * const writeAllocator;

    /**
     * Temporary cache block for occasional transitory use. We use
     * the tempBlock to fill when allocation fails (e.g., when there
     * is an outstanding request that accesses the victim block) or
     * when we want to avoid allocation (e.g., exclusive caches)
     */
    TempCacheBlk *tempBlock;

    /**
     * Upstream caches need this packet until true is returned, so
     * hold it for deletion until a subsequent call
     */
    std::unique_ptr<Packet> pendingDelete;

    /**
     * Mark a request as in service (sent downstream in the memory
     * system), effectively making this MSHR the ordering point.
     */
    void markInService(MSHR *mshr, bool pending_modified_resp)
    {
        bool wasFull = mshrQueue.isFull();
        mshrQueue.markInService(mshr, pending_modified_resp);

        if (wasFull && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }
    }

    void markInService(WriteQueueEntry *entry)
    {
        bool wasFull = writeBuffer.isFull();
        writeBuffer.markInService(entry);

        if (wasFull && !writeBuffer.isFull()) {
            clearBlocked(Blocked_NoWBBuffers);
        }
    }

    /**
     * Determine whether we should allocate on a fill or not. If this
     * cache is mostly inclusive with regards to the upstream cache(s)
     * we always allocate (for any non-forwarded and cacheable
     * requests). In the case of a mostly exclusive cache, we allocate
     * on fill if the packet did not come from a cache, that is, if we
     * are dealing with a whole-line write (which behaves much like a
     * writeback), the original target packet came from a non-caching
     * source, or if we are performing a prefetch or LLSC access.
     *
     * @param cmd Command of the incoming requesting packet
     * @return Whether we should allocate on the fill
     */
    inline bool allocOnFill(MemCmd cmd) const
    {
        return clusivity == Enums::mostly_incl ||
            cmd == MemCmd::WriteLineReq ||
            cmd == MemCmd::ReadReq ||
            cmd == MemCmd::WriteReq ||
            cmd.isPrefetch() ||
            cmd.isLLSC();
    }

    /**
     * Regenerate block address using tags.
     * Block address regeneration depends on whether we're using a temporary
     * block or not.
     *
     * @param blk The block whose address is regenerated.
     * @return The block's address.
     */
    Addr regenerateBlkAddr(CacheBlk* blk);

    /**
     * Calculate the access latency given a tag lookup latency, and
     * whether the access was a hit or a miss.
     *
     * @param blk The cache block that was accessed.
     * @param delay The delay until the packet's metadata is present.
     * @param lookup_lat Latency of the respective tag lookup.
     * @return The number of cycles the block access takes.
     */
    Cycles calculateAccessLatency(const CacheBlk* blk, const uint32_t delay,
                                  const Cycles lookup_lat) const;

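    /*
     * Sketch of how the latency parameters are expected to combine (the
     * authoritative computation is calculateAccessLatency() in the
     * implementation; this is only an illustration): with sequential tag
     * and data access a hit costs roughly lookup_lat + dataLatency, with
     * parallel access roughly max(lookup_lat, dataLatency), while a miss
     * only pays the tag lookup before the request is forwarded
     * downstream.
     */
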
    /**
     * Does all the processing necessary to perform the provided request.
     * @param pkt The memory request to perform.
     * @param blk The cache block to be updated.
     * @param lat The latency of the access.
     * @param writebacks List for any writebacks that need to be performed.
     * @return Boolean indicating whether the request was satisfied.
     */
    virtual bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
                        PacketList &writebacks);

    /*
     * Handle a timing request that hit in the cache
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param request_time The tick at which the block lookup is complete
     */
    virtual void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk,
                                    Tick request_time);

    /*
     * Handle a timing request that missed in the cache
     *
     * Implementation specific handling for different cache
     * implementations
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param forward_time The tick at which we can process dependent requests
     * @param request_time The tick at which the block lookup is complete
     */
    virtual void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk,
                                     Tick forward_time,
                                     Tick request_time) = 0;

    /*
     * Handle a timing request that missed in the cache
     *
     * Common functionality across different cache implementations
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param mshr Any existing mshr for the referenced cache block
     * @param forward_time The tick at which we can process dependent requests
     * @param request_time The tick at which the block lookup is complete
     */
    void handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                             Tick forward_time, Tick request_time);

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     */
    virtual void recvTimingReq(PacketPtr pkt);

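    /*
     * Overall timing-mode flow (a sketch; the actual control flow lives
     * in the implementation): recvTimingReq() performs the lookup through
     * access(), which reports hit/miss, the access latency, and any
     * writebacks caused by the lookup, e.g.
     *
     *   PacketList writebacks;
     *   bool satisfied = access(pkt, blk, lat, writebacks);
     *
     * On a hit the request is completed via handleTimingReqHit(), on a
     * miss it is handed to handleTimingReqMiss(), and the collected
     * writebacks are queued via doWritebacks().
     */
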
    /**
     * Handling the special case of uncacheable write responses to
     * make recvTimingResp less cluttered.
     */
    void handleUncacheableWriteResp(PacketPtr pkt);

    /**
     * Service non-deferred MSHR targets using the received response
     *
     * Iterates through the list of targets that can be serviced with
     * the current response.
     *
     * @param mshr The MSHR that corresponds to the response
     * @param pkt The response packet
     * @param blk The referenced block
     */
    virtual void serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt,
                                    CacheBlk *blk) = 0;

    /**
     * Handles a response (cache line fill/write ack) from the bus.
     * @param pkt The response packet
     */
    virtual void recvTimingResp(PacketPtr pkt);

    /**
     * Snoops bus transactions to maintain coherence.
     * @param pkt The current bus transaction.
     */
    virtual void recvTimingSnoopReq(PacketPtr pkt) = 0;

    /**
     * Handle a snoop response.
     * @param pkt Snoop response packet
     */
    virtual void recvTimingSnoopResp(PacketPtr pkt) = 0;

    /**
     * Handle a request in atomic mode that missed in this cache
     *
     * Creates a downstream request, sends it to the memory below and
     * handles the response. As we are in atomic mode all operations
     * are performed immediately.
     *
     * @param pkt The packet with the requests
     * @param blk The referenced block
     * @param writebacks A list with packets for any performed writebacks
     * @return Cycles for handling the request
     */
    virtual Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
                                       PacketList &writebacks) = 0;

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @return The number of ticks required for the access.
     */
    virtual Tick recvAtomic(PacketPtr pkt);

    /**
     * Snoop for the provided request in the cache and return the estimated
     * time taken.
     * @param pkt The memory request to snoop
     * @return The number of ticks required for the snoop.
     */
    virtual Tick recvAtomicSnoop(PacketPtr pkt) = 0;

    /**
     * Performs the access specified by the request.
     *
     * @param pkt The request to perform.
     * @param from_cpu_side Whether the access comes from the CPU-side
     * port rather than the memory-side port
     */
    virtual void functionalAccess(PacketPtr pkt, bool from_cpu_side);

    /**
     * Handle doing the Compare and Swap function for SPARC.
     */
    void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);

    /**
     * Return the next queue entry to service, either a pending miss
     * from the MSHR queue, a buffered write from the write buffer, or
     * something from the prefetcher. This function is responsible
     * for prioritizing among those sources on the fly.
     */
    QueueEntry* getNextQueueEntry();

    /**
     * Insert writebacks into the write buffer
     */
    virtual void doWritebacks(PacketList& writebacks, Tick forward_time) = 0;

    /**
     * Send writebacks down the memory hierarchy in atomic mode
     */
    virtual void doWritebacksAtomic(PacketList& writebacks) = 0;

    /**
     * Create an appropriate downstream bus request packet.
     *
     * Creates a new packet with the request to be sent to the memory
     * below, or nullptr if the current request in cpu_pkt should just
     * be forwarded on.
     *
     * @param cpu_pkt The miss packet that needs to be satisfied.
     * @param blk The referenced block, can be nullptr.
     * @param needs_writable Indicates that the block must be writable
     * even if the request in cpu_pkt doesn't indicate that.
     * @param is_whole_line_write True if there are writes for the
     * whole line
     * @return A packet to send to the memory below
     */
    virtual PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                                       bool needs_writable,
                                       bool is_whole_line_write) const = 0;

    /**
     * Determine if clean lines should be written back or not. In
     * cases where a downstream cache is mostly inclusive we likely
     * want it to act as a victim cache also for lines that have not
     * been modified. Hence, we cannot simply drop the line (or send a
     * clean evict), but rather need to send the actual data.
     */
    const bool writebackClean;

    /**
     * Writebacks from the tempBlock, generated on the response path
     * in atomic mode, must happen after the call to recvAtomic has
     * finished (for the right ordering of the packets). We therefore
     * need to hold on to the packets, and have a method and an event
     * to send them.
     */
    PacketPtr tempBlockWriteback;

    /**
     * Send the outstanding tempBlock writeback. To be called after
     * recvAtomic finishes in cases where the block we filled is in
     * fact the tempBlock, and now needs to be written back.
     */
    void writebackTempBlockAtomic() {
        assert(tempBlockWriteback != nullptr);
        PacketList writebacks{tempBlockWriteback};
        doWritebacksAtomic(writebacks);
        tempBlockWriteback = nullptr;
    }

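    /*
     * Illustrative use (a sketch; the actual scheduling is done by
     * recvAtomic in the implementation): when an atomic-mode fill ends up
     * in the tempBlock, the resulting writeback packet is stashed in
     * tempBlockWriteback and the event declared below is scheduled for
     * the current tick, so the writeback is only sent once the ongoing
     * recvAtomic call has returned:
     *
     *   tempBlockWriteback = evictBlock(blk);
     *   schedule(writebackTempBlockAtomicEvent, curTick());
     */
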
    /**
     * An event to write back the tempBlock after recvAtomic
     * finishes. To avoid other calls to recvAtomic getting in
     * between, we create this event with a higher priority.
     */
    EventFunctionWrapper writebackTempBlockAtomicEvent;

    /**
     * Perform any necessary updates to the block and perform any data
     * exchange between the packet and the block. The flags of the
     * packet are also set accordingly.
     *
     * @param pkt Request packet from upstream that hit a block
     * @param blk Cache block that the packet hit
     * @param deferred_response Whether this request originally missed
     * @param pending_downgrade Whether the writable flag is to be removed
     */
    virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                                bool deferred_response = false,
                                bool pending_downgrade = false);

    /**
     * Maintain the clusivity of this cache by potentially
     * invalidating a block. This method works in conjunction with
     * satisfyRequest, but is separate to allow us to handle all MSHR
     * targets before potentially dropping a block.
     *
     * @param from_cache Whether we have dealt with a packet from a cache
     * @param blk The block that should potentially be dropped
     */
    void maintainClusivity(bool from_cache, CacheBlk *blk);

    /**
     * Handle a fill operation caused by a received packet.
     *
     * Populates a cache block and handles all outstanding requests for the
     * satisfied fill request. This version takes two memory requests. One
     * contains the fill data, the other is an optional target to satisfy.
     * Note that the reason we return a list of writebacks rather than
     * inserting them directly in the write buffer is that this function
     * is called by both atomic and timing-mode accesses, and in atomic
     * mode we don't mess with the write buffer (we just perform the
     * writebacks atomically once the original request is complete).
     *
     * @param pkt The memory request with the fill data.
     * @param blk The cache block if it already exists.
     * @param writebacks List for any writebacks that need to be performed.
     * @param allocate Whether to allocate a block or use the temp block
     * @return Pointer to the new cache block.
     */
    CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
                         PacketList &writebacks, bool allocate);

    /**
     * Allocate a new block and perform any necessary writebacks
     *
     * Find a victim block and if necessary prepare writebacks for any
     * existing data. May return nullptr if there are no replaceable
     * blocks. If a replaceable block is found, it inserts the new block in
     * its place. The new block, however, is not set as valid yet.
     *
     * @param pkt Packet holding the address to update
     * @param writebacks A list of writeback packets for the evicted blocks
     * @return the allocated block
     */
    CacheBlk *allocateBlock(const PacketPtr pkt, PacketList &writebacks);

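    /*
     * Typical fill sequence (a sketch; the real flow is split across
     * recvTimingResp/recvAtomic in the implementation): the response data
     * is inserted with handleFill(), which relies on allocateBlock() when
     * allocation is requested, and any victim data comes back through the
     * writeback list instead of being queued directly:
     *
     *   PacketList writebacks;
     *   blk = handleFill(pkt, blk, writebacks, allocOnFill(pkt->cmd));
     *   doWritebacks(writebacks, forward_time); // timing mode only;
     *                                           // forward_time is computed
     *                                           // by the caller
     */
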
    /**
     * Evict a cache block.
     *
     * Performs a writeback if necessary and invalidates the block
     *
     * @param blk Block to invalidate
     * @return A packet with the writeback, can be nullptr
     */
    M5_NODISCARD virtual PacketPtr evictBlock(CacheBlk *blk) = 0;

    /**
     * Evict a cache block.
     *
     * Performs a writeback if necessary and invalidates the block
     *
     * @param blk Block to invalidate
     * @param writebacks Return a list of packets with writebacks
     */
    void evictBlock(CacheBlk *blk, PacketList &writebacks);

    /**
     * Invalidate a cache block.
     *
     * @param blk Block to invalidate
     */
    void invalidateBlock(CacheBlk *blk);

    /**
     * Create a writeback request for the given block.
     *
     * @param blk The block to writeback.
     * @return The writeback request for the block.
     */
    PacketPtr writebackBlk(CacheBlk *blk);

    /**
     * Create a writeclean request for the given block.
     *
     * Creates a request that writes the block to the cache below
     * without evicting the block from the current cache.
     *
     * @param blk The block to write clean.
     * @param dest The destination of the write clean operation.
     * @param id Use the given packet id for the write clean operation.
     * @return The generated write clean packet.
     */
    PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id);

    /**
     * Write back dirty blocks in the cache using functional accesses.
     */
    virtual void memWriteback() override;

    /**
     * Invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to
     * memory. Make sure to call memWriteback() first if you want
     * them written to memory.
     */
    virtual void memInvalidate() override;

    /**
     * Determine if there are any dirty blocks in the cache.
     *
     * @return true if at least one block is dirty, false otherwise.
     */
    bool isDirty() const;

    /**
     * Determine if an address is in the ranges covered by this
     * cache. This is useful to filter snoops.
     *
     * @param addr Address to check against
     *
     * @return Whether the address in question is in range
     */
    bool inRange(Addr addr) const;

    /**
     * Find next request ready time from among possible sources.
     */
    Tick nextQueueReadyTime() const;

    /** Block size of this cache */
    const unsigned blkSize;

    /**
     * The latency of tag lookup of a cache. It occurs when there is
     * an access to the cache.
     */
    const Cycles lookupLatency;

    /**
     * The latency of data access of a cache. It occurs when there is
     * an access to the cache.
     */
    const Cycles dataLatency;

    /**
     * This is the forward latency of the cache. It occurs when there
     * is a cache miss and a request is forwarded downstream, in
     * particular an outbound miss.
     */
    const Cycles forwardLatency;

    /** The latency to fill a cache block */
    const Cycles fillLatency;

    /**
     * The latency of sending a response to its upper level cache/core
     * on a linefill. The responseLatency parameter captures this
     * latency.
     */
    const Cycles responseLatency;

    /**
     * Whether tags and data are accessed sequentially.
     */
    const bool sequentialAccess;

    /** The number of targets for each MSHR. */
    const int numTarget;

    /** Do we forward snoops from mem side port through to cpu side port? */
    bool forwardSnoops;

    /**
     * Clusivity with respect to the upstream cache, determining if we
     * fill into both this cache and the cache above on a miss. Note
     * that we currently do not support strict clusivity policies.
     */
    const Enums::Clusivity clusivity;

    /**
     * Is this cache read only, for example the instruction cache, or
     * table-walker cache. A cache that is read only should never see
     * any writes, and should never get any dirty data (and hence
     * never have to do any writebacks).
     */
    const bool isReadOnly;

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /** Increasing order number assigned to each incoming request. */
    uint64_t order;

    /** Stores the cycle when the cache was last blocked, for statistics. */
    Cycles blockedCycle;

    /** Pointer to the MSHR that has no targets. */
    MSHR *noTargetMSHR;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /**
     * The address range to which the cache responds on the CPU side.
     * Normally this is all possible memory addresses. */
    const AddrRangeList addrRanges;

  public:
    /** System we are currently operating in. */
    System *system;

    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command.
        @sa Packet::Command */
    Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command.
        @sa Packet::Command */
    Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

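    /*
     * The formulas above are wired up in regStats(); conceptually, per
     * command and aggregated over the demand and overall command groups:
     *
     *   accesses       = hits + misses
     *   missRate       = misses / accesses
     *   avgMissLatency = missLatency / misses
     */
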
    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of times a HW-prefetched block is evicted w/o reference. */
    Stats::Scalar unusedPrefetches;

    /** Number of blocks written back per thread. */
    Stats::Vector writebacks;

    /** Number of misses that hit in the MSHRs per command and thread. */
    Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that hit in the MSHRs. */
    Stats::Formula demandMshrHits;
    /** Total number of misses that hit in the MSHRs. */
    Stats::Formula overallMshrHits;

    /** Number of misses that miss in the MSHRs, per command and thread. */
    Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that miss in the MSHRs. */
    Stats::Formula demandMshrMisses;
    /** Total number of misses that miss in the MSHRs. */
    Stats::Formula overallMshrMisses;

    /** Number of uncacheable accesses, per command and thread. */
    Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
    /** Total number of uncacheable accesses. */
    Stats::Formula overallMshrUncacheable;

    /** Total cycle latency of each MSHR miss, per command and thread. */
    Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of demand MSHR misses. */
    Stats::Formula demandMshrMissLatency;
    /** Total cycle latency of overall MSHR misses. */
    Stats::Formula overallMshrMissLatency;

    /** Total cycle latency of uncacheable accesses, per command and thread. */
    Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of all uncacheable accesses. */
    Stats::Formula overallMshrUncacheableLatency;

#if 0
    /** The total number of MSHR accesses per command and thread. */
    Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
    /** The total number of demand MSHR accesses. */
    Stats::Formula demandMshrAccesses;
    /** The total number of MSHR accesses. */
    Stats::Formula overallMshrAccesses;
#endif

    /** The miss rate in the MSHRs per command and thread. */
    Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
    /** The demand miss rate in the MSHRs. */
    Stats::Formula demandMshrMissRate;
    /** The overall miss rate in the MSHRs. */
    Stats::Formula overallMshrMissRate;

    /** The average latency of an MSHR miss, per command and thread. */
    Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average latency of a demand MSHR miss. */
    Stats::Formula demandAvgMshrMissLatency;
    /** The average overall latency of an MSHR miss. */
    Stats::Formula overallAvgMshrMissLatency;

    /** The average latency of an uncacheable access, per command and thread. */
    Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
    /** The average overall latency of an uncacheable access. */
    Stats::Formula overallAvgMshrUncacheableLatency;

    /** Number of replacements of valid blocks. */
    Stats::Scalar replacements;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    void regStats() override;

    /** Registers probes. */
    void regProbePoints() override;

  public:
    BaseCache(const BaseCacheParams *p, unsigned blk_size);
    ~BaseCache();

    void init() override;

    BaseMasterPort &getMasterPort(const std::string &if_name,
                                  PortID idx = InvalidPortID) override;
    BaseSlavePort &getSlavePort(const std::string &if_name,
                                PortID idx = InvalidPortID) override;

    /**
     * Query block size of a cache.
     * @return The block size
     */
    unsigned
    getBlockSize() const
    {
        return blkSize;
    }

    const AddrRangeList &getAddrRanges() const { return addrRanges; }

    MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send = true)
    {
        MSHR *mshr = mshrQueue.allocate(pkt->getBlockAddr(blkSize), blkSize,
                                        pkt, time, order++,
                                        allocOnFill(pkt->cmd));

        if (mshrQueue.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_MSHRs);
        }

        if (sched_send) {
            // schedule the send
            schedMemSideSendEvent(time);
        }

        return mshr;
    }

    void allocateWriteBuffer(PacketPtr pkt, Tick time)
    {
        // should only see writes or clean evicts here
        assert(pkt->isWrite() || pkt->cmd == MemCmd::CleanEvict);

        Addr blk_addr = pkt->getBlockAddr(blkSize);

        WriteQueueEntry *wq_entry =
            writeBuffer.findMatch(blk_addr, pkt->isSecure());
        if (wq_entry && !wq_entry->inService) {
            DPRINTF(Cache, "Potential to merge writeback %s", pkt->print());
        }

        writeBuffer.allocate(blk_addr, blkSize, pkt, time, order++);

        if (writeBuffer.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_WriteBuffer);
        }

        // schedule the send
        schedMemSideSendEvent(time);
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked() const
    {
        return blocked != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curCycle();
            cpuSidePort.setBlocked();
        }
        blocked |= flag;
        DPRINTF(Cache, "Blocking for cause %d, mask=%d\n", cause, blocked);
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        blocked &= ~flag;
        DPRINTF(Cache, "Unblocking for cause %d, mask=%d\n", cause, blocked);
        if (blocked == 0) {
            blocked_cycles[cause] += curCycle() - blockedCycle;
            cpuSidePort.clearBlocked();
        }
    }

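    /*
     * Usage sketch (illustrative, mirroring allocateMissBuffer() and
     * markInService() above): each BlockedCause owns one bit of the
     * blocked vector, and the CPU-side port only accepts requests again
     * once every cause has been cleared.
     *
     *   if (mshrQueue.isFull())
     *       setBlocked(Blocked_NoMSHRs);    // sets bit 0, blocks cpuSidePort
     *   ...
     *   if (wasFull && !mshrQueue.isFull())
     *       clearBlocked(Blocked_NoMSHRs);  // clears bit 0; the port is
     *                                       // unblocked once blocked == 0
     */
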
    /**
     * Schedule a send event for the memory-side port. If already
     * scheduled, this may reschedule the event at an earlier
     * time. When the specified time is reached, the port is free to
     * send either a response, a request, or a prefetch request.
     *
     * @param time The time when to attempt sending a packet.
     */
    void schedMemSideSendEvent(Tick time)
    {
        memSidePort.schedSendEvent(time);
    }

    bool inCache(Addr addr, bool is_secure) const {
        return tags->findBlock(addr, is_secure);
    }

    bool hasBeenPrefetched(Addr addr, bool is_secure) const {
        CacheBlk *block = tags->findBlock(addr, is_secure);
        if (block) {
            return block->wasPrefetched();
        } else {
            return false;
        }
    }

    bool inMissQueue(Addr addr, bool is_secure) const {
        return mshrQueue.findMatch(addr, is_secure);
    }

    void incMissCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
        pkt->req->incAccessDepth();
        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }
    void incHitCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
    }

    /**
     * Checks if the cache is coalescing writes
     *
     * @return True if the cache is coalescing writes
     */
    bool coalesce() const;


    /**
     * Cache block visitor that writes back dirty cache blocks using
     * functional writes.
     */
    void writebackVisitor(CacheBlk &blk);

    /**
     * Cache block visitor that invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to memory.
     */
    void invalidateVisitor(CacheBlk &blk);

    /**
     * Take an MSHR, turn it into a suitable downstream packet, and
     * send it out. This construct allows a queue entry to choose a suitable
     * approach based on its type.
     *
     * @param mshr The MSHR to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    virtual bool sendMSHRQueuePacket(MSHR* mshr);

    /**
     * Similar to sendMSHRQueuePacket, but for a write-queue entry
     * instead. Create the packet, and send it, and if successful also
     * mark the entry in service.
     *
     * @param wq_entry The write-queue entry to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    bool sendWriteQueuePacket(WriteQueueEntry* wq_entry);

    /**
     * Serialize the state of the caches
     *
     * We currently don't support checkpointing cache state, so this panics.
     */
    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;
};

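/*
 * Illustrative mode progression for the WriteAllocator declared below (a
 * sketch; the actual transition logic is in updateMode() in the
 * implementation). Assuming a hypothetical configuration with a 64-byte
 * block size, coalesce_limit = 2 and no_allocate_limit = 12, the limits
 * computed in the constructor are 128 and 768 bytes: a stream of
 * contiguous writes starts in ALLOCATE, switches to COALESCE once more
 * than 128 contiguous bytes have been observed, and to NO_ALLOCATE once
 * the stream exceeds 768 bytes, at which point whole-line writes are sent
 * downstream as writebacks instead of allocating in this cache.
 */
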
/**
 * The write allocator inspects write packets and detects streaming
 * patterns. The write allocator supports a single stream where writes
 * are expected to access consecutive locations and keeps track of the
 * size of the area covered by the consecutive writes in byteCount.
 *
 * 1) When byteCount has surpassed the coalesceLimit the mode
 * switches from ALLOCATE to COALESCE where writes should be delayed
 * until the whole block is written at which point a single packet
 * (whole line write) can service them.
 *
 * 2) When byteCount has also exceeded the noAllocateLimit (whole
 * line) we switch to NO_ALLOCATE where writes should not allocate in
 * the cache but rather send a whole line write to the memory below.
 */
class WriteAllocator : public SimObject {
  public:
    WriteAllocator(const WriteAllocatorParams *p) :
        SimObject(p),
        coalesceLimit(p->coalesce_limit * p->block_size),
        noAllocateLimit(p->no_allocate_limit * p->block_size),
        delayThreshold(p->delay_threshold)
    {
        reset();
    }

    /**
     * Should writes be coalesced? This is true if the mode is set to
     * COALESCE or NO_ALLOCATE.
     *
     * @return True if the cache should coalesce writes.
     */
    bool coalesce() const {
        return mode != WriteMode::ALLOCATE;
    }

    /**
     * Should writes allocate?
     *
     * @return True if the cache should allocate for writes; false once
     * the mode has switched to NO_ALLOCATE.
     */
    bool allocate() const {
        return mode != WriteMode::NO_ALLOCATE;
    }

    /**
     * Reset the write allocator state, meaning that it allocates for
     * writes and has not recorded any information about qualifying
     * writes that might trigger a switch to coalescing and later no
     * allocation.
     */
    void reset() {
        mode = WriteMode::ALLOCATE;
        byteCount = 0;
        nextAddr = 0;
    }

    /**
     * Check whether the current write needs to be delayed.
     *
     * @param blk_addr The block address the packet writes to
     * @return true if the current packet should be delayed
     */
    bool delay(Addr blk_addr) {
        if (delayCtr[blk_addr] > 0) {
            --delayCtr[blk_addr];
            return true;
        } else {
            return false;
        }
    }

    /**
     * Clear the delay counter for the input block
     *
     * @param blk_addr The accessed cache block
     */
    void resetDelay(Addr blk_addr) {
        delayCtr.erase(blk_addr);
    }

    /**
     * Update the write mode based on the current write
     * packet. This method compares the packet's address with any
     * current stream, and updates the tracking and the mode
     * accordingly.
     *
     * @param write_addr Start address of the write request
     * @param write_size Size of the write request
     * @param blk_addr The block address that this packet writes to
     */
    void updateMode(Addr write_addr, unsigned write_size, Addr blk_addr);

  private:
    /**
     * The current mode for write coalescing and allocation, either
     * normal operation (ALLOCATE), write coalescing (COALESCE), or
     * write coalescing without allocation (NO_ALLOCATE).
     */
    enum class WriteMode : char {
        ALLOCATE,
        COALESCE,
        NO_ALLOCATE,
    };
    WriteMode mode;

    /** Address to match writes against to detect streams. */
    Addr nextAddr;

    /**
     * Bytes written contiguously. Saturating once we no longer
     * allocate.
     */
    uint32_t byteCount;

    /**
     * Limits for when to switch between the different write modes.
     */
    const uint32_t coalesceLimit;
    const uint32_t noAllocateLimit;
    /**
     * The number of times the allocator will delay a WriteReq MSHR.
     */
    const uint32_t delayThreshold;

    /**
     * Keep track, per cache block address, of how many more times a
     * WriteReq MSHR for that block will be delayed.
     */
    std::unordered_map<Addr, Counter> delayCtr;
};

#endif //__MEM_CACHE_BASE_HH__