base.hh revision 11436:f351b7f248db
/*
 * Copyright (c) 2012-2013, 2015-2016 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Ron Dreslinski
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __MEM_CACHE_BASE_HH__
#define __MEM_CACHE_BASE_HH__

#include <algorithm>
#include <list>
#include <string>
#include <vector>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "mem/cache/mshr_queue.hh"
#include "mem/cache/write_queue.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/qport.hh"
#include "mem/request.hh"
#include "params/BaseCache.hh"
#include "sim/eventq.hh"
#include "sim/full_system.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
  protected:
    /**
     * Indexes to enumerate the MSHR queues.
     */
    enum MSHRQueueIndex {
        MSHRQueue_MSHRs,
        MSHRQueue_WriteBuffer
    };

  public:
    /**
     * Reasons for caches to be blocked.
     */
    enum BlockedCause {
        Blocked_NoMSHRs = MSHRQueue_MSHRs,
        Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
        Blocked_NoTargets,
        NUM_BLOCKED_CAUSES
    };
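
    // Editorial note, for illustration: the first two BlockedCause values
    // are deliberately aliased to the MSHRQueueIndex values above, so a
    // queue index can be cast directly to its blocking cause. This is
    // exactly what allocateMissBuffer() below does when the MSHR queue
    // fills up:
    //
    //     setBlocked((BlockedCause)MSHRQueue_MSHRs);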

  protected:

    /**
     * A cache master port is used for the memory-side port of the
     * cache, and in addition to the basic timing port that only sends
     * response packets through a transmit list, it also offers the
     * ability to schedule and send request packets (requests &
     * writebacks). The send event is scheduled through schedSendEvent,
     * and the sendDeferredPacket of the timing port is modified to
     * consider both the transmit list and the requests from the MSHR.
     */
    class CacheMasterPort : public QueuedMasterPort
    {

      public:

        /**
         * Schedule a send of a request packet (from the MSHR). Note
         * that we could already have a retry outstanding.
         */
        void schedSendEvent(Tick time)
        {
            DPRINTF(CachePort, "Scheduling send event at %llu\n", time);
            reqQueue.schedSendEvent(time);
        }

      protected:

        CacheMasterPort(const std::string &_name, BaseCache *_cache,
                        ReqPacketQueue &_reqQueue,
                        SnoopRespPacketQueue &_snoopRespQueue) :
            QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue)
        { }

        /**
         * Memory-side port always snoops.
         *
         * @return always true
         */
        virtual bool isSnooping() const { return true; }
    };

    /**
     * A cache slave port is used for the CPU-side port of the cache,
     * and it is basically a simple timing port that uses a transmit
     * list for responses to the CPU (or connected master). In
     * addition, it has the functionality to block the port for
     * incoming requests. If blocked, the port will issue a retry once
     * unblocked.
     */
    class CacheSlavePort : public QueuedSlavePort
    {

      public:

        /** Do not accept any new requests. */
        void setBlocked();

        /** Return to normal operation and accept new requests. */
        void clearBlocked();

        bool isBlocked() const { return blocked; }

      protected:

        CacheSlavePort(const std::string &_name, BaseCache *_cache,
                       const std::string &_label);

        /** A normal packet queue used to store responses. */
        RespPacketQueue queue;

        bool blocked;

        bool mustSendRetry;

      private:

        void processSendRetry();

        EventWrapper<CacheSlavePort,
                     &CacheSlavePort::processSendRetry> sendRetryEvent;

    };

    CacheSlavePort *cpuSidePort;
    CacheMasterPort *memSidePort;

  protected:

    /** Miss status registers */
    MSHRQueue mshrQueue;

    /** Write/writeback buffer */
    WriteQueue writeBuffer;

    /**
     * Mark a request as in service (sent downstream in the memory
     * system), effectively making this MSHR the ordering point.
     */
    void markInService(MSHR *mshr, bool pending_modified_resp)
    {
        bool wasFull = mshrQueue.isFull();
        mshrQueue.markInService(mshr, pending_modified_resp);

        if (wasFull && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }
    }

    void markInService(WriteQueueEntry *entry)
    {
        bool wasFull = writeBuffer.isFull();
        writeBuffer.markInService(entry);

        if (wasFull && !writeBuffer.isFull()) {
            clearBlocked(Blocked_NoWBBuffers);
        }
    }

    /**
     * Determine if we should allocate on a fill or not.
     *
     * @param cmd Packet command being added as an MSHR target
     *
     * @return Whether we should allocate on a fill or not
     */
    virtual bool allocOnFill(MemCmd cmd) const = 0;
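
    // Illustrative sketch only (not part of the original header): a
    // concrete cache chooses its allocation policy by overriding
    // allocOnFill(). A hypothetical policy that allocates for demand
    // requests and prefetches, but not for upstream writebacks, might
    // read:
    //
    //     bool allocOnFill(MemCmd cmd) const override
    //     {
    //         return cmd == MemCmd::ReadReq || cmd == MemCmd::WriteReq ||
    //                cmd == MemCmd::WriteLineReq || cmd.isPrefetch();
    //     }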

    /**
     * Write back dirty blocks in the cache using functional accesses.
     */
    virtual void memWriteback() = 0;
    /**
     * Invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to
     * memory. Make sure to call memWriteback() first if you want
     * them written to memory.
     */
    virtual void memInvalidate() = 0;
    /**
     * Determine if there are any dirty blocks in the cache.
     *
     * \return true if at least one block is dirty, false otherwise.
     */
    virtual bool isDirty() const = 0;

    /**
     * Determine if an address is in the ranges covered by this
     * cache. This is useful to filter snoops.
     *
     * @param addr Address to check against
     *
     * @return If the address in question is in range
     */
    bool inRange(Addr addr) const;

    /** Block size of this cache */
    const unsigned blkSize;

    /**
     * The latency of a tag lookup in the cache. It is incurred on
     * every access to the cache.
     */
    const Cycles lookupLatency;

    /**
     * This is the forward latency of the cache. It occurs when there
     * is a cache miss and a request is forwarded downstream, in
     * particular an outbound miss.
     */
    const Cycles forwardLatency;

    /** The latency to fill a cache block */
    const Cycles fillLatency;

    /**
     * The latency of sending a response to the upper-level cache or
     * core on a linefill. The responseLatency parameter captures this
     * latency.
     */
    const Cycles responseLatency;

    /** The number of targets for each MSHR. */
    const int numTarget;

    /** Do we forward snoops from mem side port through to cpu side port? */
    bool forwardSnoops;

    /**
     * Is this cache read only, for example the instruction cache, or
     * a table-walker cache. A cache that is read only should never see
     * any writes, and should never get any dirty data (and hence
     * never have to do any writebacks).
     */
    const bool isReadOnly;

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /** Increasing order number assigned to each incoming request. */
    uint64_t order;

    /** Stores the cycle at which the cache blocked, for statistics. */
    Cycles blockedCycle;

    /** Pointer to the MSHR that has no targets. */
    MSHR *noTargetMSHR;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /**
     * The address range to which the cache responds on the CPU side.
     * Normally this is all possible memory addresses.
     */
    const AddrRangeList addrRanges;

  public:
    /** System we are currently operating in. */
    System *system;

    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command.
        @sa Packet::Command */
    Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;
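
    // Illustrative sketch (an assumption; the authoritative definitions
    // live in base.cc's regStats()): the demand/overall formulas are
    // composed from the per-command vectors, along the lines of:
    //
    //     demandHits = hits[MemCmd::ReadReq] + hits[MemCmd::WriteReq]
    //                + /* ...the remaining demand command types... */;
    //     overallHits = demandHits
    //                 + /* ...non-demand (e.g. prefetch) commands... */;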

    /** Number of misses per thread for each type of command.
        @sa Packet::Command */
    Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;
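
    // Illustrative sketch (an assumption; see base.cc's regStats() for
    // the authoritative definitions): the rate and average-latency
    // formulas are simple ratios of the statistics declared above, e.g.:
    //
    //     demandMissRate = demandMisses / demandAccesses;
    //     demandAvgMissLatency = demandMissLatency / demandMisses;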

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of fast writes (WH64) performed. */
    Stats::Scalar fastWrites;

    /** The number of cache copies performed. */
    Stats::Scalar cacheCopies;

    /** The number of times a HW-prefetched block is evicted w/o reference. */
    Stats::Scalar unusedPrefetches;

    /** Number of blocks written back per thread. */
    Stats::Vector writebacks;

    /** Number of misses that hit in the MSHRs per command and thread. */
    Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that hit in the MSHRs. */
    Stats::Formula demandMshrHits;
    /** Total number of misses that hit in the MSHRs. */
    Stats::Formula overallMshrHits;

    /** Number of misses that miss in the MSHRs, per command and thread. */
    Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that miss in the MSHRs. */
    Stats::Formula demandMshrMisses;
    /** Total number of misses that miss in the MSHRs. */
    Stats::Formula overallMshrMisses;

    /** Number of uncacheable accesses, per command and thread. */
    Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
    /** Total number of uncacheable accesses. */
    Stats::Formula overallMshrUncacheable;

    /** Total cycle latency of each MSHR miss, per command and thread. */
    Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of demand MSHR misses. */
    Stats::Formula demandMshrMissLatency;
    /** Total cycle latency of overall MSHR misses. */
    Stats::Formula overallMshrMissLatency;

    /** Total cycle latency of each uncacheable access, per command and
        thread. */
    Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of overall uncacheable accesses. */
    Stats::Formula overallMshrUncacheableLatency;

#if 0
    /** The total number of MSHR accesses per command and thread. */
    Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
    /** The total number of demand MSHR accesses. */
    Stats::Formula demandMshrAccesses;
    /** The total number of MSHR accesses. */
    Stats::Formula overallMshrAccesses;
#endif

    /** The miss rate in the MSHRs per command and thread. */
    Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
    /** The demand miss rate in the MSHRs. */
    Stats::Formula demandMshrMissRate;
    /** The overall miss rate in the MSHRs. */
    Stats::Formula overallMshrMissRate;

    /** The average latency of an MSHR miss, per command and thread. */
    Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average latency of a demand MSHR miss. */
    Stats::Formula demandAvgMshrMissLatency;
    /** The average overall latency of an MSHR miss. */
    Stats::Formula overallAvgMshrMissLatency;

    /** The average latency of an uncacheable access, per command and
        thread. */
    Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
    /** The average overall latency of an uncacheable access. */
    Stats::Formula overallAvgMshrUncacheableLatency;

    /** The number of times a thread hit its MSHR cap. */
    Stats::Vector mshr_cap_events;
    /** The number of times software prefetches caused the MSHR to block. */
    Stats::Vector soft_prefetch_mshr_full;

    Stats::Scalar mshr_no_allocate_misses;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    virtual void regStats();

  public:
    BaseCache(const BaseCacheParams *p, unsigned blk_size);
    ~BaseCache() {}

    virtual void init();

    virtual BaseMasterPort &getMasterPort(const std::string &if_name,
                                          PortID idx = InvalidPortID);
    virtual BaseSlavePort &getSlavePort(const std::string &if_name,
                                        PortID idx = InvalidPortID);

    /**
     * Query block size of a cache.
     * @return The block size
     */
    unsigned
    getBlockSize() const
    {
        return blkSize;
    }

    /** Align an address to the block size (clear the offset bits). */
    Addr blockAlign(Addr addr) const { return (addr & ~(Addr(blkSize - 1))); }

    const AddrRangeList &getAddrRanges() const { return addrRanges; }

    MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send = true)
    {
        MSHR *mshr = mshrQueue.allocate(blockAlign(pkt->getAddr()), blkSize,
                                        pkt, time, order++,
                                        allocOnFill(pkt->cmd));

        if (mshrQueue.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_MSHRs);
        }

        if (sched_send) {
            // schedule the send
            schedMemSideSendEvent(time);
        }

        return mshr;
    }

    void allocateWriteBuffer(PacketPtr pkt, Tick time)
    {
        // should only see writes or clean evicts here
        assert(pkt->isWrite() || pkt->cmd == MemCmd::CleanEvict);

        Addr blk_addr = blockAlign(pkt->getAddr());

        WriteQueueEntry *wq_entry =
            writeBuffer.findMatch(blk_addr, pkt->isSecure());
        if (wq_entry && !wq_entry->inService) {
            DPRINTF(Cache, "Potential to merge writeback %s to %#llx\n",
                    pkt->cmdString(), pkt->getAddr());
        }

        writeBuffer.allocate(blk_addr, blkSize, pkt, time, order++);

        if (writeBuffer.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_WriteBuffer);
        }

        // schedule the send
        schedMemSideSendEvent(time);
    }
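
    // Illustrative sketch only (simplified; `satisfied` and `wb_pkt` are
    // hypothetical stand-ins, not names from this header): a derived
    // cache typically drives these helpers from its timing-access path,
    // e.g.:
    //
    //     if (!satisfied) {
    //         // track the miss and allocate an MSHR for the request
    //         allocateMissBuffer(pkt, clockEdge(forwardLatency));
    //     }
    //     if (wb_pkt) {
    //         // queue an eviction/writeback packet
    //         allocateWriteBuffer(wb_pkt, clockEdge(forwardLatency));
    //     }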
542 */ 543 bool isBlocked() const 544 { 545 return blocked != 0; 546 } 547 548 /** 549 * Marks the access path of the cache as blocked for the given cause. This 550 * also sets the blocked flag in the slave interface. 551 * @param cause The reason for the cache blocking. 552 */ 553 void setBlocked(BlockedCause cause) 554 { 555 uint8_t flag = 1 << cause; 556 if (blocked == 0) { 557 blocked_causes[cause]++; 558 blockedCycle = curCycle(); 559 cpuSidePort->setBlocked(); 560 } 561 blocked |= flag; 562 DPRINTF(Cache,"Blocking for cause %d, mask=%d\n", cause, blocked); 563 } 564 565 /** 566 * Marks the cache as unblocked for the given cause. This also clears the 567 * blocked flags in the appropriate interfaces. 568 * @param cause The newly unblocked cause. 569 * @warning Calling this function can cause a blocked request on the bus to 570 * access the cache. The cache must be in a state to handle that request. 571 */ 572 void clearBlocked(BlockedCause cause) 573 { 574 uint8_t flag = 1 << cause; 575 blocked &= ~flag; 576 DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked); 577 if (blocked == 0) { 578 blocked_cycles[cause] += curCycle() - blockedCycle; 579 cpuSidePort->clearBlocked(); 580 } 581 } 582 583 /** 584 * Schedule a send event for the memory-side port. If already 585 * scheduled, this may reschedule the event at an earlier 586 * time. When the specified time is reached, the port is free to 587 * send either a response, a request, or a prefetch request. 588 * 589 * @param time The time when to attempt sending a packet. 590 */ 591 void schedMemSideSendEvent(Tick time) 592 { 593 memSidePort->schedSendEvent(time); 594 } 595 596 virtual bool inCache(Addr addr, bool is_secure) const = 0; 597 598 virtual bool inMissQueue(Addr addr, bool is_secure) const = 0; 599 600 void incMissCount(PacketPtr pkt) 601 { 602 assert(pkt->req->masterId() < system->maxMasters()); 603 misses[pkt->cmdToIndex()][pkt->req->masterId()]++; 604 pkt->req->incAccessDepth(); 605 if (missCount) { 606 --missCount; 607 if (missCount == 0) 608 exitSimLoop("A cache reached the maximum miss count"); 609 } 610 } 611 void incHitCount(PacketPtr pkt) 612 { 613 assert(pkt->req->masterId() < system->maxMasters()); 614 hits[pkt->cmdToIndex()][pkt->req->masterId()]++; 615 616 } 617 618}; 619 620#endif //__MEM_CACHE_BASE_HH__ 621