base.hh revision 10884
/*
 * Copyright (c) 2012-2013, 2015 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Ron Dreslinski
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __BASE_CACHE_HH__
#define __BASE_CACHE_HH__

#include <algorithm>
#include <list>
#include <string>
#include <vector>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "mem/cache/mshr_queue.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/qport.hh"
#include "mem/request.hh"
#include "params/BaseCache.hh"
#include "sim/eventq.hh"
#include "sim/full_system.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

class MSHR;

/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
    /**
     * Indexes to enumerate the MSHR queues.
     */
    enum MSHRQueueIndex {
        MSHRQueue_MSHRs,
        MSHRQueue_WriteBuffer
    };

  public:
    /**
     * Reasons for caches to be blocked.
     */
    enum BlockedCause {
        Blocked_NoMSHRs = MSHRQueue_MSHRs,
        Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
        Blocked_NoTargets,
        NUM_BLOCKED_CAUSES
    };

    /**
     * Reasons for cache to request a bus.
     */
    enum RequestCause {
        Request_MSHR = MSHRQueue_MSHRs,
        Request_WB = MSHRQueue_WriteBuffer,
        Request_PF,
        NUM_REQUEST_CAUSES
    };
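
    // The first members of BlockedCause and RequestCause reuse the
    // MSHRQueueIndex values, so a queue's index can be cast directly to
    // the matching cause, e.g. setBlocked((BlockedCause)mq->index) as
    // done in allocateBufferInternal() and markInServiceInternal() below.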

  protected:

    /**
     * A cache master port is used for the memory-side port of the
     * cache, and in addition to the basic timing port that only sends
     * response packets through a transmit list, it also offers the
     * ability to schedule and send request packets (requests &
     * writebacks). The send event is scheduled through requestBus, and
     * the sendDeferredPacket of the timing port is modified to consider
     * both the transmit list and the requests from the MSHR.
     */
    class CacheMasterPort : public QueuedMasterPort
    {

      public:

        /**
         * Schedule a send of a request packet (from the MSHR). Note
         * that we could already have a retry outstanding.
         */
        void requestBus(RequestCause cause, Tick time)
        {
            DPRINTF(CachePort, "Scheduling request at %llu due to %d\n",
                    time, cause);
            reqQueue.schedSendEvent(time);
        }

      protected:

        CacheMasterPort(const std::string &_name, BaseCache *_cache,
                        ReqPacketQueue &_reqQueue,
                        SnoopRespPacketQueue &_snoopRespQueue) :
            QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue)
        { }

        /**
         * Memory-side port always snoops.
         *
         * @return always true
         */
        virtual bool isSnooping() const { return true; }
    };

    /**
     * A cache slave port is used for the CPU-side port of the cache,
     * and it is basically a simple timing port that uses a transmit
     * list for responses to the CPU (or connected master). In
     * addition, it has the functionality to block the port for
     * incoming requests. If blocked, the port will issue a retry once
     * unblocked.
     */
    class CacheSlavePort : public QueuedSlavePort
    {

      public:

        /** Do not accept any new requests. */
        void setBlocked();

        /** Return to normal operation and accept new requests. */
        void clearBlocked();

        bool isBlocked() const { return blocked; }

      protected:

        CacheSlavePort(const std::string &_name, BaseCache *_cache,
                       const std::string &_label);

        /** A normal packet queue used to store responses. */
        RespPacketQueue queue;

        bool blocked;

        bool mustSendRetry;

      private:

        void processSendRetry();

        EventWrapper<CacheSlavePort,
                     &CacheSlavePort::processSendRetry> sendRetryEvent;

    };

    CacheSlavePort *cpuSidePort;
    CacheMasterPort *memSidePort;

  protected:

    /** Miss status registers */
    MSHRQueue mshrQueue;

    /** Write/writeback buffer */
    MSHRQueue writeBuffer;

    /**
     * Allocate a buffer, passing the time indicating when to schedule
     * an event to the queued port to go and ask the MSHR and write
     * queue if they have packets to send.
     *
     * allocateBufferInternal() is called from:
     * - allocateWriteBuffer (uncached write forwarded to the WriteBuffer);
     * - allocateMissBuffer (miss in the MSHR queue);
     */
    MSHR *allocateBufferInternal(MSHRQueue *mq, Addr addr, int size,
                                 PacketPtr pkt, Tick time, bool requestBus)
    {
        // check that the address is block aligned since we rely on
        // this in a number of places when checking for matches and
        // overlap
        assert(addr == blockAlign(addr));

        MSHR *mshr = mq->allocate(addr, size, pkt, time, order++);

        if (mq->isFull()) {
            setBlocked((BlockedCause)mq->index);
        }

        if (requestBus) {
            requestMemSideBus((RequestCause)mq->index, time);
        }

        return mshr;
    }

    void markInServiceInternal(MSHR *mshr, bool pending_dirty_resp)
    {
        MSHRQueue *mq = mshr->queue;
        bool wasFull = mq->isFull();
        mq->markInService(mshr, pending_dirty_resp);
        if (wasFull && !mq->isFull()) {
            clearBlocked((BlockedCause)mq->index);
        }
    }
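
    // Together these two helpers keep the blocking state in sync with
    // queue occupancy: allocating the entry that fills a queue blocks
    // the cpu-side port, and marking an entry in service when the queue
    // was full unblocks it again. A derived cache handling a miss might
    // use them roughly as follows (a sketch only; the real call sites
    // and the pending_dirty_resp decision live in the derived class):
    //
    //     MSHR *mshr = allocateMissBuffer(pkt, curTick(), true);
    //     ...
    //     // once the memory-side port actually sends the request
    //     markInServiceInternal(mshr, pending_dirty_resp);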

    /**
     * Write back dirty blocks in the cache using functional accesses.
     */
    virtual void memWriteback() = 0;

    /**
     * Invalidates all blocks in the cache.
     *
     * @warning Dirty cache lines will not be written back to
     * memory. Make sure to call memWriteback() first if you want
     * to write them to memory.
     */
    virtual void memInvalidate() = 0;

    /**
     * Determine if there are any dirty blocks in the cache.
     *
     * \return true if at least one block is dirty, false otherwise.
     */
    virtual bool isDirty() const = 0;

    /**
     * Determine if an address is in the ranges covered by this
     * cache. This is useful to filter snoops.
     *
     * @param addr Address to check against
     *
     * @return If the address in question is in range
     */
    bool inRange(Addr addr) const;

    /** Block size of this cache */
    const unsigned blkSize;

    /**
     * The latency of a tag lookup in the cache. It is incurred on
     * every access to the cache.
     */
    const Cycles lookupLatency;

    /**
     * This is the forward latency of the cache. It occurs when there
     * is a cache miss and a request is forwarded downstream, in
     * particular an outbound miss.
     */
    const Cycles forwardLatency;

    /** The latency to fill a cache block */
    const Cycles fillLatency;

    /**
     * The latency of sending a response to the upper level cache/core
     * on a linefill. The responseLatency parameter captures this
     * latency.
     */
    const Cycles responseLatency;

    /** The number of targets for each MSHR. */
    const int numTarget;

    /** Do we forward snoops from mem side port through to cpu side port? */
    const bool forwardSnoops;

    /** Is this cache a toplevel cache (e.g. L1, I/O cache). If so we should
     * never try to forward ownership and similar optimizations to the cpu
     * side */
    const bool isTopLevel;

    /**
     * Is this cache read only, for example the instruction cache, or
     * table-walker cache. A cache that is read only should never see
     * any writes, and should never get any dirty data (and hence
     * never have to do any writebacks).
     */
    const bool isReadOnly;

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;
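
    // With the BlockedCause values above, setBlocked()/clearBlocked()
    // (see below) use one bit per cause:
    //
    //     bit 0: Blocked_NoMSHRs     (1 << Blocked_NoMSHRs)
    //     bit 1: Blocked_NoWBBuffers (1 << Blocked_NoWBBuffers)
    //     bit 2: Blocked_NoTargets   (1 << Blocked_NoTargets)
    //
    // The cache is blocked as long as any bit is set, e.g. blocked ==
    // 0x5 means it is out of both MSHRs and MSHR targets.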

    /** Increasing order number assigned to each incoming request. */
    uint64_t order;

    /** Stores the cycle at which the cache was blocked, for statistics. */
    Cycles blockedCycle;

    /** Pointer to the MSHR that has no targets. */
    MSHR *noTargetMSHR;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /**
     * The address range to which the cache responds on the CPU side.
     * Normally this is all possible memory addresses. */
    const AddrRangeList addrRanges;

  public:
    /** System we are currently operating in. */
    System *system;

    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command. @sa Packet::Command */
    Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command. @sa Packet::Command */
    Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of fast writes (WH64) performed. */
    Stats::Scalar fastWrites;

    /** The number of cache copies performed. */
    Stats::Scalar cacheCopies;

    /** Number of blocks written back per thread. */
    Stats::Vector writebacks;

    /** Number of misses that hit in the MSHRs per command and thread. */
    Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that hit in the MSHRs. */
    Stats::Formula demandMshrHits;
    /** Total number of misses that hit in the MSHRs. */
    Stats::Formula overallMshrHits;

    /** Number of misses that miss in the MSHRs, per command and thread. */
    Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that miss in the MSHRs. */
    Stats::Formula demandMshrMisses;
    /** Total number of misses that miss in the MSHRs. */
    Stats::Formula overallMshrMisses;

    /** Number of uncacheable accesses, per command and thread. */
    Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
    /** Total number of uncacheable accesses. */
    Stats::Formula overallMshrUncacheable;

    /** Total cycle latency of each MSHR miss, per command and thread. */
    Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of demand MSHR misses. */
    Stats::Formula demandMshrMissLatency;
    /** Total cycle latency of overall MSHR misses. */
    Stats::Formula overallMshrMissLatency;

    /** Total cycle latency of each uncacheable access, per command and thread. */
    Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of overall uncacheable accesses. */
    Stats::Formula overallMshrUncacheableLatency;

#if 0
    /** The total number of MSHR accesses per command and thread. */
    Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
    /** The total number of demand MSHR accesses. */
    Stats::Formula demandMshrAccesses;
    /** The total number of MSHR accesses. */
    Stats::Formula overallMshrAccesses;
#endif

    /** The miss rate in the MSHRs per command and thread. */
    Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
    /** The demand miss rate in the MSHRs. */
    Stats::Formula demandMshrMissRate;
    /** The overall miss rate in the MSHRs. */
    Stats::Formula overallMshrMissRate;

    /** The average latency of an MSHR miss, per command and thread. */
    Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average latency of a demand MSHR miss. */
    Stats::Formula demandAvgMshrMissLatency;
    /** The average overall latency of an MSHR miss. */
    Stats::Formula overallAvgMshrMissLatency;

    /** The average latency of an uncacheable access, per command and thread. */
    Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
    /** The average overall latency of an uncacheable access. */
    Stats::Formula overallAvgMshrUncacheableLatency;

    /** The number of times a thread hit its MSHR cap. */
    Stats::Vector mshr_cap_events;
    /** The number of times software prefetches caused the MSHR to block. */
    Stats::Vector soft_prefetch_mshr_full;

    Stats::Scalar mshr_no_allocate_misses;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    virtual void regStats();

  public:
    typedef BaseCacheParams Params;
    BaseCache(const Params *p);
    ~BaseCache() {}

    virtual void init();

    virtual BaseMasterPort &getMasterPort(const std::string &if_name,
                                          PortID idx = InvalidPortID);
    virtual BaseSlavePort &getSlavePort(const std::string &if_name,
                                        PortID idx = InvalidPortID);

    /**
     * Query block size of a cache.
     * @return The block size
     */
    unsigned
    getBlockSize() const
    {
        return blkSize;
    }

    Addr blockAlign(Addr addr) const { return (addr & ~(Addr(blkSize - 1))); }
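
    // For example, with 64-byte blocks (blkSize == 64) the mask is
    // ~0x3f, so blockAlign(0x12345) == 0x12340.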

    const AddrRangeList &getAddrRanges() const { return addrRanges; }

    MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool requestBus)
    {
        return allocateBufferInternal(&mshrQueue,
                                      blockAlign(pkt->getAddr()), blkSize,
                                      pkt, time, requestBus);
    }

    MSHR *allocateWriteBuffer(PacketPtr pkt, Tick time, bool requestBus)
    {
        // should only see clean evictions in a read-only cache
        assert(!isReadOnly || pkt->cmd == MemCmd::CleanEvict);
        assert(pkt->isWrite() && !pkt->isRead());
        return allocateBufferInternal(&writeBuffer,
                                      blockAlign(pkt->getAddr()), blkSize,
                                      pkt, time, requestBus);
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked() const
    {
        return blocked != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curCycle();
            cpuSidePort->setBlocked();
        }
        blocked |= flag;
        DPRINTF(Cache,"Blocking for cause %d, mask=%d\n", cause, blocked);
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        blocked &= ~flag;
        DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked);
        if (blocked == 0) {
            blocked_cycles[cause] += curCycle() - blockedCycle;
            cpuSidePort->clearBlocked();
        }
    }
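
    // Blocked_NoMSHRs and Blocked_NoWBBuffers are raised and cleared by
    // allocateBufferInternal()/markInServiceInternal() above. A derived
    // cache typically handles Blocked_NoTargets itself when an MSHR
    // reaches the numTarget cap, roughly along these lines (a sketch
    // only):
    //
    //     if (mshr->getNumTargets() == numTarget) {
    //         noTargetMSHR = mshr;
    //         setBlocked(Blocked_NoTargets);
    //     }
    //     ...
    //     // and once the response for that MSHR has been handled
    //     clearBlocked(Blocked_NoTargets);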

    /**
     * Request the master bus for the given cause and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void requestMemSideBus(RequestCause cause, Tick time)
    {
        memSidePort->requestBus(cause, time);
    }

    /**
     * Clear the master bus request for the given cause.
     * @param cause The request reason to clear.
     */
    void deassertMemSideBusRequest(RequestCause cause)
    {
        // Obsolete... we no longer signal bus requests explicitly so
        // we can't deassert them. Leaving this in as a no-op since
        // the prefetcher calls it to indicate that it no longer wants
        // to request a prefetch, and someday that might be
        // interesting again.
    }

    virtual unsigned int drain(DrainManager *dm);

    virtual bool inCache(Addr addr, bool is_secure) const = 0;

    virtual bool inMissQueue(Addr addr, bool is_secure) const = 0;

    void incMissCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
        pkt->req->incAccessDepth();
        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }

    void incHitCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
    }
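
    // These counters are meant to be bumped once per lookup by the
    // derived cache's access path, e.g. (sketch, where `satisfied`
    // stands for the outcome of the tag lookup):
    //
    //     if (satisfied) {
    //         incHitCount(pkt);
    //     } else {
    //         incMissCount(pkt);
    //     }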
599 } 600 601 virtual unsigned int drain(DrainManager *dm); 602 603 virtual bool inCache(Addr addr, bool is_secure) const = 0; 604 605 virtual bool inMissQueue(Addr addr, bool is_secure) const = 0; 606 607 void incMissCount(PacketPtr pkt) 608 { 609 assert(pkt->req->masterId() < system->maxMasters()); 610 misses[pkt->cmdToIndex()][pkt->req->masterId()]++; 611 pkt->req->incAccessDepth(); 612 if (missCount) { 613 --missCount; 614 if (missCount == 0) 615 exitSimLoop("A cache reached the maximum miss count"); 616 } 617 } 618 void incHitCount(PacketPtr pkt) 619 { 620 assert(pkt->req->masterId() < system->maxMasters()); 621 hits[pkt->cmdToIndex()][pkt->req->masterId()]++; 622 623 } 624 625}; 626 627#endif //__BASE_CACHE_HH__ 628