base.hh revision 10764:b32578b2af99
/*
 * Copyright (c) 2012-2013, 2015 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Ron Dreslinski
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __BASE_CACHE_HH__
#define __BASE_CACHE_HH__

#include <algorithm>
#include <list>
#include <string>
#include <vector>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "mem/cache/mshr_queue.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/qport.hh"
#include "mem/request.hh"
#include "params/BaseCache.hh"
#include "sim/eventq.hh"
#include "sim/full_system.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

class MSHR;

/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
    /**
     * Indexes to enumerate the MSHR queues.
     */
    enum MSHRQueueIndex {
        MSHRQueue_MSHRs,
        MSHRQueue_WriteBuffer
    };
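    // Illustrative note (not part of the original header): BlockedCause
    // and RequestCause below deliberately reuse the MSHRQueueIndex
    // values, so a queue's index can be cast directly to the matching
    // cause. allocateBufferInternal() relies on exactly this, e.g.
    //
    //     setBlocked((BlockedCause)mshrQueue.index);    // Blocked_NoMSHRs
    //     requestMemSideBus((RequestCause)writeBuffer.index, time); // Request_WB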
  public:
    /**
     * Reasons for caches to be blocked.
     */
    enum BlockedCause {
        Blocked_NoMSHRs = MSHRQueue_MSHRs,
        Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
        Blocked_NoTargets,
        NUM_BLOCKED_CAUSES
    };

    /**
     * Reasons for cache to request a bus.
     */
    enum RequestCause {
        Request_MSHR = MSHRQueue_MSHRs,
        Request_WB = MSHRQueue_WriteBuffer,
        Request_PF,
        NUM_REQUEST_CAUSES
    };

  protected:

    /**
     * A cache master port is used for the memory-side port of the
     * cache, and in addition to the basic timing port that only sends
     * response packets through a transmit list, it also offers the
     * ability to schedule and send request packets (requests &
     * writebacks). The send event is scheduled through requestBus,
     * and the sendDeferredPacket of the timing port is modified to
     * consider both the transmit list and the requests from the MSHR.
     */
    class CacheMasterPort : public QueuedMasterPort
    {

      public:

        /**
         * Schedule a send of a request packet (from the MSHR). Note
         * that we could already have a retry outstanding.
         */
        void requestBus(RequestCause cause, Tick time)
        {
            DPRINTF(CachePort, "Scheduling request at %llu due to %d\n",
                    time, cause);
            reqQueue.schedSendEvent(time);
        }

      protected:

        CacheMasterPort(const std::string &_name, BaseCache *_cache,
                        ReqPacketQueue &_reqQueue,
                        SnoopRespPacketQueue &_snoopRespQueue) :
            QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue)
        { }

        /**
         * Memory-side port always snoops.
         *
         * @return always true
         */
        virtual bool isSnooping() const { return true; }
    };

    /**
     * A cache slave port is used for the CPU-side port of the cache,
     * and it is basically a simple timing port that uses a transmit
     * list for responses to the CPU (or connected master). In
     * addition, it has the functionality to block the port for
     * incoming requests. If blocked, the port will issue a retry once
     * unblocked.
     */
    class CacheSlavePort : public QueuedSlavePort
    {

      public:

        /** Do not accept any new requests. */
        void setBlocked();

        /** Return to normal operation and accept new requests. */
        void clearBlocked();

        bool isBlocked() const { return blocked; }

      protected:

        CacheSlavePort(const std::string &_name, BaseCache *_cache,
                       const std::string &_label);

        /** A normal packet queue used to store responses. */
        RespPacketQueue queue;

        bool blocked;

        bool mustSendRetry;

      private:

        void processSendRetry();

        EventWrapper<CacheSlavePort,
                     &CacheSlavePort::processSendRetry> sendRetryEvent;

    };

    CacheSlavePort *cpuSidePort;
    CacheMasterPort *memSidePort;
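    // A sketch of the blocking handshake on the CPU side (illustrative
    // only; the member functions above are defined outside this
    // header). While the slave port is blocked, incoming timing
    // requests are refused and mustSendRetry is noted; clearing the
    // block schedules sendRetryEvent, which runs processSendRetry() to
    // tell the connected master to retry:
    //
    //     cpuSidePort->setBlocked();    // refuse new requests
    //     ...                           // resource frees up
    //     cpuSidePort->clearBlocked();  // schedule a retry to the master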
  protected:

    /** Miss status registers */
    MSHRQueue mshrQueue;

    /** Write/writeback buffer */
    MSHRQueue writeBuffer;

    /**
     * Allocate a buffer, passing the time at which to schedule an
     * event on the queued port that goes and asks the MSHR and write
     * queues whether they have packets to send.
     *
     * allocateBufferInternal() is called by:
     * - allocateWriteBuffer (uncached write forwarded to the write
     *   buffer);
     * - allocateMissBuffer (cacheable miss allocated in the MSHR
     *   queue);
     * - allocateUncachedReadBuffer (uncached read allocated in the
     *   MSHR queue).
     */
    MSHR *allocateBufferInternal(MSHRQueue *mq, Addr addr, int size,
                                 PacketPtr pkt, Tick time, bool requestBus)
    {
        // check that the address is block aligned since we rely on
        // this in a number of places when checking for matches and
        // overlap
        assert(addr == blockAlign(addr));

        MSHR *mshr = mq->allocate(addr, size, pkt, time, order++);

        if (mq->isFull()) {
            setBlocked((BlockedCause)mq->index);
        }

        if (requestBus) {
            requestMemSideBus((RequestCause)mq->index, time);
        }

        return mshr;
    }

    void markInServiceInternal(MSHR *mshr, bool pending_dirty_resp)
    {
        MSHRQueue *mq = mshr->queue;
        bool wasFull = mq->isFull();
        mq->markInService(mshr, pending_dirty_resp);
        if (wasFull && !mq->isFull()) {
            clearBlocked((BlockedCause)mq->index);
        }
    }

    /**
     * Write back dirty blocks in the cache using functional accesses.
     */
    virtual void memWriteback() = 0;

    /**
     * Invalidates all blocks in the cache.
     *
     * @warning Dirty cache lines will not be written back to
     * memory. Make sure to call memWriteback() first if you want to
     * write them back to memory.
     */
    virtual void memInvalidate() = 0;

    /**
     * Determine if there are any dirty blocks in the cache.
     *
     * @return true if at least one block is dirty, false otherwise.
     */
    virtual bool isDirty() const = 0;

    /** Block size of this cache */
    const unsigned blkSize;

    /**
     * The latency of a tag lookup in this cache. It is incurred on
     * every access to the cache.
     */
    const Cycles lookupLatency;

    /**
     * The forward latency of the cache. It is incurred when there is
     * a cache miss and a request is forwarded downstream, in
     * particular an outbound miss.
     */
    const Cycles forwardLatency;

    /** The latency to fill a cache block */
    const Cycles fillLatency;

    /**
     * The latency of sending a response to the upper-level cache or
     * core on a line fill.
     */
    const Cycles responseLatency;

    /** The number of targets for each MSHR. */
    const int numTarget;

    /** Do we forward snoops from the mem-side port through to the
     * cpu-side port? */
    const bool forwardSnoops;

    /** Is this cache a top-level cache (e.g. an L1 or I/O cache)? If
     * so, we should never try to forward ownership and similar
     * optimizations to the CPU side. */
    const bool isTopLevel;

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /** Increasing order number assigned to each incoming request. */
    uint64_t order;

    /** Stores the cycle at which the cache blocked, for statistics. */
    Cycles blockedCycle;

    /** Pointer to the MSHR that has no targets. */
    MSHR *noTargetMSHR;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /**
     * The address range to which the cache responds on the CPU side.
     * Normally this is all possible memory addresses. */
    const AddrRangeList addrRanges;
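    // A minimal usage sketch (illustrative, not part of the declared
    // interface): on a cacheable read miss, a derived cache would
    // typically allocate an MSHR and request the memory-side bus in
    // one call, e.g.
    //
    //     MSHR *mshr = allocateMissBuffer(pkt, clockEdge(forwardLatency), true);
    //
    // (allocateMissBuffer() is declared below.) If the MSHR queue
    // fills up as a side effect, allocateBufferInternal() blocks the
    // cache with cause Blocked_NoMSHRs; markInServiceInternal() lifts
    // the block once the queue is no longer full.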
  public:
    /** System we are currently operating in. */
    System *system;

    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command. @sa Packet::Command */
    Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command. @sa Packet::Command */
    Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;
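    // How the derived formulas relate (a sketch; the actual wiring is
    // done in regStats(), which is defined outside this header):
    //
    //     demandAccesses       = demandHits + demandMisses;
    //     demandMissRate       = demandMisses / demandAccesses;
    //     demandAvgMissLatency = demandMissLatency / demandMisses;
    //
    // with the "overall" variants additionally folding in non-demand
    // (e.g. prefetch) commands.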
    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of fast writes (WH64) performed. */
    Stats::Scalar fastWrites;

    /** The number of cache copies performed. */
    Stats::Scalar cacheCopies;

    /** Number of blocks written back per thread. */
    Stats::Vector writebacks;

    /** Number of misses that hit in the MSHRs per command and thread. */
    Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that hit in the MSHRs. */
    Stats::Formula demandMshrHits;
    /** Total number of misses that hit in the MSHRs. */
    Stats::Formula overallMshrHits;

    /** Number of misses that miss in the MSHRs, per command and thread. */
    Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that miss in the MSHRs. */
    Stats::Formula demandMshrMisses;
    /** Total number of misses that miss in the MSHRs. */
    Stats::Formula overallMshrMisses;

    /** Number of uncacheable accesses handled in the MSHRs, per command
     * and thread. */
    Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
    /** Total number of uncacheable accesses handled in the MSHRs. */
    Stats::Formula overallMshrUncacheable;

    /** Total cycle latency of each MSHR miss, per command and thread. */
    Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of demand MSHR misses. */
    Stats::Formula demandMshrMissLatency;
    /** Total cycle latency of overall MSHR misses. */
    Stats::Formula overallMshrMissLatency;

    /** Total cycle latency of uncacheable MSHR accesses, per command
     * and thread. */
    Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of overall uncacheable MSHR accesses. */
    Stats::Formula overallMshrUncacheableLatency;

#if 0
    /** The total number of MSHR accesses per command and thread. */
    Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
    /** The total number of demand MSHR accesses. */
    Stats::Formula demandMshrAccesses;
    /** The total number of MSHR accesses. */
    Stats::Formula overallMshrAccesses;
#endif

    /** The miss rate in the MSHRs per command and thread. */
    Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
    /** The demand miss rate in the MSHRs. */
    Stats::Formula demandMshrMissRate;
    /** The overall miss rate in the MSHRs. */
    Stats::Formula overallMshrMissRate;

    /** The average latency of an MSHR miss, per command and thread. */
    Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average latency of a demand MSHR miss. */
    Stats::Formula demandAvgMshrMissLatency;
    /** The average overall latency of an MSHR miss. */
    Stats::Formula overallAvgMshrMissLatency;

    /** The average latency of an uncacheable MSHR access, per command
     * and thread. */
    Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
    /** The average overall latency of an uncacheable MSHR access. */
    Stats::Formula overallAvgMshrUncacheableLatency;

    /** The number of times a thread hit its MSHR cap. */
    Stats::Vector mshr_cap_events;
    /** The number of times software prefetches caused the MSHR to block. */
    Stats::Vector soft_prefetch_mshr_full;

    Stats::Scalar mshr_no_allocate_misses;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    virtual void regStats();

  public:
    typedef BaseCacheParams Params;
    BaseCache(const Params *p);
    ~BaseCache() {}

    virtual void init();

    virtual BaseMasterPort &getMasterPort(const std::string &if_name,
                                          PortID idx = InvalidPortID);
    virtual BaseSlavePort &getSlavePort(const std::string &if_name,
                                        PortID idx = InvalidPortID);

    /**
     * Query block size of a cache.
     * @return The block size
     */
    unsigned
    getBlockSize() const
    {
        return blkSize;
    }

    Addr blockAlign(Addr addr) const { return (addr & ~(Addr(blkSize - 1))); }
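    // Worked example (illustrative): with blkSize == 64, blockAlign()
    // masks off the low six address bits, so
    //
    //     blockAlign(0x12345) == 0x12340
    //     blockAlign(0x12340) == 0x12340   // already aligned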
    const AddrRangeList &getAddrRanges() const { return addrRanges; }

    MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool requestBus)
    {
        assert(!pkt->req->isUncacheable());
        return allocateBufferInternal(&mshrQueue,
                                      blockAlign(pkt->getAddr()), blkSize,
                                      pkt, time, requestBus);
    }

    MSHR *allocateWriteBuffer(PacketPtr pkt, Tick time, bool requestBus)
    {
        assert(pkt->isWrite() && !pkt->isRead());
        return allocateBufferInternal(&writeBuffer,
                                      blockAlign(pkt->getAddr()), blkSize,
                                      pkt, time, requestBus);
    }

    MSHR *allocateUncachedReadBuffer(PacketPtr pkt, Tick time, bool requestBus)
    {
        assert(pkt->req->isUncacheable());
        assert(pkt->isRead());
        return allocateBufferInternal(&mshrQueue,
                                      blockAlign(pkt->getAddr()), blkSize,
                                      pkt, time, requestBus);
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked() const
    {
        return blocked != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given
     * cause. This also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curCycle();
            cpuSidePort->setBlocked();
        }
        blocked |= flag;
        DPRINTF(Cache, "Blocking for cause %d, mask=%d\n", cause, blocked);
    }

    /**
     * Marks the cache as unblocked for the given cause. This also
     * clears the blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on
     * the bus to access the cache. The cache must be in a state to
     * handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        blocked &= ~flag;
        DPRINTF(Cache, "Unblocking for cause %d, mask=%d\n", cause, blocked);
        if (blocked == 0) {
            blocked_cycles[cause] += curCycle() - blockedCycle;
            cpuSidePort->clearBlocked();
        }
    }
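    // Worked example of the blocked bit vector (illustrative):
    //
    //     setBlocked(Blocked_NoMSHRs);     // blocked == 0b001, port blocked
    //     setBlocked(Blocked_NoTargets);   // blocked == 0b101
    //     clearBlocked(Blocked_NoMSHRs);   // blocked == 0b100, still blocked
    //     clearBlocked(Blocked_NoTargets); // blocked == 0, port unblocked
    //
    // The slave port is only blocked/unblocked on the transitions to
    // and from a zero mask, and blocked_cycles is charged to the cause
    // that is cleared last.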
    /**
     * Request the master bus for the given cause and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void requestMemSideBus(RequestCause cause, Tick time)
    {
        memSidePort->requestBus(cause, time);
    }

    /**
     * Clear the master bus request for the given cause.
     * @param cause The request reason to clear.
     */
    void deassertMemSideBusRequest(RequestCause cause)
    {
        // Obsolete... we no longer signal bus requests explicitly so
        // we can't deassert them. Leaving this in as a no-op since
        // the prefetcher calls it to indicate that it no longer wants
        // to request a prefetch, and someday that might be
        // interesting again.
    }

    virtual unsigned int drain(DrainManager *dm);

    virtual bool inCache(Addr addr, bool is_secure) const = 0;

    virtual bool inMissQueue(Addr addr, bool is_secure) const = 0;

    void incMissCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
        pkt->req->incAccessDepth();
        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }

    void incHitCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
    }

};

#endif //__BASE_CACHE_HH__