base.hh revision 3606
/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __BASE_CACHE_HH__
#define __BASE_CACHE_HH__

#include <vector>
#include <string>
#include <list>
#include <inttypes.h>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/port.hh"
#include "mem/request.hh"
#include "sim/eventq.hh"

/**
 * Reasons for Caches to be Blocked.
 */
enum BlockedCause
{
    Blocked_NoMSHRs,
    Blocked_NoTargets,
    Blocked_NoWBBuffers,
    Blocked_Coherence,
    NUM_BLOCKED_CAUSES
};

/**
 * Reasons for cache to request a bus.
 */
enum RequestCause
{
    Request_MSHR,
    Request_WB,
    Request_Coherence,
    Request_PF
};
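
// Note that both enums above serve as bit positions rather than plain values:
// BaseCache::setBlocked() and setMasterRequest()/setSlaveRequest() below shift
// a one by the cause value (1 << cause) into small uint8_t bit vectors, so
// each cause can be set and cleared independently.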

class MSHR;

/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
    class CachePort : public Port
    {
      public:
        BaseCache *cache;

        CachePort(const std::string &_name, BaseCache *_cache, bool _isCpuSide);

      protected:
        virtual bool recvTiming(PacketPtr pkt);

        virtual Tick recvAtomic(PacketPtr pkt);

        virtual void recvFunctional(PacketPtr pkt);

        virtual void recvStatusChange(Status status);

        virtual void getDeviceAddressRanges(AddrRangeList &resp,
                                            AddrRangeList &snoop);

        virtual int deviceBlockSize();

        virtual void recvRetry();

      public:
        void setBlocked();

        void clearBlocked();

        bool checkFunctional(PacketPtr pkt);

        void checkAndSendFunctional(PacketPtr pkt);

        bool canDrain() { return drainList.empty() && transmitList.empty(); }

        bool blocked;

        bool mustSendRetry;

        bool isCpuSide;

        bool waitingOnRetry;

        std::list<PacketPtr> drainList;

        std::list<std::pair<Tick,PacketPtr> > transmitList;
    };

    struct CacheEvent : public Event
    {
        CachePort *cachePort;
        PacketPtr pkt;
        bool newResponse;

        CacheEvent(CachePort *_cachePort, bool response);
        void process();
        const char *description();
    };

  public: // Made public so coherence can get at it.
    CachePort *cpuSidePort;

    CacheEvent *sendEvent;
    CacheEvent *memSendEvent;

  protected:
    CachePort *memSidePort;

    bool snoopRangesSent;

  public:
    virtual Port *getPort(const std::string &if_name, int idx = -1);

  private:
    // To be defined in cache_impl.hh not in base class
    virtual bool doTimingAccess(PacketPtr pkt, CachePort *cachePort, bool isCpuSide)
    {
        fatal("No implementation");
    }

    virtual Tick doAtomicAccess(PacketPtr pkt, bool isCpuSide)
    {
        fatal("No implementation");
    }

    virtual void doFunctionalAccess(PacketPtr pkt, bool isCpuSide)
    {
        fatal("No implementation");
    }

    void recvStatusChange(Port::Status status, bool isCpuSide)
    {
        if (status == Port::RangeChange) {
            if (!isCpuSide) {
                cpuSidePort->sendStatusChange(Port::RangeChange);
                if (!snoopRangesSent) {
                    snoopRangesSent = true;
                    memSidePort->sendStatusChange(Port::RangeChange);
                }
            } else {
                memSidePort->sendStatusChange(Port::RangeChange);
            }
        }
    }

    virtual PacketPtr getPacket()
    {
        fatal("No implementation");
    }

    virtual PacketPtr getCoherencePacket()
    {
        fatal("No implementation");
    }

    virtual void sendResult(PacketPtr &pkt, MSHR* mshr, bool success)
    {
        fatal("No implementation");
    }

    virtual void sendCoherenceResult(PacketPtr &pkt, MSHR* mshr, bool success)
    {
        fatal("No implementation");
    }

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /**
     * Bit vector for the blocking reasons for the snoop path.
     * @sa #BlockedCause
     */
    uint8_t blockedSnoop;

    /**
     * Bit vector for the outstanding requests for the master interface.
     */
    uint8_t masterRequests;

    /**
     * Bit vector for the outstanding requests for the slave interface.
     */
    uint8_t slaveRequests;
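
    // Worked example of the bit-vector encoding (illustrative only): if the
    // cache blocks for Blocked_NoMSHRs (bit 0) and then for Blocked_NoTargets
    // (bit 1), 'blocked' holds 0x3, and isBlocked() stays true until
    // clearBlocked() has been called for both causes, at which point the
    // CPU-side port is unblocked again.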

  protected:

    /** Stores time the cache blocked for statistics. */
    Tick blockedCycle;

    /** Block size of this cache */
    const int blkSize;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /** The drain event. */
    Event *drainEvent;

  public:
    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command. @sa Packet::Command */
    Stats::Vector<> hits[NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command. @sa Packet::Command */
    Stats::Vector<> misses[NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector<> missLatency[NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector<> blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector<> blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of fast writes (WH64) performed. */
    Stats::Scalar<> fastWrites;

    /** The number of cache copies performed. */
    Stats::Scalar<> cacheCopies;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    virtual void regStats();

  public:

    class Params
    {
      public:
        /** List of address ranges of this cache. */
        std::vector<Range<Addr> > addrRange;
        /** The hit latency for this cache. */
        int hitLatency;
        /** The block size of this cache. */
        int blkSize;
        /**
         * The maximum number of misses this cache should handle before
         * ending the simulation.
         */
        Counter maxMisses;

        /**
         * Construct an instance of this parameter class.
         */
        Params(std::vector<Range<Addr> > addr_range,
               int hit_latency, int _blkSize, Counter max_misses)
            : addrRange(addr_range), hitLatency(hit_latency), blkSize(_blkSize),
              maxMisses(max_misses)
        {
        }
    };
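
    // Usage sketch (hypothetical values, shown only to illustrate the
    // parameter order expected by the constructor below):
    //     std::vector<Range<Addr> > ranges;  // filled in by the configuration
    //     BaseCache::Params p(ranges, /* hit_latency */ 1,
    //                         /* blkSize */ 64, /* max_misses */ 0);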

    /**
     * Create and initialize a basic cache object.
     * @param name The name of this cache.
     * @param params The parameter object for this BaseCache.
     */
    BaseCache(const std::string &name, Params &params)
        : MemObject(name), blocked(0), blockedSnoop(0), masterRequests(0),
          slaveRequests(0), blkSize(params.blkSize),
          missCount(params.maxMisses), drainEvent(NULL)
    {
        // Start ports at NULL; if more than one is created we should panic.
        cpuSidePort = NULL;
        memSidePort = NULL;
        snoopRangesSent = false;
    }

    ~BaseCache()
    {
        delete sendEvent;
        delete memSendEvent;
    }

    virtual void init();

    /**
     * Query block size of a cache.
     * @return The block size
     */
    int getBlockSize() const
    {
        return blkSize;
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked()
    {
        return blocked != 0;
    }

    /**
     * Returns true if the cache is blocked for snoops.
     */
    bool isBlockedForSnoop()
    {
        return blockedSnoop != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curTick;
        }
        int old_state = blocked;
        if (!(blocked & flag)) {
            // Wasn't already blocked for this cause
            blocked |= flag;
            DPRINTF(Cache, "Blocking for cause %s\n", cause);
            if (!old_state)
                cpuSidePort->setBlocked();
        }
    }

    /**
     * Marks the snoop path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the master interface.
     * @param cause The reason to block the snoop path.
     */
    void setBlockedForSnoop(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        uint8_t old_state = blockedSnoop;
        if (!(blockedSnoop & flag)) {
            // Wasn't already blocked for this cause
            blockedSnoop |= flag;
            if (!old_state)
                memSidePort->setBlocked();
        }
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        DPRINTF(Cache, "Unblocking for cause %s, causes left=%i\n",
                cause, blocked);
        if (blocked & flag) {
            blocked &= ~flag;
            if (!isBlocked()) {
                blocked_cycles[cause] += curTick - blockedCycle;
                DPRINTF(Cache, "Unblocking from all causes\n");
                cpuSidePort->clearBlocked();
            }
        }
        if (blockedSnoop & flag) {
            blockedSnoop &= ~flag;
            if (!isBlockedForSnoop()) {
                memSidePort->clearBlocked();
            }
        }
    }
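
    // Usage sketch (illustrative only): a cache that runs out of MSHRs calls
    // setBlocked(Blocked_NoMSHRs), which back-pressures the CPU-side port;
    // once an MSHR is freed it calls clearBlocked(Blocked_NoMSHRs), and the
    // port is re-enabled when no other cause is still blocking.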

    /**
     * True if the master bus should be requested.
     * @return True if there are outstanding requests for the master bus.
     */
    bool doMasterRequest()
    {
        return masterRequests != 0;
    }

    /**
     * Request the master bus for the given cause and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void setMasterRequest(RequestCause cause, Tick time)
    {
        if (!doMasterRequest() && !memSidePort->waitingOnRetry) {
            BaseCache::CacheEvent *reqCpu =
                new BaseCache::CacheEvent(memSidePort, false);
            reqCpu->schedule(time);
        }
        uint8_t flag = 1 << cause;
        masterRequests |= flag;
    }

    /**
     * Clear the master bus request for the given cause.
     * @param cause The request reason to clear.
     */
    void clearMasterRequest(RequestCause cause)
    {
        uint8_t flag = 1 << cause;
        masterRequests &= ~flag;
        checkDrain();
    }

    /**
     * Return true if the slave bus should be requested.
     * @return True if there are outstanding requests for the slave bus.
     */
    bool doSlaveRequest()
    {
        return slaveRequests != 0;
    }

    /**
     * Request the slave bus for the given reason and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void setSlaveRequest(RequestCause cause, Tick time)
    {
        if (!doSlaveRequest() && !cpuSidePort->waitingOnRetry) {
            BaseCache::CacheEvent *reqCpu =
                new BaseCache::CacheEvent(cpuSidePort, false);
            reqCpu->schedule(time);
        }
        uint8_t flag = 1 << cause;
        slaveRequests |= flag;
    }

    /**
     * Clear the slave bus request for the given reason.
     * @param cause The request reason to clear.
     */
    void clearSlaveRequest(RequestCause cause)
    {
        uint8_t flag = 1 << cause;
        slaveRequests &= ~flag;
        checkDrain();
    }

    /**
     * Send a response to the slave interface.
     * @param pkt The request being responded to.
     * @param time The time the response is ready.
     */
    void respond(PacketPtr pkt, Tick time)
    {
        assert(time >= curTick);
        if (pkt->needsResponse()) {
/*
            CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
            reqCpu->schedule(time);
*/
            if (cpuSidePort->transmitList.empty()) {
                assert(!sendEvent->scheduled());
                sendEvent->schedule(time);
                cpuSidePort->transmitList.push_back(
                    std::pair<Tick,PacketPtr>(time, pkt));
                return;
            }

            // Something is on the list and this belongs at the end.
            if (time >= cpuSidePort->transmitList.back().first) {
                cpuSidePort->transmitList.push_back(
                    std::pair<Tick,PacketPtr>(time, pkt));
                return;
            }
            // Something is on the list and this belongs somewhere else.
            std::list<std::pair<Tick,PacketPtr> >::iterator i =
                cpuSidePort->transmitList.begin();
            std::list<std::pair<Tick,PacketPtr> >::iterator end =
                cpuSidePort->transmitList.end();
            bool done = false;

            while (i != end && !done) {
                if (time < i->first) {
                    cpuSidePort->transmitList.insert(
                        i, std::pair<Tick,PacketPtr>(time, pkt));
                    done = true; // insert only once to keep the list sorted
                }
                i++;
            }
        } else {
            if (pkt->cmd != Packet::UpgradeReq) {
                delete pkt->req;
                delete pkt;
            }
        }
    }
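
    // Note on the queueing above: transmitList is kept ordered by ready time,
    // so the entry at the front is always the next packet the CPU-side port
    // should send when sendEvent fires. respondToMiss() and respondToSnoop()
    // below apply the same insertion policy to their respective ports.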

    /**
     * Send a response to the slave interface and calculate miss latency.
     * @param pkt The request to respond to.
     * @param time The time the response is ready.
     */
    void respondToMiss(PacketPtr pkt, Tick time)
    {
        assert(time >= curTick);
        if (!pkt->req->isUncacheable()) {
            missLatency[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] +=
                time - pkt->time;
        }
        if (pkt->needsResponse()) {
/*
            CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
            reqCpu->schedule(time);
*/
            if (cpuSidePort->transmitList.empty()) {
                assert(!sendEvent->scheduled());
                sendEvent->schedule(time);
                cpuSidePort->transmitList.push_back(
                    std::pair<Tick,PacketPtr>(time, pkt));
                return;
            }

            // Something is on the list and this belongs at the end.
            if (time >= cpuSidePort->transmitList.back().first) {
                cpuSidePort->transmitList.push_back(
                    std::pair<Tick,PacketPtr>(time, pkt));
                return;
            }
            // Something is on the list and this belongs somewhere else.
            std::list<std::pair<Tick,PacketPtr> >::iterator i =
                cpuSidePort->transmitList.begin();
            std::list<std::pair<Tick,PacketPtr> >::iterator end =
                cpuSidePort->transmitList.end();
            bool done = false;

            while (i != end && !done) {
                if (time < i->first) {
                    cpuSidePort->transmitList.insert(
                        i, std::pair<Tick,PacketPtr>(time, pkt));
                    done = true; // insert only once to keep the list sorted
                }
                i++;
            }
        } else {
            if (pkt->cmd != Packet::UpgradeReq) {
                delete pkt->req;
                delete pkt;
            }
        }
    }

    /**
     * Supplies the data if cache to cache transfers are enabled.
     * @param pkt The bus transaction to fulfill.
     * @param time The time the response is ready.
     */
    void respondToSnoop(PacketPtr pkt, Tick time)
    {
        assert(time >= curTick);
        assert(pkt->needsResponse());
/*
        CacheEvent *reqMem = new CacheEvent(memSidePort, pkt);
        reqMem->schedule(time);
*/
        if (memSidePort->transmitList.empty()) {
            assert(!memSendEvent->scheduled());
            memSendEvent->schedule(time);
            memSidePort->transmitList.push_back(
                std::pair<Tick,PacketPtr>(time, pkt));
            return;
        }

        // Something is on the list and this belongs at the end.
        if (time >= memSidePort->transmitList.back().first) {
            memSidePort->transmitList.push_back(
                std::pair<Tick,PacketPtr>(time, pkt));
            return;
        }
        // Something is on the list and this belongs somewhere else.
        std::list<std::pair<Tick,PacketPtr> >::iterator i =
            memSidePort->transmitList.begin();
        std::list<std::pair<Tick,PacketPtr> >::iterator end =
            memSidePort->transmitList.end();
        bool done = false;

        while (i != end && !done) {
            if (time < i->first) {
                memSidePort->transmitList.insert(
                    i, std::pair<Tick,PacketPtr>(time, pkt));
                done = true; // insert only once to keep the list sorted
            }
            i++;
        }
    }
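
    // respondToSnoop() mirrors respond(), but queues on the memory-side port
    // and schedules memSendEvent rather than sendEvent, since snoop responses
    // are sent out toward memory rather than toward the CPU.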

    /**
     * Notification from master interface that an address range changed.
     * Nothing to do for a cache.
     */
    void rangeChange() {}

    void getAddressRanges(AddrRangeList &resp, AddrRangeList &snoop, bool isCpuSide)
    {
        if (isCpuSide) {
            AddrRangeList dummy;
            memSidePort->getPeerAddressRanges(resp, dummy);
        } else {
            // This is where snoops get updated
            AddrRangeList dummy;
            cpuSidePort->getPeerAddressRanges(dummy, snoop);
        }
    }

    virtual unsigned int drain(Event *de);

    void checkDrain()
    {
        if (drainEvent && canDrain()) {
            drainEvent->process();
            changeState(SimObject::Drained);
            // Clear the drain event
            drainEvent = NULL;
        }
    }

    bool canDrain()
    {
        if (doMasterRequest() || doSlaveRequest()) {
            return false;
        } else if (memSidePort && !memSidePort->canDrain()) {
            return false;
        } else if (cpuSidePort && !cpuSidePort->canDrain()) {
            return false;
        }
        return true;
    }
};

#endif //__BASE_CACHE_HH__