dram_ctrl.hh revision 10619:6dd27a0e0d23
1/* 2 * Copyright (c) 2012-2014 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2013 Amin Farmahini-Farahani 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Andreas Hansson 41 * Ani Udipi 42 * Neha Agarwal 43 * Omar Naji 44 */ 45 46/** 47 * @file 48 * DRAMCtrl declaration 49 */ 50 51#ifndef __MEM_DRAM_CTRL_HH__ 52#define __MEM_DRAM_CTRL_HH__ 53 54#include <deque> 55#include <string> 56 57#include "base/statistics.hh" 58#include "enums/AddrMap.hh" 59#include "enums/MemSched.hh" 60#include "enums/PageManage.hh" 61#include "mem/abstract_mem.hh" 62#include "mem/qport.hh" 63#include "params/DRAMCtrl.hh" 64#include "sim/eventq.hh" 65#include "mem/drampower.hh" 66 67/** 68 * The DRAM controller is a single-channel memory controller capturing 69 * the most important timing constraints associated with a 70 * contemporary DRAM. For multi-channel memory systems, the controller 71 * is combined with a crossbar model, with the channel address 72 * interleaving taking part in the crossbar. 73 * 74 * As a basic design principle, this controller 75 * model is not cycle callable, but instead uses events to: 1) decide 76 * when new decisions can be made, 2) when resources become available, 77 * 3) when things are to be considered done, and 4) when to send 78 * things back. Through these simple principles, the model delivers 79 * high performance, and lots of flexibility, allowing users to 80 * evaluate the system impact of a wide range of memory technologies, 81 * such as DDR3/4, LPDDR2/3/4, WideIO1/2, HBM and HMC. 
 *
 * For more details, please see Hansson et al, "Simulating DRAM
 * controllers for future system architecture exploration",
 * Proc. ISPASS, 2014. If you use this model as part of your research
 * please cite the paper.
 */
class DRAMCtrl : public AbstractMemory
{

  private:

    // For now, make use of a queued slave port to avoid dealing with
    // flow control for the responses being sent back
    class MemoryPort : public QueuedSlavePort
    {

        /** Queue holding responses until they can be sent back */
        SlavePacketQueue queue;

        /** The controller that owns this port */
        DRAMCtrl& memory;

      public:

        MemoryPort(const std::string& name, DRAMCtrl& _memory);

      protected:

        Tick recvAtomic(PacketPtr pkt);

        void recvFunctional(PacketPtr pkt);

        bool recvTimingReq(PacketPtr);

        virtual AddrRangeList getAddrRanges() const;

    };

    /**
     * Our incoming port, for a multi-ported controller add a crossbar
     * in front of it
     */
    MemoryPort port;

    /**
     * Remember if the memory system is in timing mode
     */
    bool isTimingMode;

    /**
     * Remember if we have to retry a request when available.
     */
    bool retryRdReq;
    bool retryWrReq;

    /**
     * Bus state used to control the read/write switching and drive
     * the scheduling of the next request.
     */
    enum BusState {
        READ = 0,
        READ_TO_WRITE,
        WRITE,
        WRITE_TO_READ
    };

    BusState busState;

    /**
     * A basic class to track the bank state, i.e. what row is
     * currently open (if any), when is the bank free to accept a new
     * column (read/write) command, when can it be precharged, and
     * when can it be activated.
     *
     * The bank also keeps track of how many bytes have been accessed
     * in the open row since it was opened.
     */
    class Bank
    {

      public:

        // Sentinel row value meaning "no row open in this bank"
        static const uint32_t NO_ROW = -1;

        /** Currently open row, or NO_ROW if the bank is precharged */
        uint32_t openRow;
        /** Bank index within the rank */
        uint8_t bank;
        /** Bank group this bank belongs to (for DDR4-style devices) */
        uint8_t bankgr;

        /** Earliest tick a column (read/write) command may issue */
        Tick colAllowedAt;
        /** Earliest tick a precharge may issue */
        Tick preAllowedAt;
        /** Earliest tick an activate may issue */
        Tick actAllowedAt;

        /** Column accesses to the currently open row */
        uint32_t rowAccesses;
        /** Bytes accessed in the currently open row */
        uint32_t bytesAccessed;

        Bank() :
            openRow(NO_ROW), bank(0), bankgr(0),
            colAllowedAt(0), preAllowedAt(0), actAllowedAt(0),
            rowAccesses(0), bytesAccessed(0)
        { }
    };


    /**
     * Rank class includes a vector of banks. Refresh and Power state
     * machines are defined per rank. Events required to change the
     * state of the refresh and power state machine are scheduled per
     * rank. This class allows the implementation of rank-wise refresh
     * and rank-wise power-down.
     */
    class Rank : public EventManager
    {

      private:

        /**
         * The power state captures the different operational states of
         * the DRAM and interacts with the bus read/write state machine,
         * and the refresh state machine. In the idle state all banks are
         * precharged. From there we either go to an auto refresh (as
         * determined by the refresh state machine), or to a precharge
         * power down mode. From idle the memory can also go to the active
         * state (with one or more banks active), and in turn from there
         * to active power down. At the moment we do not capture the deep
         * power down and self-refresh state.
         */
        enum PowerState {
            PWR_IDLE = 0,
            PWR_REF,
            PWR_PRE_PDN,
            PWR_ACT,
            PWR_ACT_PDN
        };

        /**
         * The refresh state is used to control the progress of the
         * refresh scheduling. When normal operation is in progress the
         * refresh state is idle. From there, it progresses to the refresh
         * drain state once tREFI has passed. The refresh drain state
         * captures the DRAM row active state, as it will stay there until
         * all ongoing accesses complete. Thereafter all banks are
         * precharged, and lastly, the DRAM is refreshed.
         */
        enum RefreshState {
            REF_IDLE = 0,
            REF_DRAIN,
            REF_PRE,
            REF_RUN
        };

        /**
         * A reference to the parent DRAMCtrl instance
         */
        DRAMCtrl& memory;

        /**
         * Since we are taking decisions out of order, we need to keep
         * track of what power transition is happening at what time, such
         * that we can go back in time and change history. For example, if
         * we precharge all banks and schedule going to the idle state, we
         * might at a later point decide to activate a bank before the
         * transition to idle would have taken place.
         */
        PowerState pwrStateTrans;

        /**
         * Current power state.
         */
        PowerState pwrState;

        /**
         * Track when we transitioned to the current power state
         */
        Tick pwrStateTick;

        /**
         * Current refresh state
         */
        RefreshState refreshState;

        /**
         * Keep track of when a refresh is due.
         */
        Tick refreshDueAt;

        /*
         * Command energies
         */
        Stats::Scalar actEnergy;
        Stats::Scalar preEnergy;
        Stats::Scalar readEnergy;
        Stats::Scalar writeEnergy;
        Stats::Scalar refreshEnergy;

        /*
         * Active Background Energy
         */
        Stats::Scalar actBackEnergy;

        /*
         * Precharge Background Energy
         */
        Stats::Scalar preBackEnergy;

        Stats::Scalar totalEnergy;
        Stats::Scalar averagePower;

        /**
         * Track time spent in each power state.
         */
        Stats::Vector pwrStateTime;

        /**
         * Function to update Power Stats
         */
        void updatePowerStats();

        /**
         * Schedule a power state transition in the future, and
         * potentially override an already scheduled transition.
         *
         * @param pwr_state Power state to transition to
         * @param tick Tick when transition should take place
         */
        void schedulePowerEvent(PowerState pwr_state, Tick tick);

      public:

        /**
         * Current Rank index
         */
        uint8_t rank;

        /**
         * One DRAMPower instance per rank
         */
        DRAMPower power;

        /**
         * Vector of Banks. Each rank is made of several devices which in
         * turn are made from several banks.
         */
        std::vector<Bank> banks;

        /**
         * To track number of banks which are currently active for
         * this rank.
         */
        unsigned int numBanksActive;

        /** List to keep track of activate ticks */
        std::deque<Tick> actTicks;

        Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p);

        const std::string name() const
        {
            return csprintf("%s_%d", memory.name(), rank);
        }

        /**
         * Kick off accounting for power and refresh states and
         * schedule initial refresh.
         *
         * @param ref_tick Tick for first refresh
         */
        void startup(Tick ref_tick);

        /**
         * Stop the refresh events.
         */
        void suspend();

        /**
         * Check if the current rank is available for scheduling.
         *
         * @return True if the rank is idle from a refresh point of view
         */
        bool isAvailable() const { return refreshState == REF_IDLE; }

        /**
         * Let the rank check if it was waiting for requests to drain
         * to allow it to transition states.
         */
        void checkDrainDone();

        /*
         * Function to register Stats
         */
        void regStats();

        void processActivateEvent();
        EventWrapper<Rank, &Rank::processActivateEvent>
        activateEvent;

        void processPrechargeEvent();
        EventWrapper<Rank, &Rank::processPrechargeEvent>
        prechargeEvent;

        void processRefreshEvent();
        EventWrapper<Rank, &Rank::processRefreshEvent>
        refreshEvent;

        void processPowerEvent();
        EventWrapper<Rank, &Rank::processPowerEvent>
        powerEvent;

    };

    /**
     * A burst helper helps organize and manage a packet that is larger than
     * the DRAM burst size. A system packet that is larger than the burst size
     * is split into multiple DRAM packets and all those DRAM packets point to
     * a single burst helper such that we know when the whole packet is served.
     */
    class BurstHelper {

      public:

        /** Number of DRAM bursts required for a system packet **/
        const unsigned int burstCount;

        /** Number of DRAM bursts serviced so far for a system packet **/
        unsigned int burstsServiced;

        BurstHelper(unsigned int _burstCount)
            : burstCount(_burstCount), burstsServiced(0)
        { }
    };

    /**
     * A DRAM packet stores packets along with the timestamp of when
     * the packet entered the queue, and also the decoded address.
     */
    class DRAMPacket {

      public:

        /** When did request enter the controller */
        const Tick entryTime;

        /** When will request leave the controller */
        Tick readyTime;

        /** This comes from the outside world */
        const PacketPtr pkt;

        /** True for reads, false for writes */
        const bool isRead;

        /** Will be populated by address decoder */
        const uint8_t rank;
        const uint8_t bank;
        const uint32_t row;

        /**
         * Bank id is calculated considering banks in all the ranks
         * eg: 2 ranks each with 8 banks, then bankId = 0 --> rank0, bank0 and
         * bankId = 8 --> rank1, bank0
         */
        const uint16_t bankId;

        /**
         * The starting address of the DRAM packet.
         * This address could be unaligned to burst size boundaries. The
         * reason is to keep the address offset so we can accurately check
         * incoming read packets with packets in the write queue.
         */
        Addr addr;

        /**
         * The size of this dram packet in bytes
         * It is always equal or smaller than DRAM burst size
         */
        unsigned int size;

        /**
         * A pointer to the BurstHelper if this DRAMPacket is a split packet
         * If not a split packet (common case), this is set to NULL
         */
        BurstHelper* burstHelper;

        /** The bank this packet targets */
        Bank& bankRef;
        /** The rank this packet targets */
        Rank& rankRef;

        DRAMPacket(PacketPtr _pkt, bool is_read, uint8_t _rank, uint8_t _bank,
                   uint32_t _row, uint16_t bank_id, Addr _addr,
                   unsigned int _size, Bank& bank_ref, Rank& rank_ref)
            : entryTime(curTick()), readyTime(curTick()),
              pkt(_pkt), isRead(is_read), rank(_rank), bank(_bank), row(_row),
              bankId(bank_id), addr(_addr), size(_size), burstHelper(NULL),
              bankRef(bank_ref), rankRef(rank_ref)
        { }

    };

    /**
     * Bunch of things required to setup "events" in gem5
     * When event "respondEvent" occurs for example, the method
     * processRespondEvent is called; no parameters are allowed
     * in these methods
     */
    void processNextReqEvent();
    EventWrapper<DRAMCtrl,&DRAMCtrl::processNextReqEvent> nextReqEvent;

    void processRespondEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processRespondEvent> respondEvent;

    /**
     * Check if the read queue has room for more entries
     *
     * @param pktCount The number of entries needed in the read queue
     * @return true if read queue is full, false otherwise
     */
    bool readQueueFull(unsigned int pktCount) const;

    /**
     * Check if the write queue has room for more entries
     *
     * @param pktCount The number of entries needed in the write queue
     * @return true if write queue is full, false otherwise
     */
    bool writeQueueFull(unsigned int pktCount) const;

    /**
     * When a new read comes in, first check if the write q has a
     * pending request to the same address. If not, decode the
     * address to populate rank/bank/row, create one or multiple
     * "dram_pkt", and push them to the back of the read queue.
     * If this is the only
     * read request in the system, schedule an event to start
     * servicing it.
     *
     * @param pkt The request packet from the outside world
     * @param pktCount The number of DRAM bursts the pkt
     * translates to. If pkt size is larger then one full burst,
     * then pktCount is greater than one.
     */
    void addToReadQueue(PacketPtr pkt, unsigned int pktCount);

    /**
     * Decode the incoming pkt, create a dram_pkt and push to the
     * back of the write queue. If the write q length is more than
     * the threshold specified by the user, ie the queue is beginning
     * to get full, stop reads, and start draining writes.
     *
     * @param pkt The request packet from the outside world
     * @param pktCount The number of DRAM bursts the pkt
     * translates to. If pkt size is larger then one full burst,
     * then pktCount is greater than one.
     */
    void addToWriteQueue(PacketPtr pkt, unsigned int pktCount);

    /**
     * Actually do the DRAM access - figure out the latency it
     * will take to service the req based on bank state, channel state etc
     * and then update those states to account for this request. Based
     * on this, update the packet's "readyTime" and move it to the
     * response q from where it will eventually go back to the outside
     * world.
     *
     * @param dram_pkt The DRAM packet created from the outside world pkt
     */
    void doDRAMAccess(DRAMPacket* dram_pkt);

    /**
     * When a packet reaches its "readyTime" in the response Q,
     * use the "access()" method in AbstractMemory to actually
     * create the response packet, and send it back to the outside
     * world requestor.
     *
     * @param pkt The packet from the outside world
     * @param static_latency Static latency to add before sending the packet
     */
    void accessAndRespond(PacketPtr pkt, Tick static_latency);

    /**
     * Address decoder to figure out physical mapping onto ranks,
     * banks, and rows. This function is called multiple times on the same
     * system packet if the packet is larger than burst of the memory. The
     * dramPktAddr is used for the offset within the packet.
     *
     * @param pkt The packet from the outside world
     * @param dramPktAddr The starting address of the DRAM packet
     * @param size The size of the DRAM packet in bytes
     * @param isRead Is the request for a read or a write to DRAM
     * @return A DRAMPacket pointer with the decoded information
     */
    DRAMPacket* decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned int size,
                           bool isRead);

    /**
     * The memory scheduler/arbiter - picks which request needs to
     * go next, based on the specified policy such as FCFS or FR-FCFS
     * and moves it to the head of the queue.
     * Prioritizes accesses to the same rank as previous burst unless
     * controller is switching command type.
     *
     * @param queue Queued requests to consider
     * @param switched_cmd_type Command type is changing
     * @return true if a packet is scheduled to a rank which is available else
     * false
     */
    bool chooseNext(std::deque<DRAMPacket*>& queue, bool switched_cmd_type);

    /**
     * For FR-FCFS policy reorder the read/write queue depending on row buffer
     * hits and earliest banks available in DRAM
     * Prioritizes accesses to the same rank as previous burst unless
     * controller is switching command type.
     *
     * @param queue Queued requests to consider
     * @param switched_cmd_type Command type is changing
     * @return true if a packet is scheduled to a rank which is available else
     * false
     */
    bool reorderQueue(std::deque<DRAMPacket*>& queue, bool switched_cmd_type);

    /**
     * Find which are the earliest banks ready to issue an activate
     * for the enqueued requests. Assumes maximum of 64 banks per DIMM
     * Also checks if the bank is already prepped.
     *
     * @param queue Queued requests to consider
     * @param switched_cmd_type Command type is changing
     * @return One-hot encoded mask of bank indices
     */
    uint64_t minBankPrep(const std::deque<DRAMPacket*>& queue,
                         bool switched_cmd_type) const;

    /**
     * Keep track of when row activations happen, in order to enforce
     * the maximum number of activations in the activation window. The
     * method updates the time that the banks become available based
     * on the current limits.
     *
     * @param rank_ref Reference to the rank
     * @param bank_ref Reference to the bank
     * @param act_tick Time when the activation takes place
     * @param row Index of the row
     */
    void activateBank(Rank& rank_ref, Bank& bank_ref, Tick act_tick,
                      uint32_t row);

    /**
     * Precharge a given bank and also update when the precharge is
     * done. This will also deal with any stats related to the
     * accesses to the open page.
     *
     * @param rank_ref The rank to precharge
     * @param bank_ref The bank to precharge
     * @param pre_at Time when the precharge takes place
     * @param trace Is this an auto precharge then do not add to trace
     */
    void prechargeBank(Rank& rank_ref, Bank& bank_ref,
                       Tick pre_at, bool trace = true);

    /**
     * Used for debugging to observe the contents of the queues.
     */
    void printQs() const;

    /**
     * The controller's main read and write queues
     */
    std::deque<DRAMPacket*> readQueue;
    std::deque<DRAMPacket*> writeQueue;

    /**
     * Response queue where read packets wait after we're done working
     * with them, but it's not time to send the response yet. The
     * responses are stored separately mostly to keep the code clean
     * and help with events scheduling. For all logical purposes such
     * as sizing the read queue, this and the main read queue need to
     * be added together.
     */
    std::deque<DRAMPacket*> respQueue;

    /**
     * If we need to drain, keep the drain manager around until we're
     * done here.
     */
    DrainManager *drainManager;

    /**
     * Vector of ranks
     */
    std::vector<Rank*> ranks;

    /**
     * The following are basic design parameters of the memory
     * controller, and are initialized based on parameter values.
     * The rowsPerBank is determined based on the capacity, number of
     * ranks and banks, the burst size, and the row buffer size.
     */
    const uint32_t deviceSize;
    const uint32_t deviceBusWidth;
    const uint32_t burstLength;
    const uint32_t deviceRowBufferSize;
    const uint32_t devicesPerRank;
    const uint32_t burstSize;
    const uint32_t rowBufferSize;
    const uint32_t columnsPerRowBuffer;
    const uint32_t columnsPerStripe;
    const uint32_t ranksPerChannel;
    const uint32_t bankGroupsPerRank;
    const bool bankGroupArch;
    const uint32_t banksPerRank;
    const uint32_t channels;
    uint32_t rowsPerBank;
    const uint32_t readBufferSize;
    const uint32_t writeBufferSize;
    const uint32_t writeHighThreshold;
    const uint32_t writeLowThreshold;
    const uint32_t minWritesPerSwitch;
    uint32_t writesThisTime;
    uint32_t readsThisTime;

    /**
     * Basic memory timing parameters initialized based on parameter
     * values.
     */
    const Tick M5_CLASS_VAR_USED tCK;
    const Tick tWTR;
    const Tick tRTW;
    const Tick tCS;
    const Tick tBURST;
    const Tick tCCD_L;
    const Tick tRCD;
    const Tick tCL;
    const Tick tRP;
    const Tick tRAS;
    const Tick tWR;
    const Tick tRTP;
    const Tick tRFC;
    const Tick tREFI;
    const Tick tRRD;
    const Tick tRRD_L;
    const Tick tXAW;
    const uint32_t activationLimit;

    /**
     * Memory controller configuration initialized based on parameter
     * values.
     */
    Enums::MemSched memSchedPolicy;
    Enums::AddrMap addrMapping;
    Enums::PageManage pageMgmt;

    /**
     * Max column accesses (read and write) per row, before forcefully
     * closing it.
     */
    const uint32_t maxAccessesPerRow;

    /**
     * Pipeline latency of the controller frontend. The frontend
     * contribution is added to writes (that complete when they are in
     * the write buffer) and reads that are serviced the write buffer.
     */
    const Tick frontendLatency;

    /**
     * Pipeline latency of the backend and PHY. Along with the
     * frontend contribution, this latency is added to reads serviced
     * by the DRAM.
     */
    const Tick backendLatency;

    /**
     * Till when has the main data bus been spoken for already?
     */
    Tick busBusyUntil;

    /** Tick of the most recent request arrival, used for gap stats */
    Tick prevArrival;

    /**
     * The soonest you have to start thinking about the next request
     * is the longest access time that can occur before
     * busBusyUntil. Assuming you need to precharge, open a new row,
     * and access, it is tRP + tRCD + tCL.
     */
    Tick nextReqTime;

    // All statistics that the model needs to capture
    Stats::Scalar readReqs;
    Stats::Scalar writeReqs;
    Stats::Scalar readBursts;
    Stats::Scalar writeBursts;
    Stats::Scalar bytesReadDRAM;
    Stats::Scalar bytesReadWrQ;
    Stats::Scalar bytesWritten;
    Stats::Scalar bytesReadSys;
    Stats::Scalar bytesWrittenSys;
    Stats::Scalar servicedByWrQ;
    Stats::Scalar mergedWrBursts;
    Stats::Scalar neitherReadNorWrite;
    Stats::Vector perBankRdBursts;
    Stats::Vector perBankWrBursts;
    Stats::Scalar numRdRetry;
    Stats::Scalar numWrRetry;
    Stats::Scalar totGap;
    Stats::Vector readPktSize;
    Stats::Vector writePktSize;
    Stats::Vector rdQLenPdf;
    Stats::Vector wrQLenPdf;
    Stats::Histogram bytesPerActivate;
    Stats::Histogram rdPerTurnAround;
    Stats::Histogram wrPerTurnAround;

    // Latencies summed over all requests
    Stats::Scalar totQLat;
    Stats::Scalar totMemAccLat;
    Stats::Scalar totBusLat;

    // Average latencies per request
    Stats::Formula avgQLat;
    Stats::Formula avgBusLat;
    Stats::Formula avgMemAccLat;

    // Average bandwidth
    Stats::Formula avgRdBW;
    Stats::Formula avgWrBW;
    Stats::Formula avgRdBWSys;
    Stats::Formula avgWrBWSys;
    Stats::Formula peakBW;
    Stats::Formula busUtil;
    Stats::Formula busUtilRead;
    Stats::Formula busUtilWrite;

    // Average queue lengths
    Stats::Average avgRdQLen;
    Stats::Average avgWrQLen;

    // Row hit count and rate
    Stats::Scalar readRowHits;
    Stats::Scalar writeRowHits;
    Stats::Formula readRowHitRate;
    Stats::Formula writeRowHitRate;
    Stats::Formula avgGap;

    // DRAM Power Calculation
    Stats::Formula pageHitRate;

    // Holds the value of the rank of burst issued
    uint8_t activeRank;

    // timestamp offset
    uint64_t timeStampOffset;

    /** @todo this is a temporary workaround until the 4-phase code is
     * committed. upstream caches needs this packet until true is returned, so
     * hold onto it for deletion until a subsequent call
     */
    std::vector<PacketPtr> pendingDelete;

    /**
     * This function increments the energy when called. If stats are
     * dumped periodically, note accumulated energy values will
     * appear in the stats (even if the stats are reset). This is a
     * result of the energy values coming from DRAMPower, and there
     * is currently no support for resetting the state.
     *
     * @param rank_ref Current rank
     */
    void updatePowerStats(Rank& rank_ref);

    /**
     * Function for sorting commands in the command list of DRAMPower.
     *
     * @param m1 Memory Command in command list of DRAMPower library
     * @param m2 Memory Command in command list of DRAMPower
     * @return true if timestamp of Command 1 < timestamp of Command 2
     */
    static bool sortTime(const Data::MemCommand& m1,
                         const Data::MemCommand& m2) {
        return m1.getTime() < m2.getTime();
    };


  public:

    void regStats();

    DRAMCtrl(const DRAMCtrlParams* p);

    unsigned int drain(DrainManager* dm);

    virtual BaseSlavePort& getSlavePort(const std::string& if_name,
                                        PortID idx = InvalidPortID);

    virtual void init() M5_ATTR_OVERRIDE;
    virtual void startup() M5_ATTR_OVERRIDE;
    virtual void drainResume() M5_ATTR_OVERRIDE;

  protected:

    Tick recvAtomic(PacketPtr pkt);
    void recvFunctional(PacketPtr pkt);
    bool recvTimingReq(PacketPtr pkt);

};

#endif //__MEM_DRAM_CTRL_HH__