// dram_ctrl.hh revision 10618:bb665366cc00
/*
 * Copyright (c) 2012-2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 *          Ani Udipi
 *          Neha Agarwal
 *          Omar Naji
 */

/**
 * @file
 * DRAMCtrl declaration
 */

#ifndef __MEM_DRAM_CTRL_HH__
#define __MEM_DRAM_CTRL_HH__

#include <deque>
#include <string>

#include "base/statistics.hh"
#include "enums/AddrMap.hh"
#include "enums/MemSched.hh"
#include "enums/PageManage.hh"
#include "mem/abstract_mem.hh"
#include "mem/drampower.hh"
#include "mem/qport.hh"
#include "params/DRAMCtrl.hh"
#include "sim/eventq.hh"

/**
 * The DRAM controller is a single-channel memory controller capturing
 * the most important timing constraints associated with a
 * contemporary DRAM. For multi-channel memory systems, the controller
 * is combined with a crossbar model, with the channel address
 * interleaving taking part in the crossbar.
 *
 * As a basic design principle, this controller
 * model is not cycle callable, but instead uses events to: 1) decide
 * when new decisions can be made, 2) when resources become available,
 * 3) when things are to be considered done, and 4) when to send
 * things back. Through these simple principles, the model delivers
 * high performance, and lots of flexibility, allowing users to
 * evaluate the system impact of a wide range of memory technologies,
 * such as DDR3/4, LPDDR2/3/4, WideIO1/2, HBM and HMC.
 *
 * For more details, please see Hansson et al, "Simulating DRAM
 * controllers for future system architecture exploration",
 * Proc. ISPASS, 2014. If you use this model as part of your research
 * please cite the paper.
 */
class DRAMCtrl : public AbstractMemory
{

  private:

    // For now, make use of a queued slave port to avoid dealing with
    // flow control for the responses being sent back
    class MemoryPort : public QueuedSlavePort
    {

        SlavePacketQueue queue;
        DRAMCtrl& memory;

      public:

        MemoryPort(const std::string& name, DRAMCtrl& _memory);

      protected:

        Tick recvAtomic(PacketPtr pkt);

        void recvFunctional(PacketPtr pkt);

        bool recvTimingReq(PacketPtr);

        virtual AddrRangeList getAddrRanges() const;

    };

    /**
     * Our incoming port, for a multi-ported controller add a crossbar
     * in front of it
     */
    MemoryPort port;

    /**
     * Remember if we have to retry a request when available.
     */
    bool retryRdReq;
    bool retryWrReq;

    /**
     * Bus state used to control the read/write switching and drive
     * the scheduling of the next request.
     */
    enum BusState {
        READ = 0,
        READ_TO_WRITE,
        WRITE,
        WRITE_TO_READ
    };

    BusState busState;

    /**
     * A basic class to track the bank state, i.e. what row is
     * currently open (if any), when is the bank free to accept a new
     * column (read/write) command, when can it be precharged, and
     * when can it be activated.
     *
     * The bank also keeps track of how many bytes have been accessed
     * in the open row since it was opened.
     */
    class Bank
    {

      public:

        static const uint32_t NO_ROW = -1;

        uint32_t openRow;
        uint8_t bank;
        uint8_t bankgr;

        Tick colAllowedAt;
        Tick preAllowedAt;
        Tick actAllowedAt;

        uint32_t rowAccesses;
        uint32_t bytesAccessed;

        Bank() :
            openRow(NO_ROW), bank(0), bankgr(0),
            colAllowedAt(0), preAllowedAt(0), actAllowedAt(0),
            rowAccesses(0), bytesAccessed(0)
        { }
    };


    /**
     * Rank class includes a vector of banks. Refresh and Power state
     * machines are defined per rank. Events required to change the
     * state of the refresh and power state machine are scheduled per
     * rank. This class allows the implementation of rank-wise refresh
     * and rank-wise power-down.
     */
    class Rank : public EventManager
    {

      private:

        /**
         * The power state captures the different operational states of
         * the DRAM and interacts with the bus read/write state machine,
         * and the refresh state machine. In the idle state all banks are
         * precharged. From there we either go to an auto refresh (as
         * determined by the refresh state machine), or to a precharge
         * power down mode. From idle the memory can also go to the active
         * state (with one or more banks active), and in turn from there
         * to active power down. At the moment we do not capture the deep
         * power down and self-refresh state.
         */
        enum PowerState {
            PWR_IDLE = 0,
            PWR_REF,
            PWR_PRE_PDN,
            PWR_ACT,
            PWR_ACT_PDN
        };

        /**
         * The refresh state is used to control the progress of the
         * refresh scheduling. When normal operation is in progress the
         * refresh state is idle. From there, it progresses to the refresh
         * drain state once tREFI has passed. The refresh drain state
         * captures the DRAM row active state, as it will stay there until
         * all ongoing accesses complete. Thereafter all banks are
         * precharged, and lastly, the DRAM is refreshed.
         */
        enum RefreshState {
            REF_IDLE = 0,
            REF_DRAIN,
            REF_PRE,
            REF_RUN
        };

        /**
         * A reference to the parent DRAMCtrl instance
         */
        DRAMCtrl& memory;

        /**
         * Since we are taking decisions out of order, we need to keep
         * track of what power transition is happening at what time, such
         * that we can go back in time and change history. For example, if
         * we precharge all banks and schedule going to the idle state, we
         * might at a later point decide to activate a bank before the
         * transition to idle would have taken place.
         */
        PowerState pwrStateTrans;

        /**
         * Current power state.
         */
        PowerState pwrState;

        /**
         * Track when we transitioned to the current power state
         */
        Tick pwrStateTick;

        /**
         * current refresh state
         */
        RefreshState refreshState;

        /**
         * Keep track of when a refresh is due.
         */
        Tick refreshDueAt;

        /*
         * Command energies
         */
        Stats::Scalar actEnergy;
        Stats::Scalar preEnergy;
        Stats::Scalar readEnergy;
        Stats::Scalar writeEnergy;
        Stats::Scalar refreshEnergy;

        /*
         * Active Background Energy
         */
        Stats::Scalar actBackEnergy;

        /*
         * Precharge Background Energy
         */
        Stats::Scalar preBackEnergy;

        Stats::Scalar totalEnergy;
        Stats::Scalar averagePower;

        /**
         * Track time spent in each power state.
         */
        Stats::Vector pwrStateTime;

        /**
         * Function to update Power Stats
         */
        void updatePowerStats();

        /**
         * Schedule a power state transition in the future, and
         * potentially override an already scheduled transition.
         *
         * @param pwr_state Power state to transition to
         * @param tick Tick when transition should take place
         */
        void schedulePowerEvent(PowerState pwr_state, Tick tick);

      public:

        /**
         * Current Rank index
         */
        uint8_t rank;

        /**
         * One DRAMPower instance per rank
         */
        DRAMPower power;

        /**
         * Vector of Banks. Each rank is made of several devices which in
         * turn are made from several banks.
         */
        std::vector<Bank> banks;

        /**
         * To track number of banks which are currently active for
         * this rank.
         */
        unsigned int numBanksActive;

        /** List to keep track of activate ticks */
        std::deque<Tick> actTicks;

        Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p);

        const std::string name() const
        {
            return csprintf("%s_%d", memory.name(), rank);
        }

        /**
         * Kick off accounting for power and refresh states and
         * schedule initial refresh.
         *
         * @param ref_tick Tick for first refresh
         */
        void startup(Tick ref_tick);

        /**
         * Check if the current rank is available for scheduling.
         *
         * @return True if the rank is idle from a refresh point of view
         */
        bool isAvailable() const { return refreshState == REF_IDLE; }

        /**
         * Let the rank check if it was waiting for requests to drain
         * to allow it to transition states.
         */
        void checkDrainDone();

        /*
         * Function to register Stats
         */
        void regStats();

        void processActivateEvent();
        EventWrapper<Rank, &Rank::processActivateEvent>
        activateEvent;

        void processPrechargeEvent();
        EventWrapper<Rank, &Rank::processPrechargeEvent>
        prechargeEvent;

        void processRefreshEvent();
        EventWrapper<Rank, &Rank::processRefreshEvent>
        refreshEvent;

        void processPowerEvent();
        EventWrapper<Rank, &Rank::processPowerEvent>
        powerEvent;

    };

    /**
     * A burst helper helps organize and manage a packet that is larger than
     * the DRAM burst size. A system packet that is larger than the burst size
     * is split into multiple DRAM packets and all those DRAM packets point to
     * a single burst helper such that we know when the whole packet is served.
     */
    class BurstHelper {

      public:

        /** Number of DRAM bursts required for a system packet */
        const unsigned int burstCount;

        /** Number of DRAM bursts serviced so far for a system packet */
        unsigned int burstsServiced;

        BurstHelper(unsigned int _burstCount)
            : burstCount(_burstCount), burstsServiced(0)
        { }
    };

    /**
     * A DRAM packet stores packets along with the timestamp of when
     * the packet entered the queue, and also the decoded address.
     */
    class DRAMPacket {

      public:

        /** When did request enter the controller */
        const Tick entryTime;

        /** When will request leave the controller */
        Tick readyTime;

        /** This comes from the outside world */
        const PacketPtr pkt;

        const bool isRead;

        /** Will be populated by address decoder */
        const uint8_t rank;
        const uint8_t bank;
        const uint32_t row;

        /**
         * Bank id is calculated considering banks in all the ranks
         * eg: 2 ranks each with 8 banks, then bankId = 0 --> rank0, bank0 and
         * bankId = 8 --> rank1, bank0
         */
        const uint16_t bankId;

        /**
         * The starting address of the DRAM packet.
         * This address could be unaligned to burst size boundaries. The
         * reason is to keep the address offset so we can accurately check
         * incoming read packets with packets in the write queue.
         */
        Addr addr;

        /**
         * The size of this dram packet in bytes
         * It is always equal or smaller than DRAM burst size
         */
        unsigned int size;

        /**
         * A pointer to the BurstHelper if this DRAMPacket is a split packet
         * If not a split packet (common case), this is set to NULL
         */
        BurstHelper* burstHelper;
        Bank& bankRef;
        Rank& rankRef;

        DRAMPacket(PacketPtr _pkt, bool is_read, uint8_t _rank, uint8_t _bank,
                   uint32_t _row, uint16_t bank_id, Addr _addr,
                   unsigned int _size, Bank& bank_ref, Rank& rank_ref)
            : entryTime(curTick()), readyTime(curTick()),
              pkt(_pkt), isRead(is_read), rank(_rank), bank(_bank), row(_row),
              bankId(bank_id), addr(_addr), size(_size), burstHelper(NULL),
              bankRef(bank_ref), rankRef(rank_ref)
        { }

    };

    /**
     * Bunch of things requires to setup "events" in gem5
     * When event "respondEvent" occurs for example, the method
     * processRespondEvent is called; no parameters are allowed
     * in these methods
     */
    void processNextReqEvent();
    EventWrapper<DRAMCtrl,&DRAMCtrl::processNextReqEvent> nextReqEvent;

    void processRespondEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processRespondEvent> respondEvent;

    /**
     * Check if the read queue has room for more entries
     *
     * @param pktCount The number of entries needed in the read queue
     * @return true if read queue is full, false otherwise
     */
    bool readQueueFull(unsigned int pktCount) const;

    /**
     * Check if the write queue has room for more entries
     *
     * @param pktCount The number of entries needed in the write queue
     * @return true if write queue is full, false otherwise
     */
    bool writeQueueFull(unsigned int pktCount) const;

    /**
     * When a new read comes in, first check if the write q has a
     * pending request to the same address. If not, decode the
     * address to populate rank/bank/row, create one or multiple
     * "dram_pkt", and push them to the back of the read queue.
     * If this is the only
     * read request in the system, schedule an event to start
     * servicing it.
     *
     * @param pkt The request packet from the outside world
     * @param pktCount The number of DRAM bursts the pkt
     * translates to. If pkt size is larger than one full burst,
     * then pktCount is greater than one.
     */
    void addToReadQueue(PacketPtr pkt, unsigned int pktCount);

    /**
     * Decode the incoming pkt, create a dram_pkt and push to the
     * back of the write queue. If the write q length is more than
     * the threshold specified by the user, ie the queue is beginning
     * to get full, stop reads, and start draining writes.
     *
     * @param pkt The request packet from the outside world
     * @param pktCount The number of DRAM bursts the pkt
     * translates to. If pkt size is larger than one full burst,
     * then pktCount is greater than one.
     */
    void addToWriteQueue(PacketPtr pkt, unsigned int pktCount);

    /**
     * Actually do the DRAM access - figure out the latency it
     * will take to service the req based on bank state, channel state etc
     * and then update those states to account for this request. Based
     * on this, update the packet's "readyTime" and move it to the
     * response q from where it will eventually go back to the outside
     * world.
     *
     * @param dram_pkt The DRAM packet created from the outside world pkt
     */
    void doDRAMAccess(DRAMPacket* dram_pkt);

    /**
     * When a packet reaches its "readyTime" in the response Q,
     * use the "access()" method in AbstractMemory to actually
     * create the response packet, and send it back to the outside
     * world requestor.
     *
     * @param pkt The packet from the outside world
     * @param static_latency Static latency to add before sending the packet
     */
    void accessAndRespond(PacketPtr pkt, Tick static_latency);

    /**
     * Address decoder to figure out physical mapping onto ranks,
     * banks, and rows. This function is called multiple times on the same
     * system packet if the packet is larger than burst of the memory. The
     * dramPktAddr is used for the offset within the packet.
     *
     * @param pkt The packet from the outside world
     * @param dramPktAddr The starting address of the DRAM packet
     * @param size The size of the DRAM packet in bytes
     * @param isRead Is the request for a read or a write to DRAM
     * @return A DRAMPacket pointer with the decoded information
     */
    DRAMPacket* decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned int size,
                           bool isRead);

    /**
     * The memory scheduler/arbiter - picks which request needs to
     * go next, based on the specified policy such as FCFS or FR-FCFS
     * and moves it to the head of the queue.
     * Prioritizes accesses to the same rank as previous burst unless
     * controller is switching command type.
     *
     * @param queue Queued requests to consider
     * @param switched_cmd_type Command type is changing
     * @return true if a packet is scheduled to a rank which is available else
     * false
     */
    bool chooseNext(std::deque<DRAMPacket*>& queue, bool switched_cmd_type);

    /**
     * For FR-FCFS policy reorder the read/write queue depending on row buffer
     * hits and earliest banks available in DRAM
     * Prioritizes accesses to the same rank as previous burst unless
     * controller is switching command type.
     *
     * @param queue Queued requests to consider
     * @param switched_cmd_type Command type is changing
     * @return true if a packet is scheduled to a rank which is available else
     * false
     */
    bool reorderQueue(std::deque<DRAMPacket*>& queue, bool switched_cmd_type);

    /**
     * Find which are the earliest banks ready to issue an activate
     * for the enqueued requests. Assumes maximum of 64 banks per DIMM
     * Also checks if the bank is already prepped.
     *
     * @param queue Queued requests to consider
     * @param switched_cmd_type Command type is changing
     * @return One-hot encoded mask of bank indices
     */
    uint64_t minBankPrep(const std::deque<DRAMPacket*>& queue,
                         bool switched_cmd_type) const;

    /**
     * Keep track of when row activations happen, in order to enforce
     * the maximum number of activations in the activation window. The
     * method updates the time that the banks become available based
     * on the current limits.
     *
     * @param rank_ref Reference to the rank
     * @param bank_ref Reference to the bank
     * @param act_tick Time when the activation takes place
     * @param row Index of the row
     */
    void activateBank(Rank& rank_ref, Bank& bank_ref, Tick act_tick,
                      uint32_t row);

    /**
     * Precharge a given bank and also update when the precharge is
     * done. This will also deal with any stats related to the
     * accesses to the open page.
     *
     * @param rank_ref The rank to precharge
     * @param bank_ref The bank to precharge
     * @param pre_at Time when the precharge takes place
     * @param trace Is this an auto precharge then do not add to trace
     */
    void prechargeBank(Rank& rank_ref, Bank& bank_ref,
                       Tick pre_at, bool trace = true);

    /**
     * Used for debugging to observe the contents of the queues.
     */
    void printQs() const;

    /**
     * The controller's main read and write queues
     */
    std::deque<DRAMPacket*> readQueue;
    std::deque<DRAMPacket*> writeQueue;

    /**
     * Response queue where read packets wait after we're done working
     * with them, but it's not time to send the response yet. The
     * responses are stored separately mostly to keep the code clean
     * and help with events scheduling. For all logical purposes such
     * as sizing the read queue, this and the main read queue need to
     * be added together.
     */
    std::deque<DRAMPacket*> respQueue;

    /**
     * If we need to drain, keep the drain manager around until we're
     * done here.
     */
    DrainManager *drainManager;

    /**
     * Vector of ranks
     */
    std::vector<Rank*> ranks;

    /**
     * The following are basic design parameters of the memory
     * controller, and are initialized based on parameter values.
     * The rowsPerBank is determined based on the capacity, number of
     * ranks and banks, the burst size, and the row buffer size.
     */
    const uint32_t deviceSize;
    const uint32_t deviceBusWidth;
    const uint32_t burstLength;
    const uint32_t deviceRowBufferSize;
    const uint32_t devicesPerRank;
    const uint32_t burstSize;
    const uint32_t rowBufferSize;
    const uint32_t columnsPerRowBuffer;
    const uint32_t columnsPerStripe;
    const uint32_t ranksPerChannel;
    const uint32_t bankGroupsPerRank;
    const bool bankGroupArch;
    const uint32_t banksPerRank;
    const uint32_t channels;
    uint32_t rowsPerBank;
    const uint32_t readBufferSize;
    const uint32_t writeBufferSize;
    const uint32_t writeHighThreshold;
    const uint32_t writeLowThreshold;
    const uint32_t minWritesPerSwitch;
    uint32_t writesThisTime;
    uint32_t readsThisTime;

    /**
     * Basic memory timing parameters initialized based on parameter
     * values.
     */
    const Tick M5_CLASS_VAR_USED tCK;
    const Tick tWTR;
    const Tick tRTW;
    const Tick tCS;
    const Tick tBURST;
    const Tick tCCD_L;
    const Tick tRCD;
    const Tick tCL;
    const Tick tRP;
    const Tick tRAS;
    const Tick tWR;
    const Tick tRTP;
    const Tick tRFC;
    const Tick tREFI;
    const Tick tRRD;
    const Tick tRRD_L;
    const Tick tXAW;
    const uint32_t activationLimit;

    /**
     * Memory controller configuration initialized based on parameter
     * values.
     */
    Enums::MemSched memSchedPolicy;
    Enums::AddrMap addrMapping;
    Enums::PageManage pageMgmt;

    /**
     * Max column accesses (read and write) per row, before forcefully
     * closing it.
     */
    const uint32_t maxAccessesPerRow;

    /**
     * Pipeline latency of the controller frontend. The frontend
     * contribution is added to writes (that complete when they are in
     * the write buffer) and reads that are serviced the write buffer.
     */
    const Tick frontendLatency;

    /**
     * Pipeline latency of the backend and PHY. Along with the
     * frontend contribution, this latency is added to reads serviced
     * by the DRAM.
     */
    const Tick backendLatency;

    /**
     * Till when has the main data bus been spoken for already?
     */
    Tick busBusyUntil;

    Tick prevArrival;

    /**
     * The soonest you have to start thinking about the next request
     * is the longest access time that can occur before
     * busBusyUntil. Assuming you need to precharge, open a new row,
     * and access, it is tRP + tRCD + tCL.
     */
    Tick nextReqTime;

    // All statistics that the model needs to capture
    Stats::Scalar readReqs;
    Stats::Scalar writeReqs;
    Stats::Scalar readBursts;
    Stats::Scalar writeBursts;
    Stats::Scalar bytesReadDRAM;
    Stats::Scalar bytesReadWrQ;
    Stats::Scalar bytesWritten;
    Stats::Scalar bytesReadSys;
    Stats::Scalar bytesWrittenSys;
    Stats::Scalar servicedByWrQ;
    Stats::Scalar mergedWrBursts;
    Stats::Scalar neitherReadNorWrite;
    Stats::Vector perBankRdBursts;
    Stats::Vector perBankWrBursts;
    Stats::Scalar numRdRetry;
    Stats::Scalar numWrRetry;
    Stats::Scalar totGap;
    Stats::Vector readPktSize;
    Stats::Vector writePktSize;
    Stats::Vector rdQLenPdf;
    Stats::Vector wrQLenPdf;
    Stats::Histogram bytesPerActivate;
    Stats::Histogram rdPerTurnAround;
    Stats::Histogram wrPerTurnAround;

    // Latencies summed over all requests
    Stats::Scalar totQLat;
    Stats::Scalar totMemAccLat;
    Stats::Scalar totBusLat;

    // Average latencies per request
    Stats::Formula avgQLat;
    Stats::Formula avgBusLat;
    Stats::Formula avgMemAccLat;

    // Average bandwidth
    Stats::Formula avgRdBW;
    Stats::Formula avgWrBW;
    Stats::Formula avgRdBWSys;
    Stats::Formula avgWrBWSys;
    Stats::Formula peakBW;
    Stats::Formula busUtil;
    Stats::Formula busUtilRead;
    Stats::Formula busUtilWrite;

    // Average queue lengths
    Stats::Average avgRdQLen;
    Stats::Average avgWrQLen;

    // Row hit count and rate
    Stats::Scalar readRowHits;
    Stats::Scalar writeRowHits;
    Stats::Formula readRowHitRate;
    Stats::Formula writeRowHitRate;
    Stats::Formula avgGap;

    // DRAM Power Calculation
    Stats::Formula pageHitRate;

    // Holds the value of the rank of burst issued
    uint8_t activeRank;

    // timestamp offset
    uint64_t timeStampOffset;

    /** @todo this is a temporary workaround until the 4-phase code is
     * committed. upstream caches needs this packet until true is returned, so
     * hold onto it for deletion until a subsequent call
     */
    std::vector<PacketPtr> pendingDelete;

    /**
     * This function increments the energy when called. If stats are
     * dumped periodically, note accumulated energy values will
     * appear in the stats (even if the stats are reset). This is a
     * result of the energy values coming from DRAMPower, and there
     * is currently no support for resetting the state.
     *
     * @param rank_ref Current rank
     */
    void updatePowerStats(Rank& rank_ref);

    /**
     * Function for sorting commands in the command list of DRAMPower.
     *
     * @param m1 Memory Command in command list of DRAMPower library
     * @param m2 Next Memory Command in command list of DRAMPower
     * @return true if timestamp of Command 1 < timestamp of Command 2
     */
    static bool sortTime(const Data::MemCommand& m1,
                         const Data::MemCommand& m2) {
        return m1.getTime() < m2.getTime();
    };


  public:

    void regStats();

    DRAMCtrl(const DRAMCtrlParams* p);

    unsigned int drain(DrainManager* dm);

    virtual BaseSlavePort& getSlavePort(const std::string& if_name,
                                        PortID idx = InvalidPortID);

    virtual void init();
    virtual void startup();

  protected:

    Tick recvAtomic(PacketPtr pkt);
    void recvFunctional(PacketPtr pkt);
    bool recvTimingReq(PacketPtr pkt);

};

#endif //__MEM_DRAM_CTRL_HH__