/*
 * Copyright (c) 2012-2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 *          Ani Udipi
 *          Neha Agarwal
 *          Omar Naji
 *          Matthias Jung
 *          Wendy Elsasser
 *          Radhika Jagtap
 */

/**
 * @file
 * DRAMCtrl declaration
 */

#ifndef __MEM_DRAM_CTRL_HH__
#define __MEM_DRAM_CTRL_HH__

#include <deque>
#include <string>
#include <unordered_set>
#include <vector>

#include "base/callback.hh"
#include "base/statistics.hh"
#include "enums/AddrMap.hh"
#include "enums/MemSched.hh"
#include "enums/PageManage.hh"
#include "mem/drampower.hh"
#include "mem/qos/mem_ctrl.hh"
#include "mem/qport.hh"
#include "params/DRAMCtrl.hh"
#include "sim/eventq.hh"

/**
 * The DRAM controller is a single-channel memory controller capturing
 * the most important timing constraints associated with a
 * contemporary DRAM. For multi-channel memory systems, the controller
 * is combined with a crossbar model, with the channel address
 * interleaving taking place in the crossbar.
 *
 * As a basic design principle, this controller
 * model is not cycle callable, but instead uses events to: 1) decide
 * when new decisions can be made, 2) when resources become available,
 * 3) when things are to be considered done, and 4) when to send
 * things back. Through these simple principles, the model delivers
 * high performance and a lot of flexibility, allowing users to
 * evaluate the system impact of a wide range of memory technologies,
 * such as DDR3/4, LPDDR2/3/4, WideIO1/2, HBM and HMC.
 *
 * For more details, please see Hansson et al, "Simulating DRAM
 * controllers for future system architecture exploration",
 * Proc. ISPASS, 2014. If you use this model as part of your research
 * please cite the paper.
 *
 * The low-power functionality implements a staggered powerdown
 * similar to that described in "Optimized Active and Power-Down Mode
 * Refresh Control in 3D-DRAMs" by Jung et al, VLSI-SoC, 2014.
 */
class DRAMCtrl : public QoS::MemCtrl
{

  private:

    // For now, make use of a queued slave port to avoid dealing with
    // flow control for the responses being sent back
    class MemoryPort : public QueuedSlavePort
    {

        RespPacketQueue queue;
        DRAMCtrl& memory;

      public:

        MemoryPort(const std::string& name, DRAMCtrl& _memory);

      protected:

        Tick recvAtomic(PacketPtr pkt);

        void recvFunctional(PacketPtr pkt);

        bool recvTimingReq(PacketPtr);

        virtual AddrRangeList getAddrRanges() const;

    };

    /**
     * Our incoming port, for a multi-ported controller add a crossbar
     * in front of it
     */
    MemoryPort port;

    /**
     * Remember if the memory system is in timing mode
     */
    bool isTimingMode;

    /**
     * Remember if we have to retry a request when available.
     */
    bool retryRdReq;
    bool retryWrReq;

    /**
     * Simple structure to hold the values needed to keep track of
     * commands for DRAMPower
     */
    struct Command {
        Data::MemCommand::cmds type;
        uint8_t bank;
        Tick timeStamp;

        constexpr Command(Data::MemCommand::cmds _type, uint8_t _bank,
                          Tick time_stamp)
            : type(_type), bank(_bank), timeStamp(time_stamp)
        { }
    };
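
    // A minimal usage sketch (illustrative only, assuming a Rank& rank_ref
    // and a decoded DRAMPacket* dram_pkt are in scope): each issued command
    // is appended to the owning rank's cmdList and later handed to the
    // DRAMPower library, see Rank::cmdList and Rank::flushCmdList() below:
    //
    //     rank_ref.cmdList.push_back(Command(Data::MemCommand::ACT,
    //                                        dram_pkt->bank, act_tick));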

    /**
     * A basic class to track the bank state, i.e. what row is
     * currently open (if any), when is the bank free to accept a new
     * column (read/write) command, when can it be precharged, and
     * when can it be activated.
     *
     * The bank also keeps track of how many bytes have been accessed
     * in the open row since it was opened.
     */
    class Bank
    {

      public:

        static const uint32_t NO_ROW = -1;

        uint32_t openRow;
        uint8_t bank;
        uint8_t bankgr;

        Tick rdAllowedAt;
        Tick wrAllowedAt;
        Tick preAllowedAt;
        Tick actAllowedAt;

        uint32_t rowAccesses;
        uint32_t bytesAccessed;

        Bank() :
            openRow(NO_ROW), bank(0), bankgr(0),
            rdAllowedAt(0), wrAllowedAt(0), preAllowedAt(0), actAllowedAt(0),
            rowAccesses(0), bytesAccessed(0)
        { }
    };
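
    // A sketch of how the scheduler can use this state (illustrative only,
    // assuming a Bank& bank_ref and a decoded DRAMPacket* dram_pkt): a
    // request is a row-buffer hit when the bank already has that row open:
    //
    //     const bool row_hit = bank_ref.openRow == dram_pkt->row;
    //     const bool bank_closed = bank_ref.openRow == Bank::NO_ROW;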

    /**
     * The power state captures the different operational states of
     * the DRAM and interacts with the bus read/write state machine,
     * and the refresh state machine.
     *
     * PWR_IDLE      : The idle state in which all banks are closed
     *                 From here can transition to: PWR_REF, PWR_ACT,
     *                 PWR_PRE_PDN
     *
     * PWR_REF       : Auto-refresh state. Will transition when refresh is
     *                 complete based on power state prior to PWR_REF
     *                 From here can transition to: PWR_IDLE, PWR_PRE_PDN,
     *                 PWR_SREF
     *
     * PWR_SREF      : Self-refresh state. Entered after refresh if
     *                 previous state was PWR_PRE_PDN
     *                 From here can transition to: PWR_IDLE
     *
     * PWR_PRE_PDN   : Precharge power down state
     *                 From here can transition to: PWR_REF, PWR_IDLE
     *
     * PWR_ACT       : Activate state in which one or more banks are open
     *                 From here can transition to: PWR_IDLE, PWR_ACT_PDN
     *
     * PWR_ACT_PDN   : Activate power down state
     *                 From here can transition to: PWR_ACT
     */
    enum PowerState {
        PWR_IDLE = 0,
        PWR_REF,
        PWR_SREF,
        PWR_PRE_PDN,
        PWR_ACT,
        PWR_ACT_PDN
    };

    /**
     * The refresh state is used to control the progress of the
     * refresh scheduling. When normal operation is in progress the
     * refresh state is idle. Once tREFI has elapsed, a refresh event
     * is triggered to start the following state machine transitions,
     * which are used to issue a refresh and return back to normal
     * operation.
     *
     * REF_IDLE      : IDLE state used during normal operation
     *                 From here can transition to: REF_DRAIN
     *
     * REF_SREF_EXIT : Exiting a self-refresh; refresh event scheduled
     *                 after self-refresh exit completes
     *                 From here can transition to: REF_DRAIN
     *
     * REF_DRAIN     : Drain state in which ongoing accesses complete.
     *                 From here can transition to: REF_PD_EXIT
     *
     * REF_PD_EXIT   : Evaluate pwrState and issue wakeup if needed
     *                 Next state dependent on whether banks are open
     *                 From here can transition to: REF_PRE, REF_START
     *
     * REF_PRE       : Close (precharge) all open banks
     *                 From here can transition to: REF_START
     *
     * REF_START     : Issue refresh command and update DRAMPower stats
     *                 From here can transition to: REF_RUN
     *
     * REF_RUN       : Refresh running, waiting for tRFC to expire
     *                 From here can transition to: REF_IDLE, REF_SREF_EXIT
     */
    enum RefreshState {
        REF_IDLE = 0,
        REF_DRAIN,
        REF_PD_EXIT,
        REF_SREF_EXIT,
        REF_PRE,
        REF_START,
        REF_RUN
    };

    /**
     * Rank class includes a vector of banks. Refresh and Power state
     * machines are defined per rank. Events required to change the
     * state of the refresh and power state machine are scheduled per
     * rank. This class allows the implementation of rank-wise refresh
     * and rank-wise power-down.
     */
    class Rank : public EventManager
    {

      private:

        /**
         * A reference to the parent DRAMCtrl instance
         */
        DRAMCtrl& memory;

        /**
         * Since we are taking decisions out of order, we need to keep
         * track of what power transition is happening at what time
         */
        PowerState pwrStateTrans;

        /**
         * Previous low-power state, which will be re-entered after refresh.
         */
        PowerState pwrStatePostRefresh;

        /**
         * Track when we transitioned to the current power state
         */
        Tick pwrStateTick;

        /**
         * Keep track of when a refresh is due.
         */
        Tick refreshDueAt;

        /*
         * Command energies
         */
        Stats::Scalar actEnergy;
        Stats::Scalar preEnergy;
        Stats::Scalar readEnergy;
        Stats::Scalar writeEnergy;
        Stats::Scalar refreshEnergy;

        /*
         * Active Background Energy
         */
        Stats::Scalar actBackEnergy;

        /*
         * Precharge Background Energy
         */
        Stats::Scalar preBackEnergy;

        /*
         * Active Power-Down Energy
         */
        Stats::Scalar actPowerDownEnergy;

        /*
         * Precharge Power-Down Energy
         */
        Stats::Scalar prePowerDownEnergy;

        /*
         * Self-Refresh Energy
         */
        Stats::Scalar selfRefreshEnergy;

        Stats::Scalar totalEnergy;
        Stats::Scalar averagePower;

        /**
         * Stat to track total DRAM idle time
         */
        Stats::Scalar totalIdleTime;

        /**
         * Track time spent in each power state.
         */
        Stats::Vector pwrStateTime;

        /**
         * Function to update power stats
         */
        void updatePowerStats();

        /**
         * Schedule a power state transition in the future, and
         * potentially override an already scheduled transition.
         *
         * @param pwr_state Power state to transition to
         * @param tick Tick when transition should take place
         */
        void schedulePowerEvent(PowerState pwr_state, Tick tick);

      public:

        /**
         * Current power state.
         */
        PowerState pwrState;

        /**
         * Current refresh state
         */
        RefreshState refreshState;

        /**
         * Rank is in or transitioning to power-down or self-refresh
         */
        bool inLowPowerState;

        /**
         * Current Rank index
         */
        uint8_t rank;

        /**
         * Track number of packets in read queue going to this rank
         */
        uint32_t readEntries;

        /**
         * Track number of packets in write queue going to this rank
         */
        uint32_t writeEntries;

        /**
         * Number of ACT, RD, and WR events currently scheduled
         * Incremented when a refresh event is started as well
         * Used to determine when a low-power state can be entered
         */
        uint8_t outstandingEvents;

        /**
         * Delay power-down and self-refresh exit until this requirement
         * is met
         */
        Tick wakeUpAllowedAt;

        /**
         * One DRAMPower instance per rank
         */
        DRAMPower power;

        /**
         * List of commands issued, to be sent to DRAMPower at refresh
         * and stats dump. Keep commands here since commands to different
         * banks are added out of order. Will only pass commands up to
         * curTick() to DRAMPower after sorting.
         */
        std::vector<Command> cmdList;

        /**
         * Vector of Banks. Each rank is made of several devices which in
         * turn are made of several banks.
         */
        std::vector<Bank> banks;

        /**
         * To track number of banks which are currently active for
         * this rank.
         */
        unsigned int numBanksActive;

        /** List to keep track of activate ticks */
        std::deque<Tick> actTicks;

        Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p, int rank);

        const std::string name() const
        {
            return csprintf("%s_%d", memory.name(), rank);
        }

        /**
         * Kick off accounting for power and refresh states and
         * schedule initial refresh.
         *
         * @param ref_tick Tick for first refresh
         */
        void startup(Tick ref_tick);

        /**
         * Stop the refresh events.
         */
        void suspend();

        /**
         * Check if there is no refresh and no preparation of refresh
         * ongoing, i.e. the refresh state machine is idle
         *
         * @return True if the rank is idle from a refresh point of view
         */
        bool inRefIdleState() const { return refreshState == REF_IDLE; }

        /**
         * Check if the current rank has all banks closed and is not
         * in a low power state
         *
         * @return True if the rank is idle from a bank
         *         and power point of view
         */
        bool inPwrIdleState() const { return pwrState == PWR_IDLE; }

        /**
         * Trigger a self-refresh exit if there are entries enqueued
         * Exit if there are any read entries regardless of the bus state.
         * If we are currently issuing write commands, exit if we have any
         * write commands enqueued as well.
         * Could expand this in the future to analyze state of entire queue
         * if needed.
         *
         * @return boolean indicating self-refresh exit should be scheduled
         */
        bool forceSelfRefreshExit() const {
            return (readEntries != 0) ||
                   ((memory.busStateNext == WRITE) && (writeEntries != 0));
        }

        /**
         * Check if the command queue of the current rank is idle
         *
         * @return True if there are no commands in the queue.
         *         Bus direction determines which queue is checked.
         */
        bool isQueueEmpty() const;

        /**
         * Let the rank check if it was waiting for requests to drain
         * to allow it to transition states.
         */
        void checkDrainDone();

        /**
         * Push commands out of the cmdList queue that are scheduled at
         * or before curTick() to the DRAMPower library.
         * All commands before curTick() are guaranteed to be complete
         * and can safely be flushed.
         */
        void flushCmdList();
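
        // A minimal sketch of the flush (illustrative only; the actual
        // implementation lives in the .cc file): sort the out-of-order
        // list by timestamp, then hand everything up to curTick() to
        // DRAMPower:
        //
        //     std::sort(cmdList.begin(), cmdList.end(), DRAMCtrl::sortTime);
        //     for (const auto& cmd : cmdList) {
        //         if (cmd.timeStamp > curTick())
        //             break;
        //         // forward cmd.type / cmd.bank / cmd.timeStamp to power
        //     }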

        /*
         * Function to register Stats
         */
        void regStats();

        /**
         * Computes stats just prior to dump event
         */
        void computeStats();

        /**
         * Reset stats on a stats event
         */
        void resetStats();

        /**
         * Schedule a transition to power-down (sleep)
         *
         * @param pwr_state Power state to transition to
         * @param tick Absolute tick when transition should take place
         */
        void powerDownSleep(PowerState pwr_state, Tick tick);

        /**
         * Schedule an event to wake up from power-down or self-refresh
         * and update bank timing parameters
         *
         * @param exit_delay Relative tick defining the delay required between
         *                   low-power exit and the next command
         */
        void scheduleWakeUpEvent(Tick exit_delay);

        void processWriteDoneEvent();
        EventFunctionWrapper writeDoneEvent;

        void processActivateEvent();
        EventFunctionWrapper activateEvent;

        void processPrechargeEvent();
        EventFunctionWrapper prechargeEvent;

        void processRefreshEvent();
        EventFunctionWrapper refreshEvent;

        void processPowerEvent();
        EventFunctionWrapper powerEvent;

        void processWakeUpEvent();
        EventFunctionWrapper wakeUpEvent;

    };

    /**
     * Define the process to compute stats on a stats dump event, e.g. on
     * simulation exit or intermediate stats dump. This is defined per rank
     * as the per rank stats are based on state transition and periodically
     * updated, requiring re-sync at exit.
     */
    class RankDumpCallback : public Callback
    {
        Rank *ranks;
      public:
        RankDumpCallback(Rank *r) : ranks(r) {}
        virtual void process() { ranks->computeStats(); }
    };

    /** Define a process to clear power lib counters on a stats reset */
    class RankResetCallback : public Callback
    {
      private:
        /** Pointer to the rank, thus we instantiate per rank */
        Rank *rank;

      public:
        RankResetCallback(Rank *r) : rank(r) {}
        virtual void process() { rank->resetStats(); }
    };

    /** Define a process to store the time on a stats reset */
    class MemResetCallback : public Callback
    {
      private:
        /** A reference to the DRAMCtrl instance */
        DRAMCtrl *mem;

      public:
        MemResetCallback(DRAMCtrl *_mem) : mem(_mem) {}
        virtual void process() { mem->lastStatsResetTick = curTick(); }
    };

    /**
     * A burst helper helps organize and manage a packet that is larger than
     * the DRAM burst size. A system packet that is larger than the burst size
     * is split into multiple DRAM packets and all those DRAM packets point to
     * a single burst helper such that we know when the whole packet is served.
     */
    class BurstHelper {

      public:

        /** Number of DRAM bursts required for a system packet */
        const unsigned int burstCount;

        /** Number of DRAM bursts serviced so far for a system packet */
        unsigned int burstsServiced;

        BurstHelper(unsigned int _burstCount)
            : burstCount(_burstCount), burstsServiced(0)
        { }
    };
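
    // An illustrative sketch of how the helper is used (assuming a 64-byte
    // burst size): a 128-byte system packet is split into two DRAM packets
    // that share one helper, and the response goes out only when both have
    // been serviced:
    //
    //     BurstHelper* helper = new BurstHelper(2);  // 128 / 64 bursts
    //     ...
    //     if (++helper->burstsServiced == helper->burstCount) {
    //         // whole system packet served, respond upstream
    //     }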

    /**
     * A DRAM packet stores a packet along with the timestamp of when
     * it entered the queue, and also the decoded address.
     */
    class DRAMPacket {

      public:

        /** When did the request enter the controller */
        const Tick entryTime;

        /** When will the request leave the controller */
        Tick readyTime;

        /** This comes from the outside world */
        const PacketPtr pkt;

        /** MasterID associated with the packet */
        const MasterID _masterId;

        const bool read;

        /** Will be populated by address decoder */
        const uint8_t rank;
        const uint8_t bank;
        const uint32_t row;

        /**
         * Bank id is calculated considering banks in all the ranks,
         * e.g.: with 2 ranks of 8 banks each, bankId = 0 --> rank0, bank0,
         * and bankId = 8 --> rank1, bank0
         */
        const uint16_t bankId;
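
        // A sketch of the flat bank id computation implied above
        // (illustrative only; banksPerRank is a controller parameter
        // declared further down):
        //
        //     uint16_t bank_id = rank * banksPerRank + bank;
        //     // e.g. rank 1, bank 0 with banksPerRank == 8 --> bankId 8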

        /**
         * The starting address of the DRAM packet.
         * This address could be unaligned to burst size boundaries. The
         * reason is to keep the address offset so we can accurately check
         * incoming read packets with packets in the write queue.
         */
        Addr addr;

        /**
         * The size of this dram packet in bytes
         * It is always equal to or smaller than the DRAM burst size
         */
        unsigned int size;

        /**
         * A pointer to the BurstHelper if this DRAMPacket is a split packet
         * If not a split packet (common case), this is set to NULL
         */
        BurstHelper* burstHelper;
        Bank& bankRef;
        Rank& rankRef;

        /**
         * QoS value of the encapsulated packet read at queuing time
         */
        uint8_t _qosValue;

        /**
         * Set the packet QoS value
         * (interface compatibility with Packet)
         */
        inline void qosValue(const uint8_t qv) { _qosValue = qv; }

        /**
         * Get the packet QoS value
         * (interface compatibility with Packet)
         */
        inline uint8_t qosValue() const { return _qosValue; }

        /**
         * Get the packet MasterID
         * (interface compatibility with Packet)
         */
        inline MasterID masterId() const { return _masterId; }

        /**
         * Get the packet size
         * (interface compatibility with Packet)
         */
        inline unsigned int getSize() const { return size; }

        /**
         * Get the packet address
         * (interface compatibility with Packet)
         */
        inline Addr getAddr() const { return addr; }

        /**
         * Return true if it is a read packet
         * (interface compatibility with Packet)
         */
        inline bool isRead() const { return read; }

        /**
         * Return true if it is a write packet
         * (interface compatibility with Packet)
         */
        inline bool isWrite() const { return !read; }


        DRAMPacket(PacketPtr _pkt, bool is_read, uint8_t _rank, uint8_t _bank,
                   uint32_t _row, uint16_t bank_id, Addr _addr,
                   unsigned int _size, Bank& bank_ref, Rank& rank_ref)
            : entryTime(curTick()), readyTime(curTick()), pkt(_pkt),
              _masterId(pkt->masterId()),
              read(is_read), rank(_rank), bank(_bank), row(_row),
              bankId(bank_id), addr(_addr), size(_size), burstHelper(NULL),
              bankRef(bank_ref), rankRef(rank_ref), _qosValue(_pkt->qosValue())
        { }

    };

    // The DRAM packets are stored in multiple deques, one per QoS
    // priority level
    typedef std::deque<DRAMPacket*> DRAMPacketQueue;

    /**
     * Bunch of things required to set up "events" in gem5.
     * When event "respondEvent" occurs for example, the method
     * processRespondEvent is called; no parameters are allowed
     * in these methods
     */
    void processNextReqEvent();
    EventFunctionWrapper nextReqEvent;

    void processRespondEvent();
    EventFunctionWrapper respondEvent;

    /**
     * Check if the read queue has room for more entries
     *
     * @param pktCount The number of entries needed in the read queue
     * @return true if read queue is full, false otherwise
     */
    bool readQueueFull(unsigned int pktCount) const;

    /**
     * Check if the write queue has room for more entries
     *
     * @param pktCount The number of entries needed in the write queue
     * @return true if write queue is full, false otherwise
     */
    bool writeQueueFull(unsigned int pktCount) const;

    /**
     * When a new read comes in, first check if the write queue has a
     * pending request to the same address. If not, decode the
     * address to populate rank/bank/row, create one or multiple
     * "dram_pkt", and push them to the back of the read queue.
     * If this is the only read request in the system, schedule an
     * event to start servicing it.
     *
     * @param pkt The request packet from the outside world
     * @param pktCount The number of DRAM bursts the pkt
     *                 translates to. If pkt size is larger than one full
     *                 burst, then pktCount is greater than one.
     */
    void addToReadQueue(PacketPtr pkt, unsigned int pktCount);
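
    // A sketch of how the burst count for a packet can be derived before
    // calling the queueing functions (illustrative only; burstSize is a
    // controller parameter declared further down, and divCeil is the
    // rounding-up division helper from base/intmath.hh):
    //
    //     Addr base = pkt->getAddr();
    //     Addr offset = base & (burstSize - 1);
    //     unsigned int pkt_count = divCeil(offset + pkt->getSize(),
    //                                      burstSize);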

    /**
     * Decode the incoming pkt, create a dram_pkt and push to the
     * back of the write queue. If the write queue length is more than
     * the threshold specified by the user, i.e. the queue is beginning
     * to get full, stop reads, and start draining writes.
     *
     * @param pkt The request packet from the outside world
     * @param pktCount The number of DRAM bursts the pkt
     *                 translates to. If pkt size is larger than one full
     *                 burst, then pktCount is greater than one.
     */
    void addToWriteQueue(PacketPtr pkt, unsigned int pktCount);

    /**
     * Actually do the DRAM access - figure out the latency it
     * will take to service the req based on bank state, channel state etc,
     * and then update those states to account for this request. Based
     * on this, update the packet's "readyTime" and move it to the
     * response queue from where it will eventually go back to the outside
     * world.
     *
     * @param dram_pkt The DRAM packet created from the outside world pkt
     */
    void doDRAMAccess(DRAMPacket* dram_pkt);

    /**
     * When a packet reaches its "readyTime" in the response Q,
     * use the "access()" method in AbstractMemory to actually
     * create the response packet, and send it back to the outside
     * world requestor.
     *
     * @param pkt The packet from the outside world
     * @param static_latency Static latency to add before sending the packet
     */
    void accessAndRespond(PacketPtr pkt, Tick static_latency);

    /**
     * Address decoder to figure out physical mapping onto ranks,
     * banks, and rows. This function is called multiple times on the same
     * system packet if the packet is larger than a burst of the memory. The
     * dramPktAddr is used for the offset within the packet.
     *
     * @param pkt The packet from the outside world
     * @param dramPktAddr The starting address of the DRAM packet
     * @param size The size of the DRAM packet in bytes
     * @param isRead Is the request for a read or a write to DRAM
     * @return A DRAMPacket pointer with the decoded information
     */
    DRAMPacket* decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned int size,
                           bool isRead);

    /**
     * The memory scheduler/arbiter - picks which request needs to
     * go next, based on the specified policy such as FCFS or FR-FCFS
     * and moves it to the head of the queue.
     * Prioritizes accesses to the same rank as the previous burst unless
     * the controller is switching command type.
     *
     * @param queue Queued requests to consider
     * @param extra_col_delay Any extra delay due to a read/write switch
     * @return an iterator to the selected packet, else queue.end()
     */
    DRAMPacketQueue::iterator chooseNext(DRAMPacketQueue& queue,
                                         Tick extra_col_delay);

    /**
     * For FR-FCFS policy, reorder the read/write queue depending on row
     * buffer hits and earliest bursts available in DRAM
     *
     * @param queue Queued requests to consider
     * @param extra_col_delay Any extra delay due to a read/write switch
     * @return an iterator to the selected packet, else queue.end()
     */
    DRAMPacketQueue::iterator chooseNextFRFCFS(DRAMPacketQueue& queue,
                                               Tick extra_col_delay);

    /**
     * Find which are the earliest banks ready to issue an activate
     * for the enqueued requests. Assumes a maximum of 32 banks per rank.
     * Also checks if the bank is already prepped.
     *
     * @param queue Queued requests to consider
     * @param min_col_at time of seamless burst command
     * @return A pair of the one-hot encoded mask of bank indices and a
     *         boolean indicating whether the burst can issue seamlessly,
     *         with no gaps
     */
    std::pair<std::vector<uint32_t>, bool>
    minBankPrep(const DRAMPacketQueue& queue, Tick min_col_at) const;
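
    // A sketch of the one-hot bank mask the above returns (illustrative
    // only): one uint32_t per rank, one bit per bank (hence the 32-bank
    // assumption), so marking and testing a candidate bank looks like:
    //
    //     std::vector<uint32_t> bank_mask(ranksPerChannel, 0);
    //     bank_mask[rank_id] |= (1 << bank_id);              // mark ready
    //     bool ready = bank_mask[rank_id] & (1 << bank_id);  // test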

    /**
     * Keep track of when row activations happen, in order to enforce
     * the maximum number of activations in the activation window. The
     * method updates the time that the banks become available based
     * on the current limits.
     *
     * @param rank_ref Reference to the rank
     * @param bank_ref Reference to the bank
     * @param act_tick Time when the activation takes place
     * @param row Index of the row
     */
    void activateBank(Rank& rank_ref, Bank& bank_ref, Tick act_tick,
                      uint32_t row);

    /**
     * Precharge a given bank and also update when the precharge is
     * done. This will also deal with any stats related to the
     * accesses to the open page.
     *
     * @param rank_ref The rank to precharge
     * @param bank_ref The bank to precharge
     * @param pre_at Time when the precharge takes place
     * @param trace If this is an auto precharge, do not add it to the trace
     */
    void prechargeBank(Rank& rank_ref, Bank& bank_ref,
                       Tick pre_at, bool trace = true);

    /**
     * Used for debugging to observe the contents of the queues.
     */
    void printQs() const;

    /**
     * Burst-align an address.
     *
     * @param addr The potentially unaligned address
     *
     * @return An address aligned to a DRAM burst
     */
    Addr burstAlign(Addr addr) const { return (addr & ~(Addr(burstSize - 1))); }
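
    // Worked example (illustrative only): with a 64-byte burst,
    // burstSize - 1 == 0x3f, so
    //
    //     burstAlign(0x1234) == 0x1234 & ~0x3f == 0x1200
    //
    // i.e. addresses 0x1200-0x123f all map to the same burst, which is
    // what makes the isInWriteQueue lookup below work.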
For all logical purposes such 944 * as sizing the read queue, this and the main read queue need to 945 * be added together. 946 */ 947 std::deque<DRAMPacket*> respQueue; 948 949 /** 950 * Vector of ranks 951 */ 952 std::vector<Rank*> ranks; 953 954 /** 955 * The following are basic design parameters of the memory 956 * controller, and are initialized based on parameter values. 957 * The rowsPerBank is determined based on the capacity, number of 958 * ranks and banks, the burst size, and the row buffer size. 959 */ 960 const uint32_t deviceSize; 961 const uint32_t deviceBusWidth; 962 const uint32_t burstLength; 963 const uint32_t deviceRowBufferSize; 964 const uint32_t devicesPerRank; 965 const uint32_t burstSize; 966 const uint32_t rowBufferSize; 967 const uint32_t columnsPerRowBuffer; 968 const uint32_t columnsPerStripe; 969 const uint32_t ranksPerChannel; 970 const uint32_t bankGroupsPerRank; 971 const bool bankGroupArch; 972 const uint32_t banksPerRank; 973 const uint32_t channels; 974 uint32_t rowsPerBank; 975 const uint32_t readBufferSize; 976 const uint32_t writeBufferSize; 977 const uint32_t writeHighThreshold; 978 const uint32_t writeLowThreshold; 979 const uint32_t minWritesPerSwitch; 980 uint32_t writesThisTime; 981 uint32_t readsThisTime; 982 983 /** 984 * Basic memory timing parameters initialized based on parameter 985 * values. 986 */ 987 const Tick M5_CLASS_VAR_USED tCK; 988 const Tick tRTW; 989 const Tick tCS; 990 const Tick tBURST; 991 const Tick tCCD_L_WR; 992 const Tick tCCD_L; 993 const Tick tRCD; 994 const Tick tCL; 995 const Tick tRP; 996 const Tick tRAS; 997 const Tick tWR; 998 const Tick tRTP; 999 const Tick tRFC; 1000 const Tick tREFI; 1001 const Tick tRRD; 1002 const Tick tRRD_L; 1003 const Tick tXAW; 1004 const Tick tXP; 1005 const Tick tXS; 1006 const uint32_t activationLimit; 1007 const Tick rankToRankDly; 1008 const Tick wrToRdDly; 1009 const Tick rdToWrDly; 1010 1011 /** 1012 * Memory controller configuration initialized based on parameter 1013 * values. 1014 */ 1015 Enums::MemSched memSchedPolicy; 1016 Enums::AddrMap addrMapping; 1017 Enums::PageManage pageMgmt; 1018 1019 /** 1020 * Max column accesses (read and write) per row, before forcefully 1021 * closing it. 1022 */ 1023 const uint32_t maxAccessesPerRow; 1024 1025 /** 1026 * Pipeline latency of the controller frontend. The frontend 1027 * contribution is added to writes (that complete when they are in 1028 * the write buffer) and reads that are serviced the write buffer. 1029 */ 1030 const Tick frontendLatency; 1031 1032 /** 1033 * Pipeline latency of the backend and PHY. Along with the 1034 * frontend contribution, this latency is added to reads serviced 1035 * by the DRAM. 1036 */ 1037 const Tick backendLatency; 1038 1039 /** 1040 * Till when must we wait before issuing next RD/WR burst? 1041 */ 1042 Tick nextBurstAt; 1043 1044 Tick prevArrival; 1045 1046 /** 1047 * The soonest you have to start thinking about the next request 1048 * is the longest access time that can occur before 1049 * nextBurstAt. Assuming you need to precharge, open a new row, 1050 * and access, it is tRP + tRCD + tCL. 
1051 */ 1052 Tick nextReqTime; 1053 1054 // All statistics that the model needs to capture 1055 Stats::Scalar readReqs; 1056 Stats::Scalar writeReqs; 1057 Stats::Scalar readBursts; 1058 Stats::Scalar writeBursts; 1059 Stats::Scalar bytesReadDRAM; 1060 Stats::Scalar bytesReadWrQ; 1061 Stats::Scalar bytesWritten; 1062 Stats::Scalar bytesReadSys; 1063 Stats::Scalar bytesWrittenSys; 1064 Stats::Scalar servicedByWrQ; 1065 Stats::Scalar mergedWrBursts; 1066 Stats::Scalar neitherReadNorWrite; 1067 Stats::Vector perBankRdBursts; 1068 Stats::Vector perBankWrBursts; 1069 Stats::Scalar numRdRetry; 1070 Stats::Scalar numWrRetry; 1071 Stats::Scalar totGap; 1072 Stats::Vector readPktSize; 1073 Stats::Vector writePktSize; 1074 Stats::Vector rdQLenPdf; 1075 Stats::Vector wrQLenPdf; 1076 Stats::Histogram bytesPerActivate; 1077 Stats::Histogram rdPerTurnAround; 1078 Stats::Histogram wrPerTurnAround; 1079 1080 // per-master bytes read and written to memory 1081 Stats::Vector masterReadBytes; 1082 Stats::Vector masterWriteBytes; 1083 1084 // per-master bytes read and written to memory rate 1085 Stats::Formula masterReadRate; 1086 Stats::Formula masterWriteRate; 1087 1088 // per-master read and write serviced memory accesses 1089 Stats::Vector masterReadAccesses; 1090 Stats::Vector masterWriteAccesses; 1091 1092 // per-master read and write total memory access latency 1093 Stats::Vector masterReadTotalLat; 1094 Stats::Vector masterWriteTotalLat; 1095 1096 // per-master raed and write average memory access latency 1097 Stats::Formula masterReadAvgLat; 1098 Stats::Formula masterWriteAvgLat; 1099 1100 // Latencies summed over all requests 1101 Stats::Scalar totQLat; 1102 Stats::Scalar totMemAccLat; 1103 Stats::Scalar totBusLat; 1104 1105 // Average latencies per request 1106 Stats::Formula avgQLat; 1107 Stats::Formula avgBusLat; 1108 Stats::Formula avgMemAccLat; 1109 1110 // Average bandwidth 1111 Stats::Formula avgRdBW; 1112 Stats::Formula avgWrBW; 1113 Stats::Formula avgRdBWSys; 1114 Stats::Formula avgWrBWSys; 1115 Stats::Formula peakBW; 1116 Stats::Formula busUtil; 1117 Stats::Formula busUtilRead; 1118 Stats::Formula busUtilWrite; 1119 1120 // Average queue lengths 1121 Stats::Average avgRdQLen; 1122 Stats::Average avgWrQLen; 1123 1124 // Row hit count and rate 1125 Stats::Scalar readRowHits; 1126 Stats::Scalar writeRowHits; 1127 Stats::Formula readRowHitRate; 1128 Stats::Formula writeRowHitRate; 1129 Stats::Formula avgGap; 1130 1131 // DRAM Power Calculation 1132 Stats::Formula pageHitRate; 1133 1134 // Holds the value of the rank of burst issued 1135 uint8_t activeRank; 1136 1137 // timestamp offset 1138 uint64_t timeStampOffset; 1139 1140 /** The time when stats were last reset used to calculate average power */ 1141 Tick lastStatsResetTick; 1142 1143 /** 1144 * Upstream caches need this packet until true is returned, so 1145 * hold it for deletion until a subsequent call 1146 */ 1147 std::unique_ptr<Packet> pendingDelete; 1148 1149 /** 1150 * This function increments the energy when called. If stats are 1151 * dumped periodically, note accumulated energy values will 1152 * appear in the stats (even if the stats are reset). This is a 1153 * result of the energy values coming from DRAMPower, and there 1154 * is currently no support for resetting the state. 
1155 * 1156 * @param rank Current rank 1157 */ 1158 void updatePowerStats(Rank& rank_ref); 1159 1160 /** 1161 * Function for sorting Command structures based on timeStamp 1162 * 1163 * @param a Memory Command 1164 * @param next Memory Command 1165 * @return true if timeStamp of Command 1 < timeStamp of Command 2 1166 */ 1167 static bool sortTime(const Command& cmd, const Command& cmd_next) { 1168 return cmd.timeStamp < cmd_next.timeStamp; 1169 }; 1170 1171 public: 1172 1173 void regStats() override; 1174 1175 DRAMCtrl(const DRAMCtrlParams* p); 1176 1177 DrainState drain() override; 1178 1179 virtual BaseSlavePort& getSlavePort(const std::string& if_name, 1180 PortID idx = InvalidPortID) override; 1181 1182 virtual void init() override; 1183 virtual void startup() override; 1184 virtual void drainResume() override; 1185 1186 /** 1187 * Return true once refresh is complete for all ranks and there are no 1188 * additional commands enqueued. (only evaluated when draining) 1189 * This will ensure that all banks are closed, power state is IDLE, and 1190 * power stats have been updated 1191 * 1192 * @return true if all ranks have refreshed, with no commands enqueued 1193 * 1194 */ 1195 bool allRanksDrained() const; 1196 1197 protected: 1198 1199 Tick recvAtomic(PacketPtr pkt); 1200 void recvFunctional(PacketPtr pkt); 1201 bool recvTimingReq(PacketPtr pkt); 1202 1203}; 1204 1205#endif //__MEM_DRAM_CTRL_HH__ 1206