dram_ctrl.hh revision 11675:60d18201148d
/*
 * Copyright (c) 2012-2016 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 *          Ani Udipi
 *          Neha Agarwal
 *          Omar Naji
 *          Matthias Jung
 */

/**
 * @file
 * DRAMCtrl declaration
 */

#ifndef __MEM_DRAM_CTRL_HH__
#define __MEM_DRAM_CTRL_HH__

#include <deque>
#include <string>
#include <unordered_set>

#include "base/statistics.hh"
#include "enums/AddrMap.hh"
#include "enums/MemSched.hh"
#include "enums/PageManage.hh"
#include "mem/abstract_mem.hh"
#include "mem/drampower.hh"
#include "mem/qport.hh"
#include "params/DRAMCtrl.hh"
#include "sim/eventq.hh"
/**
 * The DRAM controller is a single-channel memory controller capturing
 * the most important timing constraints associated with a
 * contemporary DRAM. For multi-channel memory systems, the controller
 * is combined with a crossbar model, with the channel address
 * interleaving taking place in the crossbar.
 *
 * As a basic design principle, this controller model is not
 * cycle-callable, but instead uses events to decide: 1) when new
 * decisions can be made, 2) when resources become available, 3) when
 * things are to be considered done, and 4) when to send things
 * back. Through these simple principles, the model delivers high
 * performance and a lot of flexibility, allowing users to evaluate
 * the system impact of a wide range of memory technologies, such as
 * DDR3/4, LPDDR2/3/4, WideIO1/2, HBM and HMC.
 *
 * For more details, please see Hansson et al, "Simulating DRAM
 * controllers for future system architecture exploration",
 * Proc. ISPASS, 2014. If you use this model as part of your research
 * please cite the paper.
 */
class DRAMCtrl : public AbstractMemory
{

  private:

    // For now, make use of a queued slave port to avoid dealing with
    // flow control for the responses being sent back
    class MemoryPort : public QueuedSlavePort
    {

        RespPacketQueue queue;
        DRAMCtrl& memory;

      public:

        MemoryPort(const std::string& name, DRAMCtrl& _memory);

      protected:

        Tick recvAtomic(PacketPtr pkt);

        void recvFunctional(PacketPtr pkt);

        bool recvTimingReq(PacketPtr);

        virtual AddrRangeList getAddrRanges() const;

    };

    /**
     * Our incoming port, for a multi-ported controller add a crossbar
     * in front of it
     */
    MemoryPort port;

    /**
     * Remember if the memory system is in timing mode
     */
    bool isTimingMode;

    /**
     * Remember if we have to retry a request when available.
     */
    bool retryRdReq;
    bool retryWrReq;

    /**
     * Bus state used to control the read/write switching and drive
     * the scheduling of the next request.
     */
    enum BusState {
        READ = 0,
        READ_TO_WRITE,
        WRITE,
        WRITE_TO_READ
    };

    BusState busState;

    /**
     * Simple structure to hold the values needed to keep track of
     * commands for DRAMPower
     */
    struct Command {
        Data::MemCommand::cmds type;
        uint8_t bank;
        Tick timeStamp;

        constexpr Command(Data::MemCommand::cmds _type, uint8_t _bank,
                          Tick time_stamp)
            : type(_type), bank(_bank), timeStamp(time_stamp)
        { }
    };
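    /**
     * Illustrative sketch only: whenever the controller issues a DRAM
     * command (e.g. an activate), the owning rank is expected to record
     * it for DRAMPower roughly as below; the sorting and hand-off happen
     * later in Rank::flushCmdList(). The local names rank_ref, bank_ref
     * and act_tick are hypothetical here.
     *
     * @code
     * // log an ACT to this bank at the time it (will) take place
     * rank_ref.cmdList.push_back(
     *     Command(Data::MemCommand::ACT, bank_ref.bank, act_tick));
     * @endcode
     */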
    /**
     * A basic class to track the bank state, i.e. what row is
     * currently open (if any), when is the bank free to accept a new
     * column (read/write) command, when can it be precharged, and
     * when can it be activated.
     *
     * The bank also keeps track of how many bytes have been accessed
     * in the open row since it was opened.
     */
    class Bank
    {

      public:

        static const uint32_t NO_ROW = -1;

        uint32_t openRow;
        uint8_t bank;
        uint8_t bankgr;

        Tick colAllowedAt;
        Tick preAllowedAt;
        Tick actAllowedAt;

        uint32_t rowAccesses;
        uint32_t bytesAccessed;

        Bank() :
            openRow(NO_ROW), bank(0), bankgr(0),
            colAllowedAt(0), preAllowedAt(0), actAllowedAt(0),
            rowAccesses(0), bytesAccessed(0)
        { }
    };


    /**
     * Rank class includes a vector of banks. Refresh and Power state
     * machines are defined per rank. Events required to change the
     * state of the refresh and power state machine are scheduled per
     * rank. This class allows the implementation of rank-wise refresh
     * and rank-wise power-down.
     */
    class Rank : public EventManager
    {

      private:

        /**
         * The power state captures the different operational states of
         * the DRAM and interacts with the bus read/write state machine,
         * and the refresh state machine. In the idle state all banks are
         * precharged. From there we either go to an auto refresh (as
         * determined by the refresh state machine), or to a precharge
         * power down mode. From idle the memory can also go to the active
         * state (with one or more banks active), and in turn from there
         * to active power down. At the moment we do not capture the deep
         * power down and self-refresh state.
         */
        enum PowerState {
            PWR_IDLE = 0,
            PWR_REF,
            PWR_PRE_PDN,
            PWR_ACT,
            PWR_ACT_PDN
        };

        /**
         * The refresh state is used to control the progress of the
         * refresh scheduling. When normal operation is in progress the
         * refresh state is idle. From there, it progresses to the refresh
         * drain state once tREFI has passed. The refresh drain state
         * captures the DRAM row active state, as it will stay there until
         * all ongoing accesses complete. Thereafter all banks are
         * precharged, and lastly, the DRAM is refreshed.
         */
        enum RefreshState {
            REF_IDLE = 0,
            REF_DRAIN,
            REF_PRE,
            REF_RUN
        };

        /**
         * A reference to the parent DRAMCtrl instance
         */
        DRAMCtrl& memory;

        /**
         * Since we are making decisions out of order, we need to keep
         * track of what power transition is happening at what time, such
         * that we can go back in time and change history. For example, if
         * we precharge all banks and schedule going to the idle state, we
         * might at a later point decide to activate a bank before the
         * transition to idle would have taken place.
         */
        PowerState pwrStateTrans;

        /**
         * Current power state.
         */
        PowerState pwrState;

        /**
         * Track when we transitioned to the current power state
         */
        Tick pwrStateTick;

        /**
         * Current refresh state
         */
        RefreshState refreshState;

        /**
         * Keep track of when a refresh is due.
         */
        Tick refreshDueAt;

        /*
         * Command energies
         */
        Stats::Scalar actEnergy;
        Stats::Scalar preEnergy;
        Stats::Scalar readEnergy;
        Stats::Scalar writeEnergy;
        Stats::Scalar refreshEnergy;

        /*
         * Active Background Energy
         */
        Stats::Scalar actBackEnergy;

        /*
         * Precharge Background Energy
         */
        Stats::Scalar preBackEnergy;

        Stats::Scalar totalEnergy;
        Stats::Scalar averagePower;

        /**
         * Track time spent in each power state.
         */
        Stats::Vector pwrStateTime;

        /**
         * Function to update Power Stats
         */
        void updatePowerStats();

        /**
         * Schedule a power state transition in the future, and
         * potentially override an already scheduled transition.
         *
         * @param pwr_state Power state to transition to
         * @param tick Tick when transition should take place
         */
        void schedulePowerEvent(PowerState pwr_state, Tick tick);

      public:

        /**
         * Current Rank index
         */
        uint8_t rank;

        /**
         * One DRAMPower instance per rank
         */
        DRAMPower power;

        /**
         * List of commands issued, to be sent to DRAMPower at refresh
         * and stats dump. Keep commands here since commands to different
         * banks are added out of order. Will only pass commands up to
         * curTick() to DRAMPower after sorting.
         */
        std::vector<Command> cmdList;

        /**
         * Vector of Banks. Each rank is made of several devices which in
         * turn are made of several banks.
         */
        std::vector<Bank> banks;

        /**
         * To track the number of banks which are currently active for
         * this rank.
         */
        unsigned int numBanksActive;

        /** List to keep track of activate ticks */
        std::deque<Tick> actTicks;
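        /**
         * A hedged sketch (assuming the deque is kept topped up to
         * activationLimit entries, which is not necessarily how the
         * .cc file does its bookkeeping) of how this deque can enforce
         * the tXAW activation window maintained by activateBank():
         *
         * @code
         * // before a new activate at act_tick, consult the oldest
         * // activate among the last activationLimit activates
         * if (act_tick - actTicks.back() < memory.tXAW) {
         *     // too many activates in the rolling window; postpone
         *     act_tick = actTicks.back() + memory.tXAW;
         * }
         * actTicks.push_front(act_tick); // newest at the front
         * actTicks.pop_back();           // oldest leaves the window
         * @endcode
         */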
        Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p);

        const std::string name() const
        {
            return csprintf("%s_%d", memory.name(), rank);
        }

        /**
         * Kick off accounting for power and refresh states and
         * schedule initial refresh.
         *
         * @param ref_tick Tick for first refresh
         */
        void startup(Tick ref_tick);

        /**
         * Stop the refresh events.
         */
        void suspend();

        /**
         * Check if the current rank is available for scheduling.
         *
         * @return True if the rank is idle from a refresh point of view
         */
        bool isAvailable() const { return refreshState == REF_IDLE; }

        /**
         * Let the rank check if it was waiting for requests to drain
         * to allow it to transition states.
         */
        void checkDrainDone();

        /**
         * Push commands out of the cmdList queue that are scheduled at
         * or before curTick() to the DRAMPower library. All commands
         * before curTick() are guaranteed to be complete and can safely
         * be flushed.
         */
        void flushCmdList();

        /*
         * Function to register Stats
         */
        void regStats();

        void processActivateEvent();
        EventWrapper<Rank, &Rank::processActivateEvent>
        activateEvent;

        void processPrechargeEvent();
        EventWrapper<Rank, &Rank::processPrechargeEvent>
        prechargeEvent;

        void processRefreshEvent();
        EventWrapper<Rank, &Rank::processRefreshEvent>
        refreshEvent;

        void processPowerEvent();
        EventWrapper<Rank, &Rank::processPowerEvent>
        powerEvent;

    };

    /**
     * A burst helper helps organize and manage a packet that is larger
     * than the DRAM burst size. A system packet that is larger than the
     * burst size is split into multiple DRAM packets and all those DRAM
     * packets point to a single burst helper such that we know when the
     * whole packet is served.
     */
    class BurstHelper {

      public:

        /** Number of DRAM bursts required for a system packet */
        const unsigned int burstCount;

        /** Number of DRAM bursts serviced so far for a system packet */
        unsigned int burstsServiced;

        BurstHelper(unsigned int _burstCount)
            : burstCount(_burstCount), burstsServiced(0)
        { }
    };
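    /**
     * For illustration, a sketch of how many bursts a system packet of
     * `size` bytes starting at `addr` needs (divCeil comes from
     * base/intmath.hh; the controller's actual splitting logic also
     * clamps each DRAM packet at burst-aligned boundaries):
     *
     * @code
     * Addr offset = addr & (burstSize - 1);
     * unsigned int burst_count = divCeil(offset + size, burstSize);
     * // e.g. burstSize = 64, addr = 0x30, size = 64 -> 2 bursts
     * @endcode
     */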
    /**
     * A DRAM packet stores a packet along with the timestamp of when
     * the packet entered the queue, and also the decoded address.
     */
    class DRAMPacket {

      public:

        /** When did request enter the controller */
        const Tick entryTime;

        /** When will request leave the controller */
        Tick readyTime;

        /** This comes from the outside world */
        const PacketPtr pkt;

        const bool isRead;

        /** Will be populated by address decoder */
        const uint8_t rank;
        const uint8_t bank;
        const uint32_t row;

        /**
         * Bank id is calculated considering banks in all the ranks,
         * e.g. with 2 ranks each with 8 banks, bankId = 0 --> rank0,
         * bank0 and bankId = 8 --> rank1, bank0
         */
        const uint16_t bankId;
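        /**
         * Equivalently, as a worked formula (this is the mapping the
         * example above implies):
         *
         * @code
         * bankId = rank * banksPerRank + bank;
         * // 2 ranks x 8 banks: (rank 1, bank 0) -> 1 * 8 + 0 = 8
         * @endcode
         */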
        /**
         * The starting address of the DRAM packet.
         * This address could be unaligned to burst size boundaries. The
         * reason is to keep the address offset so we can accurately check
         * incoming read packets with packets in the write queue.
         */
        Addr addr;

        /**
         * The size of this dram packet in bytes
         * It is always equal to or smaller than the DRAM burst size
         */
        unsigned int size;

        /**
         * A pointer to the BurstHelper if this DRAMPacket is a split packet
         * If not a split packet (common case), this is set to NULL
         */
        BurstHelper* burstHelper;
        Bank& bankRef;
        Rank& rankRef;

        DRAMPacket(PacketPtr _pkt, bool is_read, uint8_t _rank, uint8_t _bank,
                   uint32_t _row, uint16_t bank_id, Addr _addr,
                   unsigned int _size, Bank& bank_ref, Rank& rank_ref)
            : entryTime(curTick()), readyTime(curTick()),
              pkt(_pkt), isRead(is_read), rank(_rank), bank(_bank), row(_row),
              bankId(bank_id), addr(_addr), size(_size), burstHelper(NULL),
              bankRef(bank_ref), rankRef(rank_ref)
        { }

    };

    /**
     * Bunch of things required to set up "events" in gem5.
     * When event "respondEvent" occurs for example, the method
     * processRespondEvent is called; no parameters are allowed
     * in these methods
     */
    void processNextReqEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processNextReqEvent> nextReqEvent;

    void processRespondEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processRespondEvent> respondEvent;

    /**
     * Check if the read queue has room for more entries
     *
     * @param pktCount The number of entries needed in the read queue
     * @return true if read queue is full, false otherwise
     */
    bool readQueueFull(unsigned int pktCount) const;

    /**
     * Check if the write queue has room for more entries
     *
     * @param pktCount The number of entries needed in the write queue
     * @return true if write queue is full, false otherwise
     */
    bool writeQueueFull(unsigned int pktCount) const;

    /**
     * When a new read comes in, first check if the write queue has a
     * pending request to the same address. If not, decode the
     * address to populate rank/bank/row, create one or multiple
     * "dram_pkt", and push them to the back of the read queue. If
     * this is the only read request in the system, schedule an event
     * to start servicing it.
     *
     * @param pkt The request packet from the outside world
     * @param pktCount The number of DRAM bursts the pkt
     * translates to. If pkt size is larger than one full burst,
     * then pktCount is greater than one.
     */
    void addToReadQueue(PacketPtr pkt, unsigned int pktCount);

    /**
     * Decode the incoming pkt, create a dram_pkt and push to the
     * back of the write queue. If the write queue length is more than
     * the threshold specified by the user, i.e. the queue is beginning
     * to get full, stop reads, and start draining writes.
     *
     * @param pkt The request packet from the outside world
     * @param pktCount The number of DRAM bursts the pkt
     * translates to. If pkt size is larger than one full burst,
     * then pktCount is greater than one.
     */
    void addToWriteQueue(PacketPtr pkt, unsigned int pktCount);

    /**
     * Actually do the DRAM access - figure out the latency it
     * will take to service the req based on bank state, channel state
     * etc., and then update those states to account for this request.
     * Based on this, update the packet's "readyTime" and move it to the
     * response queue from where it will eventually go back to the
     * outside world.
     *
     * @param dram_pkt The DRAM packet created from the outside world pkt
     */
    void doDRAMAccess(DRAMPacket* dram_pkt);

    /**
     * When a packet reaches its "readyTime" in the response queue,
     * use the "access()" method in AbstractMemory to actually
     * create the response packet, and send it back to the outside
     * world requestor.
     *
     * @param pkt The packet from the outside world
     * @param static_latency Static latency to add before sending the packet
     */
    void accessAndRespond(PacketPtr pkt, Tick static_latency);

    /**
     * Address decoder to figure out physical mapping onto ranks,
     * banks, and rows. This function is called multiple times on the same
     * system packet if the packet is larger than a burst of the memory.
     * The dramPktAddr is used for the offset within the packet.
     *
     * @param pkt The packet from the outside world
     * @param dramPktAddr The starting address of the DRAM packet
     * @param size The size of the DRAM packet in bytes
     * @param isRead Is the request for a read or a write to DRAM
     * @return A DRAMPacket pointer with the decoded information
     */
    DRAMPacket* decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned int size,
                           bool isRead);

    /**
     * The memory scheduler/arbiter - picks which request needs to
     * go next, based on the specified policy such as FCFS or FR-FCFS
     * and moves it to the head of the queue.
     * Prioritizes accesses to the same rank as previous burst unless
     * controller is switching command type.
     *
     * @param queue Queued requests to consider
     * @param extra_col_delay Any extra delay due to a read/write switch
     * @return true if a packet is scheduled to a rank which is available,
     * else false
     */
    bool chooseNext(std::deque<DRAMPacket*>& queue, Tick extra_col_delay);

    /**
     * For FR-FCFS policy, reorder the read/write queue depending on row
     * buffer hits and earliest bursts available in DRAM
     *
     * @param queue Queued requests to consider
     * @param extra_col_delay Any extra delay due to a read/write switch
     * @return true if a packet is scheduled to a rank which is available,
     * else false
     */
    bool reorderQueue(std::deque<DRAMPacket*>& queue, Tick extra_col_delay);

    /**
     * Find which are the earliest banks ready to issue an activate
     * for the enqueued requests. Assumes a maximum of 64 banks per DIMM.
     * Also checks if the bank is already prepped.
     *
     * @param queue Queued requests to consider
     * @param min_col_at Time of seamless burst command
     * @return A pair of a one-hot encoded mask of bank indices, and a
     * boolean indicating whether the burst can issue seamlessly, with
     * no gaps
     */
    std::pair<uint64_t, bool> minBankPrep(const std::deque<DRAMPacket*>& queue,
                                          Tick min_col_at) const;

    /**
     * Keep track of when row activations happen, in order to enforce
     * the maximum number of activations in the activation window. The
     * method updates the time that the banks become available based
     * on the current limits.
     *
     * @param rank_ref Reference to the rank
     * @param bank_ref Reference to the bank
     * @param act_tick Time when the activation takes place
     * @param row Index of the row
     */
    void activateBank(Rank& rank_ref, Bank& bank_ref, Tick act_tick,
                      uint32_t row);

    /**
     * Precharge a given bank and also update when the precharge is
     * done. This will also deal with any stats related to the
     * accesses to the open page.
     *
     * @param rank_ref The rank to precharge
     * @param bank_ref The bank to precharge
     * @param pre_at Time when the precharge takes place
     * @param trace If false (e.g. for an auto precharge), do not add
     * the command to the DRAMPower trace
     */
    void prechargeBank(Rank& rank_ref, Bank& bank_ref,
                       Tick pre_at, bool trace = true);

    /**
     * Used for debugging to observe the contents of the queues.
     */
    void printQs() const;

    /**
     * Burst-align an address.
     *
     * @param addr The potentially unaligned address
     *
     * @return An address aligned to a DRAM burst
     */
    Addr burstAlign(Addr addr) const { return (addr & ~(Addr(burstSize - 1))); }
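    /**
     * For example, with a 64-byte burst size:
     *
     * @code
     * burstAlign(0x12345); // == 0x12340 when burstSize == 64
     * @endcode
     */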
    /**
     * The controller's main read and write queues
     */
    std::deque<DRAMPacket*> readQueue;
    std::deque<DRAMPacket*> writeQueue;

    /**
     * To avoid iterating over the write queue to check for
     * overlapping transactions, maintain a set of burst addresses
     * that are currently queued. Since we merge writes to the same
     * location we never have more than one address to the same burst
     * address.
     */
    std::unordered_set<Addr> isInWriteQueue;

    /**
     * Response queue where read packets wait after we're done working
     * with them, but it's not time to send the response yet. The
     * responses are stored separately, mostly to keep the code clean
     * and help with event scheduling. For all logical purposes such
     * as sizing the read queue, this and the main read queue need to
     * be added together.
     */
    std::deque<DRAMPacket*> respQueue;

    /**
     * Vector of ranks
     */
    std::vector<Rank*> ranks;

    /**
     * The following are basic design parameters of the memory
     * controller, and are initialized based on parameter values.
     * The rowsPerBank is determined based on the capacity, number of
     * ranks and banks, the burst size, and the row buffer size.
     */
    const uint32_t deviceSize;
    const uint32_t deviceBusWidth;
    const uint32_t burstLength;
    const uint32_t deviceRowBufferSize;
    const uint32_t devicesPerRank;
    const uint32_t burstSize;
    const uint32_t rowBufferSize;
    const uint32_t columnsPerRowBuffer;
    const uint32_t columnsPerStripe;
    const uint32_t ranksPerChannel;
    const uint32_t bankGroupsPerRank;
    const bool bankGroupArch;
    const uint32_t banksPerRank;
    const uint32_t channels;
    uint32_t rowsPerBank;
    const uint32_t readBufferSize;
    const uint32_t writeBufferSize;
    const uint32_t writeHighThreshold;
    const uint32_t writeLowThreshold;
    const uint32_t minWritesPerSwitch;
    uint32_t writesThisTime;
    uint32_t readsThisTime;
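    /**
     * A simplified sketch of the hysteresis these thresholds are meant
     * to implement (the real decision logic lives in
     * processNextReqEvent() and also considers pending reads and the
     * read/write turnaround states):
     *
     * @code
     * if (writeQueue.size() > writeHighThreshold)
     *     busState = WRITE;   // stop reads, start draining writes
     * if (busState == WRITE && writesThisTime >= minWritesPerSwitch &&
     *     writeQueue.size() <= writeLowThreshold)
     *     busState = READ;    // enough writes drained, resume reads
     * @endcode
     */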
    /**
     * Basic memory timing parameters initialized based on parameter
     * values.
     */
    const Tick M5_CLASS_VAR_USED tCK;
    const Tick tWTR;
    const Tick tRTW;
    const Tick tCS;
    const Tick tBURST;
    const Tick tCCD_L;
    const Tick tRCD;
    const Tick tCL;
    const Tick tRP;
    const Tick tRAS;
    const Tick tWR;
    const Tick tRTP;
    const Tick tRFC;
    const Tick tREFI;
    const Tick tRRD;
    const Tick tRRD_L;
    const Tick tXAW;
    const Tick tXP;
    const Tick tXS;
    const uint32_t activationLimit;

    /**
     * Memory controller configuration initialized based on parameter
     * values.
     */
    Enums::MemSched memSchedPolicy;
    Enums::AddrMap addrMapping;
    Enums::PageManage pageMgmt;

    /**
     * Max column accesses (read and write) per row, before forcefully
     * closing it.
     */
    const uint32_t maxAccessesPerRow;

    /**
     * Pipeline latency of the controller frontend. The frontend
     * contribution is added to writes (that complete when they are in
     * the write buffer) and reads that are serviced by the write buffer.
     */
    const Tick frontendLatency;

    /**
     * Pipeline latency of the backend and PHY. Along with the
     * frontend contribution, this latency is added to reads serviced
     * by the DRAM.
     */
    const Tick backendLatency;

    /**
     * Till when has the main data bus been spoken for already?
     */
    Tick busBusyUntil;

    Tick prevArrival;

    /**
     * The soonest you have to start thinking about the next request
     * is the longest access time that can occur before
     * busBusyUntil. Assuming you need to precharge, open a new row,
     * and access, it is tRP + tRCD + tCL.
     */
    Tick nextReqTime;

    // All statistics that the model needs to capture
    Stats::Scalar readReqs;
    Stats::Scalar writeReqs;
    Stats::Scalar readBursts;
    Stats::Scalar writeBursts;
    Stats::Scalar bytesReadDRAM;
    Stats::Scalar bytesReadWrQ;
    Stats::Scalar bytesWritten;
    Stats::Scalar bytesReadSys;
    Stats::Scalar bytesWrittenSys;
    Stats::Scalar servicedByWrQ;
    Stats::Scalar mergedWrBursts;
    Stats::Scalar neitherReadNorWrite;
    Stats::Vector perBankRdBursts;
    Stats::Vector perBankWrBursts;
    Stats::Scalar numRdRetry;
    Stats::Scalar numWrRetry;
    Stats::Scalar totGap;
    Stats::Vector readPktSize;
    Stats::Vector writePktSize;
    Stats::Vector rdQLenPdf;
    Stats::Vector wrQLenPdf;
    Stats::Histogram bytesPerActivate;
    Stats::Histogram rdPerTurnAround;
    Stats::Histogram wrPerTurnAround;

    // Latencies summed over all requests
    Stats::Scalar totQLat;
    Stats::Scalar totMemAccLat;
    Stats::Scalar totBusLat;

    // Average latencies per request
    Stats::Formula avgQLat;
    Stats::Formula avgBusLat;
    Stats::Formula avgMemAccLat;

    // Average bandwidth
    Stats::Formula avgRdBW;
    Stats::Formula avgWrBW;
    Stats::Formula avgRdBWSys;
    Stats::Formula avgWrBWSys;
    Stats::Formula peakBW;
    Stats::Formula busUtil;
    Stats::Formula busUtilRead;
    Stats::Formula busUtilWrite;

    // Average queue lengths
    Stats::Average avgRdQLen;
    Stats::Average avgWrQLen;

    // Row hit count and rate
    Stats::Scalar readRowHits;
    Stats::Scalar writeRowHits;
    Stats::Formula readRowHitRate;
    Stats::Formula writeRowHitRate;
    Stats::Formula avgGap;

    // DRAM Power Calculation
    Stats::Formula pageHitRate;

    // Holds the value of the rank of burst issued
    uint8_t activeRank;

    // timestamp offset
    uint64_t timeStampOffset;

    /**
     * Upstream caches need this packet until true is returned, so
     * hold it for deletion until a subsequent call
     */
    std::unique_ptr<Packet> pendingDelete;

    /**
     * This function increments the energy when called. If stats are
     * dumped periodically, note accumulated energy values will
     * appear in the stats (even if the stats are reset). This is a
     * result of the energy values coming from DRAMPower, and there
     * is currently no support for resetting the state.
     *
     * @param rank_ref Current rank
     */
    void updatePowerStats(Rank& rank_ref);

    /**
     * Function for sorting Command structures based on timeStamp
     *
     * @param cmd Memory Command
     * @param cmd_next Memory Command
     * @return true if timeStamp of cmd is less than timeStamp of cmd_next
     */
    static bool sortTime(const Command& cmd, const Command& cmd_next)
    {
        return cmd.timeStamp < cmd_next.timeStamp;
    }
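    /**
     * Typical use, sorting the recorded per-rank commands by time
     * before handing them to DRAMPower (this is the role
     * Rank::flushCmdList() plays):
     *
     * @code
     * std::sort(cmdList.begin(), cmdList.end(), DRAMCtrl::sortTime);
     * @endcode
     */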
  public:

    void regStats() override;

    DRAMCtrl(const DRAMCtrlParams* p);

    DrainState drain() override;

    virtual BaseSlavePort& getSlavePort(const std::string& if_name,
                                        PortID idx = InvalidPortID) override;

    virtual void init() override;
    virtual void startup() override;
    virtual void drainResume() override;

  protected:

    Tick recvAtomic(PacketPtr pkt);
    void recvFunctional(PacketPtr pkt);
    bool recvTimingReq(PacketPtr pkt);

};

#endif // __MEM_DRAM_CTRL_HH__