dram_ctrl.hh revision 11190:0964165d1857
/*
 * Copyright (c) 2012-2015 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 *          Ani Udipi
 *          Neha Agarwal
 *          Omar Naji
 */

/**
 * @file
 * DRAMCtrl declaration
 */

#ifndef __MEM_DRAM_CTRL_HH__
#define __MEM_DRAM_CTRL_HH__

#include <deque>
#include <string>
#include <unordered_set>

#include "base/statistics.hh"
#include "enums/AddrMap.hh"
#include "enums/MemSched.hh"
#include "enums/PageManage.hh"
#include "mem/abstract_mem.hh"
#include "mem/drampower.hh"
#include "mem/qport.hh"
#include "params/DRAMCtrl.hh"
#include "sim/eventq.hh"
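/*
 * Note (illustrative, not part of the original interface): the
 * controller is event-driven rather than clocked. The recurring idiom
 * in this file pairs a handler method with an EventWrapper member,
 * and work is scheduled for an absolute tick, e.g.
 *
 * @code
 *     void processNextReqEvent();
 *     EventWrapper<DRAMCtrl, &DRAMCtrl::processNextReqEvent> nextReqEvent;
 *
 *     // somewhere in DRAMCtrl: wake up once a worst-case bank
 *     // conflict would be resolved (tRP + tRCD + tCL, see the
 *     // nextReqTime comment further down)
 *     schedule(nextReqEvent, curTick() + tRP + tRCD + tCL);
 * @endcode
 *
 * The exact scheduling points are implementation details of
 * dram_ctrl.cc; this is only a sketch of the pattern.
 */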
/**
 * The DRAM controller is a single-channel memory controller capturing
 * the most important timing constraints associated with a
 * contemporary DRAM. For multi-channel memory systems, the controller
 * is combined with a crossbar model, with the channel address
 * interleaving taking place in the crossbar.
 *
 * As a basic design principle, this controller model is not cycle
 * callable, but instead uses events to decide: 1) when new decisions
 * can be made, 2) when resources become available, 3) when things are
 * to be considered done, and 4) when to send things back. Through
 * these simple principles, the model delivers high performance and a
 * lot of flexibility, allowing users to evaluate the system impact of
 * a wide range of memory technologies, such as DDR3/4, LPDDR2/3/4,
 * WideIO1/2, HBM and HMC.
 *
 * For more details, please see Hansson et al, "Simulating DRAM
 * controllers for future system architecture exploration",
 * Proc. ISPASS, 2014. If you use this model as part of your research
 * please cite the paper.
 */
class DRAMCtrl : public AbstractMemory
{

  private:

    // For now, make use of a queued slave port to avoid dealing with
    // flow control for the responses being sent back
    class MemoryPort : public QueuedSlavePort
    {

        RespPacketQueue queue;
        DRAMCtrl& memory;

      public:

        MemoryPort(const std::string& name, DRAMCtrl& _memory);

      protected:

        Tick recvAtomic(PacketPtr pkt);

        void recvFunctional(PacketPtr pkt);

        bool recvTimingReq(PacketPtr);

        virtual AddrRangeList getAddrRanges() const;

    };

    /**
     * Our incoming port, for a multi-ported controller add a crossbar
     * in front of it
     */
    MemoryPort port;

    /**
     * Remember if the memory system is in timing mode
     */
    bool isTimingMode;

    /**
     * Remember if we have to retry a request when available.
     */
    bool retryRdReq;
    bool retryWrReq;

    /**
     * Bus state used to control the read/write switching and drive
     * the scheduling of the next request.
     */
    enum BusState {
        READ = 0,
        READ_TO_WRITE,
        WRITE,
        WRITE_TO_READ
    };

    BusState busState;

    /**
     * A basic class to track the bank state, i.e. what row is
     * currently open (if any), when the bank is free to accept a new
     * column (read/write) command, when it can be precharged, and
     * when it can be activated.
     *
     * The bank also keeps track of how many bytes have been accessed
     * in the open row since it was opened.
     */
    class Bank
    {

      public:

        static const uint32_t NO_ROW = -1;

        uint32_t openRow;
        uint8_t bank;
        uint8_t bankgr;

        Tick colAllowedAt;
        Tick preAllowedAt;
        Tick actAllowedAt;

        uint32_t rowAccesses;
        uint32_t bytesAccessed;

        Bank() :
            openRow(NO_ROW), bank(0), bankgr(0),
            colAllowedAt(0), preAllowedAt(0), actAllowedAt(0),
            rowAccesses(0), bytesAccessed(0)
        { }
    };
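    /*
     * A sketch of how the Bank timestamps are meant to be used (the
     * authoritative updates live in dram_ctrl.cc): an activate issued
     * at act_tick pushes the earliest column and precharge commands
     * out by the core DRAM timings, roughly
     *
     * @code
     *     bank.openRow = row;
     *     bank.colAllowedAt = std::max(act_tick + tRCD, bank.colAllowedAt);
     *     bank.preAllowedAt = act_tick + tRAS;
     * @endcode
     *
     * i.e. a read/write to the row must wait tRCD after the ACT, and
     * the row must stay open for at least tRAS before it can be
     * precharged again.
     */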
    /**
     * Rank class includes a vector of banks. Refresh and Power state
     * machines are defined per rank. Events required to change the
     * state of the refresh and power state machine are scheduled per
     * rank. This class allows the implementation of rank-wise refresh
     * and rank-wise power-down.
     */
    class Rank : public EventManager
    {

      private:

        /**
         * The power state captures the different operational states of
         * the DRAM and interacts with the bus read/write state machine,
         * and the refresh state machine. In the idle state all banks are
         * precharged. From there we either go to an auto refresh (as
         * determined by the refresh state machine), or to a precharge
         * power down mode. From idle the memory can also go to the active
         * state (with one or more banks active), and in turn from there
         * to active power down. At the moment we do not capture the deep
         * power down and self-refresh state.
         */
        enum PowerState {
            PWR_IDLE = 0,
            PWR_REF,
            PWR_PRE_PDN,
            PWR_ACT,
            PWR_ACT_PDN
        };

        /**
         * The refresh state is used to control the progress of the
         * refresh scheduling. When normal operation is in progress the
         * refresh state is idle. From there, it progresses to the refresh
         * drain state once tREFI has passed. The refresh drain state
         * captures the DRAM row active state, as it will stay there until
         * all ongoing accesses complete. Thereafter all banks are
         * precharged, and lastly, the DRAM is refreshed.
         */
        enum RefreshState {
            REF_IDLE = 0,
            REF_DRAIN,
            REF_PRE,
            REF_RUN
        };

        /**
         * A reference to the parent DRAMCtrl instance
         */
        DRAMCtrl& memory;

        /**
         * Since we are taking decisions out of order, we need to keep
         * track of what power transition is happening at what time, such
         * that we can go back in time and change history. For example, if
         * we precharge all banks and schedule going to the idle state, we
         * might at a later point decide to activate a bank before the
         * transition to idle would have taken place.
         */
        PowerState pwrStateTrans;

        /**
         * Current power state.
         */
        PowerState pwrState;

        /**
         * Track when we transitioned to the current power state
         */
        Tick pwrStateTick;

        /**
         * Current refresh state
         */
        RefreshState refreshState;

        /**
         * Keep track of when a refresh is due.
         */
        Tick refreshDueAt;

        /*
         * Command energies
         */
        Stats::Scalar actEnergy;
        Stats::Scalar preEnergy;
        Stats::Scalar readEnergy;
        Stats::Scalar writeEnergy;
        Stats::Scalar refreshEnergy;

        /*
         * Active Background Energy
         */
        Stats::Scalar actBackEnergy;

        /*
         * Precharge Background Energy
         */
        Stats::Scalar preBackEnergy;

        Stats::Scalar totalEnergy;
        Stats::Scalar averagePower;

        /**
         * Track time spent in each power state.
         */
        Stats::Vector pwrStateTime;

        /**
         * Function to update Power Stats
         */
        void updatePowerStats();

        /**
         * Schedule a power state transition in the future, and
         * potentially override an already scheduled transition.
         *
         * @param pwr_state Power state to transition to
         * @param tick Tick when transition should take place
         */
        void schedulePowerEvent(PowerState pwr_state, Tick tick);

      public:

        /**
         * Current Rank index
         */
        uint8_t rank;

        /**
         * One DRAMPower instance per rank
         */
        DRAMPower power;

        /**
         * Vector of Banks. Each rank is made of several devices which in
         * turn are made from several banks.
         */
        std::vector<Bank> banks;

        /**
         * To track the number of banks which are currently active for
         * this rank.
         */
        unsigned int numBanksActive;

        /** List to keep track of activate ticks */
        std::deque<Tick> actTicks;
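        /*
         * Illustrative sketch: actTicks effectively implements the
         * rolling activation window (tXAW). With an activationLimit of
         * N, the deque holds the times of the last N activates, and a
         * new ACT at act_tick is only legal once the oldest entry has
         * aged out of the window, conceptually
         *
         * @code
         *     if (actTicks.back() && (act_tick - actTicks.back()) < tXAW)
         *         act_tick = actTicks.back() + tXAW; // defer the ACT
         * @endcode
         *
         * The actual enforcement is done in DRAMCtrl::activateBank();
         * this is only a sketch of the invariant being maintained.
         */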
        Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p);

        const std::string name() const
        {
            return csprintf("%s_%d", memory.name(), rank);
        }

        /**
         * Kick off accounting for power and refresh states and
         * schedule initial refresh.
         *
         * @param ref_tick Tick for first refresh
         */
        void startup(Tick ref_tick);

        /**
         * Stop the refresh events.
         */
        void suspend();

        /**
         * Check if the current rank is available for scheduling.
         *
         * @return true if the rank is idle from a refresh point of view
         */
        bool isAvailable() const { return refreshState == REF_IDLE; }

        /**
         * Let the rank check if it was waiting for requests to drain
         * to allow it to transition states.
         */
        void checkDrainDone();

        /*
         * Function to register Stats
         */
        void regStats();

        void processActivateEvent();
        EventWrapper<Rank, &Rank::processActivateEvent>
        activateEvent;

        void processPrechargeEvent();
        EventWrapper<Rank, &Rank::processPrechargeEvent>
        prechargeEvent;

        void processRefreshEvent();
        EventWrapper<Rank, &Rank::processRefreshEvent>
        refreshEvent;

        void processPowerEvent();
        EventWrapper<Rank, &Rank::processPowerEvent>
        powerEvent;

    };

    /**
     * A burst helper helps organize and manage a packet that is larger
     * than the DRAM burst size. A system packet that is larger than the
     * burst size is split into multiple DRAM packets and all those DRAM
     * packets point to a single burst helper such that we know when the
     * whole packet is served.
     */
    class BurstHelper {

      public:

        /** Number of DRAM bursts required for a system packet */
        const unsigned int burstCount;

        /** Number of DRAM bursts serviced so far for a system packet */
        unsigned int burstsServiced;

        BurstHelper(unsigned int _burstCount)
            : burstCount(_burstCount), burstsServiced(0)
        { }
    };

    /**
     * A DRAM packet stores a packet along with the timestamp of when
     * the packet entered the queue, and also the decoded address.
     */
    class DRAMPacket {

      public:

        /** When did request enter the controller */
        const Tick entryTime;

        /** When will request leave the controller */
        Tick readyTime;

        /** This comes from the outside world */
        const PacketPtr pkt;

        const bool isRead;

        /** Will be populated by address decoder */
        const uint8_t rank;
        const uint8_t bank;
        const uint32_t row;

        /**
         * Bank id is calculated considering banks in all the ranks
         * eg: 2 ranks each with 8 banks, then bankId = 0 --> rank0, bank0
         * and bankId = 8 --> rank1, bank0
         */
        const uint16_t bankId;
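        /*
         * Worked example (matching the comment above): the flat id is
         * computed as
         *
         * @code
         *     bankId = rank * banksPerRank + bank;
         * @endcode
         *
         * so with ranksPerChannel = 2 and banksPerRank = 8, rank 1,
         * bank 0 maps to bankId 8, and rank 1, bank 7 maps to bankId 15.
         */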
        /**
         * The starting address of the DRAM packet.
         * This address could be unaligned to burst size boundaries. The
         * reason is to keep the address offset so we can accurately check
         * incoming read packets with packets in the write queue.
         */
        Addr addr;

        /**
         * The size of this dram packet in bytes
         * It is always equal to or smaller than the DRAM burst size
         */
        unsigned int size;

        /**
         * A pointer to the BurstHelper if this DRAMPacket is a split packet
         * If not a split packet (common case), this is set to NULL
         */
        BurstHelper* burstHelper;
        Bank& bankRef;
        Rank& rankRef;

        DRAMPacket(PacketPtr _pkt, bool is_read, uint8_t _rank, uint8_t _bank,
                   uint32_t _row, uint16_t bank_id, Addr _addr,
                   unsigned int _size, Bank& bank_ref, Rank& rank_ref)
            : entryTime(curTick()), readyTime(curTick()),
              pkt(_pkt), isRead(is_read), rank(_rank), bank(_bank), row(_row),
              bankId(bank_id), addr(_addr), size(_size), burstHelper(NULL),
              bankRef(bank_ref), rankRef(rank_ref)
        { }

    };

    /**
     * Bunch of things required to set up "events" in gem5.
     * When event "respondEvent" occurs for example, the method
     * processRespondEvent is called; no parameters are allowed
     * in these methods
     */
    void processNextReqEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processNextReqEvent> nextReqEvent;

    void processRespondEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processRespondEvent> respondEvent;

    /**
     * Check if the read queue has room for more entries
     *
     * @param pktCount The number of entries needed in the read queue
     * @return true if read queue is full, false otherwise
     */
    bool readQueueFull(unsigned int pktCount) const;

    /**
     * Check if the write queue has room for more entries
     *
     * @param pktCount The number of entries needed in the write queue
     * @return true if write queue is full, false otherwise
     */
    bool writeQueueFull(unsigned int pktCount) const;
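    /*
     * Note on sizing (a sketch, assuming the definitions in
     * dram_ctrl.cc): since read responses logically still occupy read
     * buffer space, fullness is checked against the read and response
     * queues combined, along the lines of
     *
     * @code
     *     return (readQueue.size() + respQueue.size() + pktCount)
     *         > readBufferSize;
     * @endcode
     *
     * mirroring the respQueue comment further down in this file.
     */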
    /**
     * When a new read comes in, first check if the write queue has a
     * pending request to the same address. If not, decode the
     * address to populate rank/bank/row, create one or multiple
     * "dram_pkt", and push them to the back of the read queue.
     * If this is the only read request in the system, schedule an
     * event to start servicing it.
     *
     * @param pkt The request packet from the outside world
     * @param pktCount The number of DRAM bursts the pkt
     * translates to. If pkt size is larger than one full burst,
     * then pktCount is greater than one.
     */
    void addToReadQueue(PacketPtr pkt, unsigned int pktCount);

    /**
     * Decode the incoming pkt, create a dram_pkt and push to the
     * back of the write queue. If the write queue length is more than
     * the threshold specified by the user, i.e. the queue is beginning
     * to get full, stop reads, and start draining writes.
     *
     * @param pkt The request packet from the outside world
     * @param pktCount The number of DRAM bursts the pkt
     * translates to. If pkt size is larger than one full burst,
     * then pktCount is greater than one.
     */
    void addToWriteQueue(PacketPtr pkt, unsigned int pktCount);

    /**
     * Actually do the DRAM access - figure out the latency it
     * will take to service the req based on bank state, channel state etc,
     * and then update those states to account for this request. Based
     * on this, update the packet's "readyTime" and move it to the
     * response queue from where it will eventually go back to the
     * outside world.
     *
     * @param dram_pkt The DRAM packet created from the outside world pkt
     */
    void doDRAMAccess(DRAMPacket* dram_pkt);

    /**
     * When a packet reaches its "readyTime" in the response Q,
     * use the "access()" method in AbstractMemory to actually
     * create the response packet, and send it back to the outside
     * world requestor.
     *
     * @param pkt The packet from the outside world
     * @param static_latency Static latency to add before sending the packet
     */
    void accessAndRespond(PacketPtr pkt, Tick static_latency);

    /**
     * Address decoder to figure out physical mapping onto ranks,
     * banks, and rows. This function is called multiple times on the same
     * system packet if the packet is larger than a burst of the memory.
     * The dramPktAddr is used for the offset within the packet.
     *
     * @param pkt The packet from the outside world
     * @param dramPktAddr The starting address of the DRAM packet
     * @param size The size of the DRAM packet in bytes
     * @param isRead Is the request for a read or a write to DRAM
     * @return A DRAMPacket pointer with the decoded information
     */
    DRAMPacket* decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned int size,
                           bool isRead);

    /**
     * The memory scheduler/arbiter - picks which request needs to
     * go next, based on the specified policy such as FCFS or FR-FCFS
     * and moves it to the head of the queue.
     * Prioritizes accesses to the same rank as previous burst unless
     * controller is switching command type.
     *
     * @param queue Queued requests to consider
     * @param extra_col_delay Any extra delay due to a read/write switch
     * @return true if a packet is scheduled to a rank which is available,
     * else false
     */
    bool chooseNext(std::deque<DRAMPacket*>& queue, Tick extra_col_delay);

    /**
     * For FR-FCFS policy, reorder the read/write queue depending on row
     * buffer hits and earliest bursts available in DRAM
     *
     * @param queue Queued requests to consider
     * @param extra_col_delay Any extra delay due to a read/write switch
     * @return true if a packet is scheduled to a rank which is available,
     * else false
     */
    bool reorderQueue(std::deque<DRAMPacket*>& queue, Tick extra_col_delay);

    /**
     * Find which are the earliest banks ready to issue an activate
     * for the enqueued requests. Assumes a maximum of 64 banks per DIMM.
     * Also checks if the bank is already prepped.
     *
     * @param queue Queued requests to consider
     * @param min_col_at Time of seamless burst command
     * @return A pair of a one-hot encoded mask of bank indices, and a
     * boolean indicating whether the burst can issue seamlessly, with
     * no gaps
     */
    std::pair<uint64_t, bool> minBankPrep(const std::deque<DRAMPacket*>& queue,
                                          Tick min_col_at) const;

    /**
     * Keep track of when row activations happen, in order to enforce
     * the maximum number of activations in the activation window. The
     * method updates the time that the banks become available based
     * on the current limits.
     *
     * @param rank_ref Reference to the rank
     * @param bank_ref Reference to the bank
     * @param act_tick Time when the activation takes place
     * @param row Index of the row
     */
    void activateBank(Rank& rank_ref, Bank& bank_ref, Tick act_tick,
                      uint32_t row);
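    /*
     * Illustrative note on the minBankPrep() return value above: the
     * first element of the pair is a mask over the flat bank ids
     * (hence the 64-bank assumption), where a set bit marks a bank
     * among the earliest ready, e.g.
     *
     * @code
     *     bank_mask |= UINT64_C(1) << (rank * banksPerRank + bank);
     * @endcode
     *
     * sets bit 8 for rank 1, bank 0 when banksPerRank is 8. The second
     * element is true if the corresponding burst could issue
     * back-to-back with no gap on the data bus.
     */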
    /**
     * Precharge a given bank and also update when the precharge is
     * done. This will also deal with any stats related to the
     * accesses to the open page.
     *
     * @param rank_ref The rank to precharge
     * @param bank_ref The bank to precharge
     * @param pre_at Time when the precharge takes place
     * @param trace If false (e.g. for an auto precharge), do not add
     * the command to the trace
     */
    void prechargeBank(Rank& rank_ref, Bank& bank_ref,
                       Tick pre_at, bool trace = true);

    /**
     * Used for debugging to observe the contents of the queues.
     */
    void printQs() const;

    /**
     * Burst-align an address.
     *
     * @param addr The potentially unaligned address
     *
     * @return An address aligned to a DRAM burst
     */
    Addr burstAlign(Addr addr) const { return (addr & ~(Addr(burstSize - 1))); }

    /**
     * The controller's main read and write queues
     */
    std::deque<DRAMPacket*> readQueue;
    std::deque<DRAMPacket*> writeQueue;

    /**
     * To avoid iterating over the write queue to check for
     * overlapping transactions, maintain a set of burst addresses
     * that are currently queued. Since we merge writes to the same
     * location we never have more than one address to the same burst
     * address.
     */
    std::unordered_set<Addr> isInWriteQueue;

    /**
     * Response queue where read packets wait after we're done working
     * with them, but it's not time to send the response yet. The
     * responses are stored separately mostly to keep the code clean
     * and help with event scheduling. For all logical purposes such
     * as sizing the read queue, this and the main read queue need to
     * be added together.
     */
    std::deque<DRAMPacket*> respQueue;

    /**
     * Vector of ranks
     */
    std::vector<Rank*> ranks;

    /**
     * The following are basic design parameters of the memory
     * controller, and are initialized based on parameter values.
     * The rowsPerBank is determined based on the capacity, number of
     * ranks and banks, the burst size, and the row buffer size.
     */
    const uint32_t deviceSize;
    const uint32_t deviceBusWidth;
    const uint32_t burstLength;
    const uint32_t deviceRowBufferSize;
    const uint32_t devicesPerRank;
    const uint32_t burstSize;
    const uint32_t rowBufferSize;
    const uint32_t columnsPerRowBuffer;
    const uint32_t columnsPerStripe;
    const uint32_t ranksPerChannel;
    const uint32_t bankGroupsPerRank;
    const bool bankGroupArch;
    const uint32_t banksPerRank;
    const uint32_t channels;
    uint32_t rowsPerBank;
    const uint32_t readBufferSize;
    const uint32_t writeBufferSize;
    const uint32_t writeHighThreshold;
    const uint32_t writeLowThreshold;
    const uint32_t minWritesPerSwitch;
    uint32_t writesThisTime;
    uint32_t readsThisTime;
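    /*
     * How the derived parameters above relate (a sketch of the
     * constructor arithmetic, assuming the usual JEDEC-style
     * organisation):
     *
     * @code
     *     burstSize = devicesPerRank * burstLength * deviceBusWidth / 8;
     *     rowBufferSize = devicesPerRank * deviceRowBufferSize;
     *     columnsPerRowBuffer = rowBufferSize / burstSize;
     * @endcode
     *
     * e.g. a rank built from 8 x8 devices with a burst length of 8
     * yields a 64-byte burstSize. rowsPerBank is then inferred from
     * the total capacity divided by
     * rowBufferSize * banksPerRank * ranksPerChannel.
     */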
    /**
     * Basic memory timing parameters initialized based on parameter
     * values.
     */
    const Tick M5_CLASS_VAR_USED tCK;
    const Tick tWTR;
    const Tick tRTW;
    const Tick tCS;
    const Tick tBURST;
    const Tick tCCD_L;
    const Tick tRCD;
    const Tick tCL;
    const Tick tRP;
    const Tick tRAS;
    const Tick tWR;
    const Tick tRTP;
    const Tick tRFC;
    const Tick tREFI;
    const Tick tRRD;
    const Tick tRRD_L;
    const Tick tXAW;
    const uint32_t activationLimit;

    /**
     * Memory controller configuration initialized based on parameter
     * values.
     */
    Enums::MemSched memSchedPolicy;
    Enums::AddrMap addrMapping;
    Enums::PageManage pageMgmt;

    /**
     * Max column accesses (read and write) per row, before forcefully
     * closing it.
     */
    const uint32_t maxAccessesPerRow;

    /**
     * Pipeline latency of the controller frontend. The frontend
     * contribution is added to writes (that complete when they are in
     * the write buffer) and reads that are serviced by the write buffer.
     */
    const Tick frontendLatency;

    /**
     * Pipeline latency of the backend and PHY. Along with the
     * frontend contribution, this latency is added to reads serviced
     * by the DRAM.
     */
    const Tick backendLatency;

    /**
     * Till when has the main data bus been spoken for already?
     */
    Tick busBusyUntil;

    Tick prevArrival;

    /**
     * The soonest you have to start thinking about the next request
     * is the longest access time that can occur before
     * busBusyUntil. Assuming you need to precharge, open a new row,
     * and access, it is tRP + tRCD + tCL.
     */
    Tick nextReqTime;
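    /*
     * Worked example for the comment above, using nominal DDR3-1600
     * (11-11-11) timings of tRP = tRCD = tCL = 13.75 ns: the
     * worst-case gap from a bank conflict is
     *
     * @code
     *     tRP + tRCD + tCL = 41.25 ns
     * @endcode
     *
     * so nextReqTime sits that far in advance of busBusyUntil. The
     * figures are only illustrative; actual values come from the
     * DRAMCtrl parameters.
     */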
    // All statistics that the model needs to capture
    Stats::Scalar readReqs;
    Stats::Scalar writeReqs;
    Stats::Scalar readBursts;
    Stats::Scalar writeBursts;
    Stats::Scalar bytesReadDRAM;
    Stats::Scalar bytesReadWrQ;
    Stats::Scalar bytesWritten;
    Stats::Scalar bytesReadSys;
    Stats::Scalar bytesWrittenSys;
    Stats::Scalar servicedByWrQ;
    Stats::Scalar mergedWrBursts;
    Stats::Scalar neitherReadNorWrite;
    Stats::Vector perBankRdBursts;
    Stats::Vector perBankWrBursts;
    Stats::Scalar numRdRetry;
    Stats::Scalar numWrRetry;
    Stats::Scalar totGap;
    Stats::Vector readPktSize;
    Stats::Vector writePktSize;
    Stats::Vector rdQLenPdf;
    Stats::Vector wrQLenPdf;
    Stats::Histogram bytesPerActivate;
    Stats::Histogram rdPerTurnAround;
    Stats::Histogram wrPerTurnAround;

    // Latencies summed over all requests
    Stats::Scalar totQLat;
    Stats::Scalar totMemAccLat;
    Stats::Scalar totBusLat;

    // Average latencies per request
    Stats::Formula avgQLat;
    Stats::Formula avgBusLat;
    Stats::Formula avgMemAccLat;

    // Average bandwidth
    Stats::Formula avgRdBW;
    Stats::Formula avgWrBW;
    Stats::Formula avgRdBWSys;
    Stats::Formula avgWrBWSys;
    Stats::Formula peakBW;
    Stats::Formula busUtil;
    Stats::Formula busUtilRead;
    Stats::Formula busUtilWrite;

    // Average queue lengths
    Stats::Average avgRdQLen;
    Stats::Average avgWrQLen;

    // Row hit count and rate
    Stats::Scalar readRowHits;
    Stats::Scalar writeRowHits;
    Stats::Formula readRowHitRate;
    Stats::Formula writeRowHitRate;
    Stats::Formula avgGap;

    // DRAM Power Calculation
    Stats::Formula pageHitRate;

    // Holds the value of the rank of burst issued
    uint8_t activeRank;

    // timestamp offset
    uint64_t timeStampOffset;

    /**
     * Upstream caches need this packet until true is returned, so
     * hold it for deletion until a subsequent call
     */
    std::unique_ptr<Packet> pendingDelete;

    /**
     * This function increments the energy when called. If stats are
     * dumped periodically, note that accumulated energy values will
     * appear in the stats (even if the stats are reset). This is a
     * result of the energy values coming from DRAMPower, and there
     * is currently no support for resetting the state.
     *
     * @param rank_ref Current rank
     */
    void updatePowerStats(Rank& rank_ref);

    /**
     * Function for sorting commands in the command list of DRAMPower.
     *
     * @param m1 Memory Command in the command list of the DRAMPower library
     * @param m2 Memory Command in the command list of DRAMPower
     * @return true if the timestamp of m1 is less than that of m2
     */
    static bool sortTime(const Data::MemCommand& m1,
                         const Data::MemCommand& m2) {
        return m1.getTime() < m2.getTime();
    };


  public:

    void regStats() override;

    DRAMCtrl(const DRAMCtrlParams* p);

    DrainState drain() override;

    virtual BaseSlavePort& getSlavePort(const std::string& if_name,
                                        PortID idx = InvalidPortID) override;

    virtual void init() override;
    virtual void startup() override;
    virtual void drainResume() override;

  protected:

    Tick recvAtomic(PacketPtr pkt);
    void recvFunctional(PacketPtr pkt);
    bool recvTimingReq(PacketPtr pkt);

};

#endif //__MEM_DRAM_CTRL_HH__