dram_ctrl.hh revision 11673
/*
 * Copyright (c) 2012-2015 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 *          Ani Udipi
 *          Neha Agarwal
 *          Omar Naji
 *          Matthias Jung
 */

/**
 * @file
 * DRAMCtrl declaration
 */

#ifndef __MEM_DRAM_CTRL_HH__
#define __MEM_DRAM_CTRL_HH__

#include <deque>
#include <string>
#include <unordered_set>

#include "base/statistics.hh"
#include "enums/AddrMap.hh"
#include "enums/MemSched.hh"
#include "enums/PageManage.hh"
#include "mem/abstract_mem.hh"
#include "mem/drampower.hh"
#include "mem/qport.hh"
#include "params/DRAMCtrl.hh"
#include "sim/eventq.hh"

/**
 * The DRAM controller is a single-channel memory controller capturing
 * the most important timing constraints associated with a
 * contemporary DRAM. For multi-channel memory systems, the controller
 * is combined with a crossbar model, with the channel address
 * interleaving taking place in the crossbar.
 *
 * As a basic design principle, this controller model is not cycle
 * callable, but instead uses events to decide: 1) when new decisions
 * can be made, 2) when resources become available, 3) when things are
 * to be considered done, and 4) when to send things back. Through
 * these simple principles, the model delivers high performance and a
 * lot of flexibility, allowing users to evaluate the system impact of
 * a wide range of memory technologies, such as DDR3/4, LPDDR2/3/4,
 * WideIO1/2, HBM and HMC.
 *
 * For more details, please see Hansson et al., "Simulating DRAM
 * controllers for future system architecture exploration",
 * Proc. ISPASS, 2014. If you use this model as part of your research
 * please cite the paper.
 */
class DRAMCtrl : public AbstractMemory
{

  private:

    // For now, make use of a queued slave port to avoid dealing with
    // flow control for the responses being sent back
    class MemoryPort : public QueuedSlavePort
    {

        RespPacketQueue queue;
        DRAMCtrl& memory;

      public:

        MemoryPort(const std::string& name, DRAMCtrl& _memory);

      protected:

        Tick recvAtomic(PacketPtr pkt);

        void recvFunctional(PacketPtr pkt);

        bool recvTimingReq(PacketPtr);

        virtual AddrRangeList getAddrRanges() const;

    };

    /**
     * Our incoming port, for a multi-ported controller add a crossbar
     * in front of it
     */
    MemoryPort port;

    /**
     * Remember if the memory system is in timing mode
     */
    bool isTimingMode;

    /**
     * Remember if we have to retry a request when available.
     */
    bool retryRdReq;
    bool retryWrReq;

    /**
     * Bus state used to control the read/write switching and drive
     * the scheduling of the next request.
     */
    enum BusState {
        READ = 0,
        READ_TO_WRITE,
        WRITE,
        WRITE_TO_READ
    };

    BusState busState;

    /**
     * A basic class to track the bank state, i.e. what row is
     * currently open (if any), when the bank is free to accept a new
     * column (read/write) command, when it can be precharged, and
     * when it can be activated.
     *
     * The bank also keeps track of how many bytes have been accessed
     * in the open row since it was opened.
     */
    class Bank
    {

      public:

        static const uint32_t NO_ROW = -1;

        uint32_t openRow;
        uint8_t bank;
        uint8_t bankgr;

        Tick colAllowedAt;
        Tick preAllowedAt;
        Tick actAllowedAt;

        uint32_t rowAccesses;
        uint32_t bytesAccessed;

        Bank() :
            openRow(NO_ROW), bank(0), bankgr(0),
            colAllowedAt(0), preAllowedAt(0), actAllowedAt(0),
            rowAccesses(0), bytesAccessed(0)
        { }
    };
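
    // Illustrative sketch only (not the controller's actual code): the
    // Bank timing fields capture why a row hit is cheap and a row miss
    // is not. Assuming the tRP/tRCD parameters declared further down:
    //
    //   Tick col_at;
    //   if (bank.openRow == row) {
    //       // row hit: only the column command timing matters
    //       col_at = std::max(curTick(), bank.colAllowedAt);
    //   } else {
    //       // row miss: precharge, then activate, then the column command
    //       col_at = std::max(curTick(), bank.preAllowedAt) + tRP + tRCD;
    //   }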

    /**
     * Rank class includes a vector of banks. Refresh and Power state
     * machines are defined per rank. Events required to change the
     * state of the refresh and power state machine are scheduled per
     * rank. This class allows the implementation of rank-wise refresh
     * and rank-wise power-down.
     */
    class Rank : public EventManager
    {

      private:

        /**
         * The power state captures the different operational states of
         * the DRAM and interacts with the bus read/write state machine,
         * and the refresh state machine. In the idle state all banks are
         * precharged. From there we either go to an auto refresh (as
         * determined by the refresh state machine), or to a precharge
         * power down mode. From idle the memory can also go to the active
         * state (with one or more banks active), and in turn from there
         * to active power down. At the moment we do not capture the deep
         * power down and self-refresh states.
         */
        enum PowerState {
            PWR_IDLE = 0,
            PWR_REF,
            PWR_PRE_PDN,
            PWR_ACT,
            PWR_ACT_PDN
        };

        /**
         * The refresh state is used to control the progress of the
         * refresh scheduling. When normal operation is in progress the
         * refresh state is idle. From there, it progresses to the refresh
         * drain state once tREFI has passed. The refresh drain state
         * captures the DRAM row active state, as it will stay there until
         * all ongoing accesses complete. Thereafter all banks are
         * precharged, and lastly, the DRAM is refreshed.
         */
        enum RefreshState {
            REF_IDLE = 0,
            REF_DRAIN,
            REF_PRE,
            REF_RUN
        };

        /**
         * A reference to the parent DRAMCtrl instance
         */
        DRAMCtrl& memory;

        /**
         * Since we are taking decisions out of order, we need to keep
         * track of what power transition is happening at what time, such
         * that we can go back in time and change history. For example, if
         * we precharge all banks and schedule going to the idle state, we
         * might at a later point decide to activate a bank before the
         * transition to idle would have taken place.
         */
        PowerState pwrStateTrans;

        /**
         * Current power state.
         */
        PowerState pwrState;

        /**
         * Track when we transitioned to the current power state
         */
        Tick pwrStateTick;

        /**
         * Current refresh state
         */
        RefreshState refreshState;

        /**
         * Keep track of when a refresh is due.
         */
        Tick refreshDueAt;

        /*
         * Command energies
         */
        Stats::Scalar actEnergy;
        Stats::Scalar preEnergy;
        Stats::Scalar readEnergy;
        Stats::Scalar writeEnergy;
        Stats::Scalar refreshEnergy;

        /*
         * Active Background Energy
         */
        Stats::Scalar actBackEnergy;

        /*
         * Precharge Background Energy
         */
        Stats::Scalar preBackEnergy;

        Stats::Scalar totalEnergy;
        Stats::Scalar averagePower;

        /**
         * Track time spent in each power state.
         */
        Stats::Vector pwrStateTime;

        /**
         * Function to update Power Stats
         */
        void updatePowerStats();

        /**
         * Schedule a power state transition in the future, and
         * potentially override an already scheduled transition.
         *
         * @param pwr_state Power state to transition to
         * @param tick Tick when transition should take place
         */
        void schedulePowerEvent(PowerState pwr_state, Tick tick);

      public:

        /**
         * Current Rank index
         */
        uint8_t rank;

        /**
         * One DRAMPower instance per rank
         */
        DRAMPower power;

        /**
         * Vector of Banks. Each rank is made of several devices which in
         * turn are made of several banks.
         */
        std::vector<Bank> banks;

        /**
         * To track the number of banks which are currently active for
         * this rank.
         */
        unsigned int numBanksActive;

        /** List to keep track of activate ticks */
        std::deque<Tick> actTicks;
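
        // Worked example (illustrative): with activationLimit == 4 the
        // deque holds the last four ACT times, newest at the front. A
        // fifth activate then has to respect the rolling window, i.e.
        // it may issue no earlier than actTicks.back() + tXAW, where
        // actTicks.back() is the oldest recorded activate.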

        Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p);

        const std::string name() const
        {
            return csprintf("%s_%d", memory.name(), rank);
        }

        /**
         * Kick off accounting for power and refresh states and
         * schedule initial refresh.
         *
         * @param ref_tick Tick for first refresh
         */
        void startup(Tick ref_tick);

        /**
         * Stop the refresh events.
         */
        void suspend();

        /**
         * Check if the current rank is available for scheduling.
         *
         * @return True if the rank is idle from a refresh point of view
         */
        bool isAvailable() const { return refreshState == REF_IDLE; }

        /**
         * Let the rank check if it was waiting for requests to drain
         * to allow it to transition states.
         */
        void checkDrainDone();

        /*
         * Function to register Stats
         */
        void regStats();

        void processActivateEvent();
        EventWrapper<Rank, &Rank::processActivateEvent>
        activateEvent;

        void processPrechargeEvent();
        EventWrapper<Rank, &Rank::processPrechargeEvent>
        prechargeEvent;

        void processRefreshEvent();
        EventWrapper<Rank, &Rank::processRefreshEvent>
        refreshEvent;

        void processPowerEvent();
        EventWrapper<Rank, &Rank::processPowerEvent>
        powerEvent;

    };

    /**
     * A burst helper helps organize and manage a packet that is larger
     * than the DRAM burst size. A system packet that is larger than the
     * burst size is split into multiple DRAM packets and all those DRAM
     * packets point to a single burst helper such that we know when the
     * whole packet is served.
     */
    class BurstHelper {

      public:

        /** Number of DRAM bursts required for a system packet */
        const unsigned int burstCount;

        /** Number of DRAM bursts serviced so far for a system packet */
        unsigned int burstsServiced;

        BurstHelper(unsigned int _burstCount)
            : burstCount(_burstCount), burstsServiced(0)
        { }
    };
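
    // For example (illustrative): with a 64-byte burst size, a 128-byte
    // system packet becomes two DRAM packets that share one BurstHelper
    // with burstCount == 2; the single response to the outside world is
    // only sent once burstsServiced has reached burstCount.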

    /**
     * A DRAM packet stores packets along with the timestamp of when
     * the packet entered the queue, and also the decoded address.
     */
    class DRAMPacket {

      public:

        /** When did request enter the controller */
        const Tick entryTime;

        /** When will request leave the controller */
        Tick readyTime;

        /** This comes from the outside world */
        const PacketPtr pkt;

        const bool isRead;

        /** Will be populated by address decoder */
        const uint8_t rank;
        const uint8_t bank;
        const uint32_t row;

        /**
         * Bank id is calculated considering banks in all the ranks
         * eg: 2 ranks each with 8 banks, then bankId = 0 --> rank0,
         * bank0 and bankId = 8 --> rank1, bank0
         */
        const uint16_t bankId;
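
        // Equivalently (matching the example above):
        //   bankId = rank * banksPerRank + bank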

        /**
         * The starting address of the DRAM packet.
         * This address could be unaligned to burst size boundaries. The
         * reason is to keep the address offset so we can accurately check
         * incoming read packets with packets in the write queue.
         */
        Addr addr;

        /**
         * The size of this dram packet in bytes
         * It is always equal to or smaller than the DRAM burst size
         */
        unsigned int size;

        /**
         * A pointer to the BurstHelper if this DRAMPacket is a split
         * packet. If not a split packet (common case), this is set to
         * NULL
         */
        BurstHelper* burstHelper;
        Bank& bankRef;
        Rank& rankRef;

        DRAMPacket(PacketPtr _pkt, bool is_read, uint8_t _rank,
                   uint8_t _bank, uint32_t _row, uint16_t bank_id,
                   Addr _addr, unsigned int _size, Bank& bank_ref,
                   Rank& rank_ref)
            : entryTime(curTick()), readyTime(curTick()),
              pkt(_pkt), isRead(is_read), rank(_rank), bank(_bank),
              row(_row), bankId(bank_id), addr(_addr), size(_size),
              burstHelper(NULL), bankRef(bank_ref), rankRef(rank_ref)
        { }

    };

    /**
     * Bunch of things required to set up "events" in gem5.
     * When event "respondEvent" occurs for example, the method
     * processRespondEvent is called; no parameters are allowed
     * in these methods
     */
    void processNextReqEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processNextReqEvent> nextReqEvent;

    void processRespondEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processRespondEvent> respondEvent;

    /**
     * Check if the read queue has room for more entries
     *
     * @param pktCount The number of entries needed in the read queue
     * @return true if read queue is full, false otherwise
     */
    bool readQueueFull(unsigned int pktCount) const;

    /**
     * Check if the write queue has room for more entries
     *
     * @param pktCount The number of entries needed in the write queue
     * @return true if write queue is full, false otherwise
     */
    bool writeQueueFull(unsigned int pktCount) const;

    /**
     * When a new read comes in, first check if the write queue has a
     * pending request to the same address. If not, decode the
     * address to populate rank/bank/row, create one or multiple
     * "dram_pkt", and push them to the back of the read queue.
     * If this is the only read request in the system, schedule an
     * event to start servicing it.
     *
     * @param pkt The request packet from the outside world
     * @param pktCount The number of DRAM bursts the pkt
     * translates to. If pkt size is larger than one full burst,
     * then pktCount is greater than one.
     */
    void addToReadQueue(PacketPtr pkt, unsigned int pktCount);

    /**
     * Decode the incoming pkt, create a dram_pkt and push it to the
     * back of the write queue. If the write queue length is more than
     * the threshold specified by the user, i.e. the queue is beginning
     * to get full, stop reads, and start draining writes.
     *
     * @param pkt The request packet from the outside world
     * @param pktCount The number of DRAM bursts the pkt
     * translates to. If pkt size is larger than one full burst,
     * then pktCount is greater than one.
     */
    void addToWriteQueue(PacketPtr pkt, unsigned int pktCount);

    /**
     * Actually do the DRAM access - figure out the latency it
     * will take to service the req based on bank state, channel state
     * etc., and then update those states to account for this request.
     * Based on this, update the packet's "readyTime" and move it to
     * the response q from where it will eventually go back to the
     * outside world.
     *
     * @param dram_pkt The DRAM packet created from the outside world pkt
     */
    void doDRAMAccess(DRAMPacket* dram_pkt);

    /**
     * When a packet reaches its "readyTime" in the response Q,
     * use the "access()" method in AbstractMemory to actually
     * create the response packet, and send it back to the outside
     * world requestor.
     *
     * @param pkt The packet from the outside world
     * @param static_latency Static latency to add before sending the
     * packet
     */
    void accessAndRespond(PacketPtr pkt, Tick static_latency);

    /**
     * Address decoder to figure out physical mapping onto ranks,
     * banks, and rows. This function is called multiple times on the
     * same system packet if the packet is larger than a burst of the
     * memory. The dramPktAddr is used for the offset within the packet.
     *
     * @param pkt The packet from the outside world
     * @param dramPktAddr The starting address of the DRAM packet
     * @param size The size of the DRAM packet in bytes
     * @param isRead Is the request for a read or a write to DRAM
     * @return A DRAMPacket pointer with the decoded information
     */
    DRAMPacket* decodeAddr(PacketPtr pkt, Addr dramPktAddr,
                           unsigned int size, bool isRead);

    /**
     * The memory scheduler/arbiter - picks which request needs to
     * go next, based on the specified policy such as FCFS or FR-FCFS
     * and moves it to the head of the queue.
     * Prioritizes accesses to the same rank as the previous burst
     * unless the controller is switching command type.
     *
     * @param queue Queued requests to consider
     * @param extra_col_delay Any extra delay due to a read/write switch
     * @return true if a packet is scheduled to a rank which is
     * available, else false
     */
    bool chooseNext(std::deque<DRAMPacket*>& queue, Tick extra_col_delay);

    /**
     * For FR-FCFS policy, reorder the read/write queue depending on
     * row buffer hits and earliest bursts available in DRAM
     *
     * @param queue Queued requests to consider
     * @param extra_col_delay Any extra delay due to a read/write switch
     * @return true if a packet is scheduled to a rank which is
     * available, else false
     */
    bool reorderQueue(std::deque<DRAMPacket*>& queue, Tick extra_col_delay);

    /**
     * Find which are the earliest banks ready to issue an activate
     * for the enqueued requests. Assumes a maximum of 64 banks per
     * DIMM. Also checks if the bank is already prepped.
     *
     * @param queue Queued requests to consider
     * @param min_col_at Time of the earliest seamless burst command
     * @return A pair of a one-hot encoded mask of bank indices and a
     * boolean indicating whether the burst can issue seamlessly, with
     * no gaps
     */
    std::pair<uint64_t, bool> minBankPrep(const std::deque<DRAMPacket*>& queue,
                                          Tick min_col_at) const;
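
    // Note (illustrative): the mask uses the same flat bank numbering
    // as DRAMPacket::bankId, so with 2 ranks of 8 banks a set bit 9
    // would denote rank 1, bank 1.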

    /**
     * Keep track of when row activations happen, in order to enforce
     * the maximum number of activations in the activation window. The
     * method updates the time that the banks become available based
     * on the current limits.
     *
     * @param rank_ref Reference to the rank
     * @param bank_ref Reference to the bank
     * @param act_tick Time when the activation takes place
     * @param row Index of the row
     */
    void activateBank(Rank& rank_ref, Bank& bank_ref, Tick act_tick,
                      uint32_t row);

    /**
     * Precharge a given bank and also update when the precharge is
     * done. This will also deal with any stats related to the
     * accesses to the open page.
     *
     * @param rank_ref The rank to precharge
     * @param bank_ref The bank to precharge
     * @param pre_at Time when the precharge takes place
     * @param trace If false (e.g. for an auto precharge), do not add
     * the command to the trace
     */
    void prechargeBank(Rank& rank_ref, Bank& bank_ref,
                       Tick pre_at, bool trace = true);

    /**
     * Used for debugging to observe the contents of the queues.
     */
    void printQs() const;

    /**
     * Burst-align an address.
     *
     * @param addr The potentially unaligned address
     *
     * @return An address aligned to a DRAM burst
     */
    Addr burstAlign(Addr addr) const { return (addr & ~(Addr(burstSize - 1))); }
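
    // For example, with a 64-byte burst size burstAlign(0x47) yields
    // 0x40, so two accesses falling in the same 64-byte burst map to
    // the same burst address in isInWriteQueue below.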

    /**
     * The controller's main read and write queues
     */
    std::deque<DRAMPacket*> readQueue;
    std::deque<DRAMPacket*> writeQueue;

    /**
     * To avoid iterating over the write queue to check for
     * overlapping transactions, maintain a set of burst addresses
     * that are currently queued. Since we merge writes to the same
     * location we never have more than one address to the same burst
     * address.
     */
    std::unordered_set<Addr> isInWriteQueue;

    /**
     * Response queue where read packets wait after we're done working
     * with them, but it's not time to send the response yet. The
     * responses are stored separately, mostly to keep the code clean
     * and help with event scheduling. For all logical purposes such
     * as sizing the read queue, this and the main read queue need to
     * be added together.
     */
    std::deque<DRAMPacket*> respQueue;

    /**
     * Vector of ranks
     */
    std::vector<Rank*> ranks;

    /**
     * The following are basic design parameters of the memory
     * controller, and are initialized based on parameter values.
     * The rowsPerBank is determined based on the capacity, number of
     * ranks and banks, the burst size, and the row buffer size.
     */
    const uint32_t deviceSize;
    const uint32_t deviceBusWidth;
    const uint32_t burstLength;
    const uint32_t deviceRowBufferSize;
    const uint32_t devicesPerRank;
    const uint32_t burstSize;
    const uint32_t rowBufferSize;
    const uint32_t columnsPerRowBuffer;
    const uint32_t columnsPerStripe;
    const uint32_t ranksPerChannel;
    const uint32_t bankGroupsPerRank;
    const bool bankGroupArch;
    const uint32_t banksPerRank;
    const uint32_t channels;
    uint32_t rowsPerBank;
    const uint32_t readBufferSize;
    const uint32_t writeBufferSize;
    const uint32_t writeHighThreshold;
    const uint32_t writeLowThreshold;
    const uint32_t minWritesPerSwitch;
    uint32_t writesThisTime;
    uint32_t readsThisTime;
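
    // Illustrative derivation (exact values depend on the configured
    // device): for a x8 device with burst length 8 and 8 devices per
    // rank,
    //   burstSize     = devicesPerRank * burstLength * deviceBusWidth / 8
    //                 = 8 * 8 * 8 / 8 = 64 bytes
    //   rowBufferSize = devicesPerRank * deviceRowBufferSize
    // and rowsPerBank then follows from the total capacity divided by
    // rowBufferSize * banksPerRank * ranksPerChannel.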

    /**
     * Basic memory timing parameters initialized based on parameter
     * values.
     */
    const Tick M5_CLASS_VAR_USED tCK;
    const Tick tWTR;
    const Tick tRTW;
    const Tick tCS;
    const Tick tBURST;
    const Tick tCCD_L;
    const Tick tRCD;
    const Tick tCL;
    const Tick tRP;
    const Tick tRAS;
    const Tick tWR;
    const Tick tRTP;
    const Tick tRFC;
    const Tick tREFI;
    const Tick tRRD;
    const Tick tRRD_L;
    const Tick tXAW;
    const Tick tXP;
    const Tick tXS;
    const uint32_t activationLimit;

    /**
     * Memory controller configuration initialized based on parameter
     * values.
     */
    Enums::MemSched memSchedPolicy;
    Enums::AddrMap addrMapping;
    Enums::PageManage pageMgmt;

    /**
     * Max column accesses (read and write) per row, before forcefully
     * closing it.
     */
    const uint32_t maxAccessesPerRow;

    /**
     * Pipeline latency of the controller frontend. The frontend
     * contribution is added to writes (that complete when they are in
     * the write buffer) and reads that are serviced by the write
     * buffer.
     */
    const Tick frontendLatency;

    /**
     * Pipeline latency of the backend and PHY. Along with the
     * frontend contribution, this latency is added to reads serviced
     * by the DRAM.
     */
    const Tick backendLatency;

    /**
     * Until when has the main data bus been spoken for?
     */
    Tick busBusyUntil;

    Tick prevArrival;

    /**
     * The soonest you have to start thinking about the next request
     * is the longest access time that can occur before
     * busBusyUntil. Assuming you need to precharge, open a new row,
     * and access, it is tRP + tRCD + tCL.
     */
    Tick nextReqTime;

    // All statistics that the model needs to capture
    Stats::Scalar readReqs;
    Stats::Scalar writeReqs;
    Stats::Scalar readBursts;
    Stats::Scalar writeBursts;
    Stats::Scalar bytesReadDRAM;
    Stats::Scalar bytesReadWrQ;
    Stats::Scalar bytesWritten;
    Stats::Scalar bytesReadSys;
    Stats::Scalar bytesWrittenSys;
    Stats::Scalar servicedByWrQ;
    Stats::Scalar mergedWrBursts;
    Stats::Scalar neitherReadNorWrite;
    Stats::Vector perBankRdBursts;
    Stats::Vector perBankWrBursts;
    Stats::Scalar numRdRetry;
    Stats::Scalar numWrRetry;
    Stats::Scalar totGap;
    Stats::Vector readPktSize;
    Stats::Vector writePktSize;
    Stats::Vector rdQLenPdf;
    Stats::Vector wrQLenPdf;
    Stats::Histogram bytesPerActivate;
    Stats::Histogram rdPerTurnAround;
    Stats::Histogram wrPerTurnAround;

    // Latencies summed over all requests
    Stats::Scalar totQLat;
    Stats::Scalar totMemAccLat;
    Stats::Scalar totBusLat;

    // Average latencies per request
    Stats::Formula avgQLat;
    Stats::Formula avgBusLat;
    Stats::Formula avgMemAccLat;

    // Average bandwidth
    Stats::Formula avgRdBW;
    Stats::Formula avgWrBW;
    Stats::Formula avgRdBWSys;
    Stats::Formula avgWrBWSys;
    Stats::Formula peakBW;
    Stats::Formula busUtil;
    Stats::Formula busUtilRead;
    Stats::Formula busUtilWrite;

    // Average queue lengths
    Stats::Average avgRdQLen;
    Stats::Average avgWrQLen;

    // Row hit count and rate
    Stats::Scalar readRowHits;
    Stats::Scalar writeRowHits;
    Stats::Formula readRowHitRate;
    Stats::Formula writeRowHitRate;
    Stats::Formula avgGap;

    // DRAM Power Calculation
    Stats::Formula pageHitRate;

    // Holds the rank of the burst issued
    uint8_t activeRank;

    // timestamp offset
    uint64_t timeStampOffset;

    /**
     * Upstream caches need this packet until true is returned, so
     * hold it for deletion until a subsequent call
     */
    std::unique_ptr<Packet> pendingDelete;

    /**
     * This function increments the energy when called. If stats are
     * dumped periodically, note accumulated energy values will
     * appear in the stats (even if the stats are reset). This is a
     * result of the energy values coming from DRAMPower, and there
     * is currently no support for resetting the state.
     *
     * @param rank_ref Current rank
     */
    void updatePowerStats(Rank& rank_ref);

    /**
     * Function for sorting commands in the command list of DRAMPower.
     *
     * @param m1 Memory Command in the command list of the DRAMPower
     * library
     * @param m2 Next Memory Command in the command list of DRAMPower
     * @return true if the timestamp of m1 is less than the timestamp
     * of m2
     */
    static bool sortTime(const Data::MemCommand& m1,
                         const Data::MemCommand& m2)
    {
        return m1.getTimeInt64() < m2.getTimeInt64();
    }
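
    // Illustrative use: given some std::vector<Data::MemCommand>
    // cmd_list (hypothetical name), the DRAMPower trace can be
    // time-ordered with
    //   std::sort(cmd_list.begin(), cmd_list.end(), DRAMCtrl::sortTime);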

  public:

    void regStats() override;

    DRAMCtrl(const DRAMCtrlParams* p);

    DrainState drain() override;

    virtual BaseSlavePort& getSlavePort(const std::string& if_name,
                                        PortID idx = InvalidPortID) override;

    virtual void init() override;
    virtual void startup() override;
    virtual void drainResume() override;

  protected:

    Tick recvAtomic(PacketPtr pkt);
    void recvFunctional(PacketPtr pkt);
    bool recvTimingReq(PacketPtr pkt);

};

#endif //__MEM_DRAM_CTRL_HH__