dram_ctrl.hh revision 10489:99d59caa4c8f
1/* 2 * Copyright (c) 2012-2014 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2013 Amin Farmahini-Farahani 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Andreas Hansson 41 * Ani Udipi 42 * Neha Agarwal 43 */ 44 45/** 46 * @file 47 * DRAMCtrl declaration 48 */ 49 50#ifndef __MEM_DRAM_CTRL_HH__ 51#define __MEM_DRAM_CTRL_HH__ 52 53#include <deque> 54 55#include "base/statistics.hh" 56#include "enums/AddrMap.hh" 57#include "enums/MemSched.hh" 58#include "enums/PageManage.hh" 59#include "mem/abstract_mem.hh" 60#include "mem/qport.hh" 61#include "params/DRAMCtrl.hh" 62#include "sim/eventq.hh" 63#include "mem/drampower.hh" 64 65/** 66 * The DRAM controller is a single-channel memory controller capturing 67 * the most important timing constraints associated with a 68 * contemporary DRAM. For multi-channel memory systems, the controller 69 * is combined with a crossbar model, with the channel address 70 * interleaving taking part in the crossbar. 71 * 72 * As a basic design principle, this controller 73 * model is not cycle callable, but instead uses events to: 1) decide 74 * when new decisions can be made, 2) when resources become available, 75 * 3) when things are to be considered done, and 4) when to send 76 * things back. Through these simple principles, the model delivers 77 * high performance, and lots of flexibility, allowing users to 78 * evaluate the system impact of a wide range of memory technologies, 79 * such as DDR3/4, LPDDR2/3/4, WideIO1/2, HBM and HMC. 
 *
 * For more details, please see Hansson et al, "Simulating DRAM
 * controllers for future system architecture exploration",
 * Proc. ISPASS, 2014. If you use this model as part of your research
 * please cite the paper.
 */
class DRAMCtrl : public AbstractMemory
{

  private:

    // For now, make use of a queued slave port to avoid dealing with
    // flow control for the responses being sent back
    class MemoryPort : public QueuedSlavePort
    {

        SlavePacketQueue queue;
        DRAMCtrl& memory;

      public:

        MemoryPort(const std::string& name, DRAMCtrl& _memory);

      protected:

        Tick recvAtomic(PacketPtr pkt);

        void recvFunctional(PacketPtr pkt);

        bool recvTimingReq(PacketPtr);

        virtual AddrRangeList getAddrRanges() const;

    };

    /**
     * Our incoming port, for a multi-ported controller add a crossbar
     * in front of it
     */
    MemoryPort port;

    /**
     * Remember if we have to retry a request when available.
     */
    bool retryRdReq;
    bool retryWrReq;

    /**
     * Bus state used to control the read/write switching and drive
     * the scheduling of the next request.
     */
    enum BusState {
        READ = 0,
        READ_TO_WRITE,
        WRITE,
        WRITE_TO_READ
    };

    BusState busState;

    /** List to keep track of activate ticks */
    std::vector<std::deque<Tick>> actTicks;

    /**
     * A basic class to track the bank state, i.e. what row is
     * currently open (if any), when is the bank free to accept a new
     * column (read/write) command, when can it be precharged, and
     * when can it be activated.
     *
     * The bank also keeps track of how many bytes have been accessed
     * in the open row since it was opened.
     */
    class Bank
    {

      public:

        // Sentinel for "no row open"; -1 wraps to the max uint32_t value
        static const uint32_t NO_ROW = -1;

        uint32_t openRow;
        uint8_t rank;
        uint8_t bank;
        uint8_t bankgr;

        Tick colAllowedAt;
        Tick preAllowedAt;
        Tick actAllowedAt;

        uint32_t rowAccesses;
        uint32_t bytesAccessed;

        Bank() :
            openRow(NO_ROW), rank(0), bank(0), bankgr(0),
            colAllowedAt(0), preAllowedAt(0), actAllowedAt(0),
            rowAccesses(0), bytesAccessed(0)
        { }
    };

    /**
     * A burst helper helps organize and manage a packet that is larger than
     * the DRAM burst size. A system packet that is larger than the burst size
     * is split into multiple DRAM packets and all those DRAM packets point to
     * a single burst helper such that we know when the whole packet is served.
     */
    class BurstHelper {

      public:

        /** Number of DRAM bursts required for a system packet **/
        const unsigned int burstCount;

        /** Number of DRAM bursts serviced so far for a system packet **/
        unsigned int burstsServiced;

        BurstHelper(unsigned int _burstCount)
            : burstCount(_burstCount), burstsServiced(0)
        { }
    };

    /**
     * A DRAM packet stores packets along with the timestamp of when
     * the packet entered the queue, and also the decoded address.
     */
    class DRAMPacket {

      public:

        /** When did request enter the controller */
        const Tick entryTime;

        /** When will request leave the controller */
        Tick readyTime;

        /** This comes from the outside world */
        const PacketPtr pkt;

        const bool isRead;

        /** Will be populated by address decoder */
        const uint8_t rank;
        const uint8_t bank;
        const uint32_t row;

        /**
         * Bank id is calculated considering banks in all the ranks
         * eg: 2 ranks each with 8 banks, then bankId = 0 --> rank0, bank0 and
         * bankId = 8 --> rank1, bank0
         */
        const uint16_t bankId;

        /**
         * The starting address of the DRAM packet.
         * This address could be unaligned to burst size boundaries. The
         * reason is to keep the address offset so we can accurately check
         * incoming read packets with packets in the write queue.
         */
        Addr addr;

        /**
         * The size of this dram packet in bytes
         * It is always equal or smaller than DRAM burst size
         */
        unsigned int size;

        /**
         * A pointer to the BurstHelper if this DRAMPacket is a split packet
         * If not a split packet (common case), this is set to NULL
         */
        BurstHelper* burstHelper;
        Bank& bankRef;

        DRAMPacket(PacketPtr _pkt, bool is_read, uint8_t _rank, uint8_t _bank,
                   uint32_t _row, uint16_t bank_id, Addr _addr,
                   unsigned int _size, Bank& bank_ref)
            : entryTime(curTick()), readyTime(curTick()),
              pkt(_pkt), isRead(is_read), rank(_rank), bank(_bank), row(_row),
              bankId(bank_id), addr(_addr), size(_size), burstHelper(NULL),
              bankRef(bank_ref)
        { }

    };

    /**
     * Bunch of things required to set up "events" in gem5
     * When event "respondEvent" occurs for example, the method
     * processRespondEvent is called; no parameters are allowed
     * in these methods
     */
    void processNextReqEvent();
    EventWrapper<DRAMCtrl,&DRAMCtrl::processNextReqEvent> nextReqEvent;

    void processRespondEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processRespondEvent> respondEvent;

    void processActivateEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processActivateEvent> activateEvent;

    void processPrechargeEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processPrechargeEvent> prechargeEvent;

    void processRefreshEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processRefreshEvent> refreshEvent;

    void processPowerEvent();
    EventWrapper<DRAMCtrl,&DRAMCtrl::processPowerEvent> powerEvent;

    /**
     * Check if the read queue has room for more entries
     *
     * @param pktCount The number of entries needed in the read queue
     * @return true if read queue is full, false otherwise
     */
    bool readQueueFull(unsigned int pktCount) const;

    /**
     * Check if the write queue has room for more entries
     *
     * @param pktCount The number of entries needed in the write queue
     * @return true if write queue is full, false otherwise
     */
    bool writeQueueFull(unsigned int pktCount) const;

    /**
     * When a new read comes in, first check if the write q has a
     * pending request to the same address. If not, decode the
     * address to populate rank/bank/row, create one or multiple
     * "dram_pkt", and push them to the back of the read queue.
     * If this is the only read request in the system, schedule an
     * event to start servicing it.
     *
     * @param pkt The request packet from the outside world
     * @param pktCount The number of DRAM bursts the pkt
     * translates to. If pkt size is larger then one full burst,
     * then pktCount is greater than one.
     */
    void addToReadQueue(PacketPtr pkt, unsigned int pktCount);

    /**
     * Decode the incoming pkt, create a dram_pkt and push to the
     * back of the write queue. If the write q length is more than
     * the threshold specified by the user, ie the queue is beginning
     * to get full, stop reads, and start draining writes.
     *
     * @param pkt The request packet from the outside world
     * @param pktCount The number of DRAM bursts the pkt
     * translates to. If pkt size is larger then one full burst,
     * then pktCount is greater than one.
     */
    void addToWriteQueue(PacketPtr pkt, unsigned int pktCount);

    /**
     * Actually do the DRAM access - figure out the latency it
     * will take to service the req based on bank state, channel state etc
     * and then update those states to account for this request. Based
     * on this, update the packet's "readyTime" and move it to the
     * response q from where it will eventually go back to the outside
     * world.
     *
     * @param dram_pkt The DRAM packet created from the outside world pkt
     */
    void doDRAMAccess(DRAMPacket* dram_pkt);

    /**
     * When a packet reaches its "readyTime" in the response Q,
     * use the "access()" method in AbstractMemory to actually
     * create the response packet, and send it back to the outside
     * world requestor.
     *
     * @param pkt The packet from the outside world
     * @param static_latency Static latency to add before sending the packet
     */
    void accessAndRespond(PacketPtr pkt, Tick static_latency);

    /**
     * Address decoder to figure out physical mapping onto ranks,
     * banks, and rows. This function is called multiple times on the same
     * system packet if the packet is larger than burst of the memory. The
     * dramPktAddr is used for the offset within the packet.
     *
     * @param pkt The packet from the outside world
     * @param dramPktAddr The starting address of the DRAM packet
     * @param size The size of the DRAM packet in bytes
     * @param isRead Is the request for a read or a write to DRAM
     * @return A DRAMPacket pointer with the decoded information
     */
    DRAMPacket* decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned int size,
                           bool isRead);

    /**
     * The memory scheduler/arbiter - picks which request needs to
     * go next, based on the specified policy such as FCFS or FR-FCFS
     * and moves it to the head of the queue.
     * Prioritizes accesses to the same rank as previous burst unless
     * controller is switching command type.
     *
     * @param queue Queued requests to consider
     * @param switched_cmd_type Command type is changing
     */
    void chooseNext(std::deque<DRAMPacket*>& queue, bool switched_cmd_type);

    /**
     * For FR-FCFS policy reorder the read/write queue depending on row buffer
     * hits and earliest banks available in DRAM
     * Prioritizes accesses to the same rank as previous burst unless
     * controller is switching command type.
     *
     * @param queue Queued requests to consider
     * @param switched_cmd_type Command type is changing
     */
    void reorderQueue(std::deque<DRAMPacket*>& queue, bool switched_cmd_type);

    /**
     * Find which are the earliest banks ready to issue an activate
     * for the enqueued requests. Assumes maximum of 64 banks per DIMM
     * Also checks if the bank is already prepped.
     *
     * @param queue Queued requests to consider
     * @param switched_cmd_type Command type is changing
     * @return One-hot encoded mask of bank indices
     */
    uint64_t minBankPrep(const std::deque<DRAMPacket*>& queue,
                         bool switched_cmd_type) const;

    /**
     * Keep track of when row activations happen, in order to enforce
     * the maximum number of activations in the activation window. The
     * method updates the time that the banks become available based
     * on the current limits.
     *
     * @param bank Reference to the bank
     * @param act_tick Time when the activation takes place
     * @param row Index of the row
     */
    void activateBank(Bank& bank, Tick act_tick, uint32_t row);

    /**
     * Precharge a given bank and also update when the precharge is
     * done. This will also deal with any stats related to the
     * accesses to the open page.
     *
     * @param bank_ref The bank to precharge
     * @param pre_at Time when the precharge takes place
     * @param trace If this is an auto precharge then do not add to trace
     */
    void prechargeBank(Bank& bank_ref, Tick pre_at, bool trace = true);

    /**
     * Used for debugging to observe the contents of the queues.
     */
    void printQs() const;

    /**
     * The controller's main read and write queues
     */
    std::deque<DRAMPacket*> readQueue;
    std::deque<DRAMPacket*> writeQueue;

    /**
     * Response queue where read packets wait after we're done working
     * with them, but it's not time to send the response yet. The
     * responses are stored separately mostly to keep the code clean
     * and help with events scheduling. For all logical purposes such
     * as sizing the read queue, this and the main read queue need to
     * be added together.
     */
    std::deque<DRAMPacket*> respQueue;

    /**
     * If we need to drain, keep the drain manager around until we're
     * done here.
     */
    DrainManager *drainManager;

    /**
     * Multi-dimensional vector of banks, first dimension is ranks,
     * second is bank
     */
    std::vector<std::vector<Bank> > banks;

    /**
     * The following are basic design parameters of the memory
     * controller, and are initialized based on parameter values.
     * The rowsPerBank is determined based on the capacity, number of
     * ranks and banks, the burst size, and the row buffer size.
     */
    const uint32_t deviceSize;
    const uint32_t deviceBusWidth;
    const uint32_t burstLength;
    const uint32_t deviceRowBufferSize;
    const uint32_t devicesPerRank;
    const uint32_t burstSize;
    const uint32_t rowBufferSize;
    const uint32_t columnsPerRowBuffer;
    const uint32_t columnsPerStripe;
    const uint32_t ranksPerChannel;
    const uint32_t bankGroupsPerRank;
    const bool bankGroupArch;
    const uint32_t banksPerRank;
    const uint32_t channels;
    uint32_t rowsPerBank;
    const uint32_t readBufferSize;
    const uint32_t writeBufferSize;
    const uint32_t writeHighThreshold;
    const uint32_t writeLowThreshold;
    const uint32_t minWritesPerSwitch;
    // Counters for the reads/writes issued since the last bus turnaround
    uint32_t writesThisTime;
    uint32_t readsThisTime;

    /**
     * Basic memory timing parameters initialized based on parameter
     * values.
     */
    const Tick M5_CLASS_VAR_USED tCK;
    const Tick tWTR;
    const Tick tRTW;
    const Tick tCS;
    const Tick tBURST;
    const Tick tCCD_L;
    const Tick tRCD;
    const Tick tCL;
    const Tick tRP;
    const Tick tRAS;
    const Tick tWR;
    const Tick tRTP;
    const Tick tRFC;
    const Tick tREFI;
    const Tick tRRD;
    const Tick tRRD_L;
    const Tick tXAW;
    const uint32_t activationLimit;

    /**
     * Memory controller configuration initialized based on parameter
     * values.
     */
    Enums::MemSched memSchedPolicy;
    Enums::AddrMap addrMapping;
    Enums::PageManage pageMgmt;

    /**
     * Max column accesses (read and write) per row, before forcefully
     * closing it.
     */
    const uint32_t maxAccessesPerRow;

    /**
     * Pipeline latency of the controller frontend. The frontend
     * contribution is added to writes (that complete when they are in
     * the write buffer) and reads that are serviced by the write buffer.
     */
    const Tick frontendLatency;

    /**
     * Pipeline latency of the backend and PHY. Along with the
     * frontend contribution, this latency is added to reads serviced
     * by the DRAM.
     */
    const Tick backendLatency;

    /**
     * Till when has the main data bus been spoken for already?
     */
    Tick busBusyUntil;

    /**
     * Keep track of when a refresh is due.
     */
    Tick refreshDueAt;

    /**
     * The refresh state is used to control the progress of the
     * refresh scheduling. When normal operation is in progress the
     * refresh state is idle. From there, it progresses to the refresh
     * drain state once tREFI has passed. The refresh drain state
     * captures the DRAM row active state, as it will stay there until
     * all ongoing accesses complete. Thereafter all banks are
     * precharged, and lastly, the DRAM is refreshed.
     */
    enum RefreshState {
        REF_IDLE = 0,
        REF_DRAIN,
        REF_PRE,
        REF_RUN
    };

    RefreshState refreshState;

    /**
     * The power state captures the different operational states of
     * the DRAM and interacts with the bus read/write state machine,
     * and the refresh state machine. In the idle state all banks are
     * precharged. From there we either go to an auto refresh (as
     * determined by the refresh state machine), or to a precharge
     * power down mode. From idle the memory can also go to the active
     * state (with one or more banks active), and in turn from there
     * to active power down. At the moment we do not capture the deep
     * power down and self-refresh state.
     */
    enum PowerState {
        PWR_IDLE = 0,
        PWR_REF,
        PWR_PRE_PDN,
        PWR_ACT,
        PWR_ACT_PDN
    };

    /**
     * Since we are taking decisions out of order, we need to keep
     * track of what power transition is happening at what time, such
     * that we can go back in time and change history. For example, if
     * we precharge all banks and schedule going to the idle state, we
     * might at a later point decide to activate a bank before the
     * transition to idle would have taken place.
     */
    PowerState pwrStateTrans;

    /**
     * Current power state.
     */
    PowerState pwrState;

    /**
     * Schedule a power state transition in the future, and
     * potentially override an already scheduled transition.
     *
     * @param pwr_state Power state to transition to
     * @param tick Tick when transition should take place
     */
    void schedulePowerEvent(PowerState pwr_state, Tick tick);

    // Tick of the last request arrival, used for the inter-arrival gap stats
    Tick prevArrival;

    /**
     * The soonest you have to start thinking about the next request
     * is the longest access time that can occur before
     * busBusyUntil. Assuming you need to precharge, open a new row,
     * and access, it is tRP + tRCD + tCL.
     */
    Tick nextReqTime;

    // All statistics that the model needs to capture
    Stats::Scalar readReqs;
    Stats::Scalar writeReqs;
    Stats::Scalar readBursts;
    Stats::Scalar writeBursts;
    Stats::Scalar bytesReadDRAM;
    Stats::Scalar bytesReadWrQ;
    Stats::Scalar bytesWritten;
    Stats::Scalar bytesReadSys;
    Stats::Scalar bytesWrittenSys;
    Stats::Scalar servicedByWrQ;
    Stats::Scalar mergedWrBursts;
    Stats::Scalar neitherReadNorWrite;
    Stats::Vector perBankRdBursts;
    Stats::Vector perBankWrBursts;
    Stats::Scalar numRdRetry;
    Stats::Scalar numWrRetry;
    Stats::Scalar totGap;
    Stats::Vector readPktSize;
    Stats::Vector writePktSize;
    Stats::Vector rdQLenPdf;
    Stats::Vector wrQLenPdf;
    Stats::Histogram bytesPerActivate;
    Stats::Histogram rdPerTurnAround;
    Stats::Histogram wrPerTurnAround;

    // Latencies summed over all requests
    Stats::Scalar totQLat;
    Stats::Scalar totMemAccLat;
    Stats::Scalar totBusLat;

    // Average latencies per request
    Stats::Formula avgQLat;
    Stats::Formula avgBusLat;
    Stats::Formula avgMemAccLat;

    // Average bandwidth
    Stats::Formula avgRdBW;
    Stats::Formula avgWrBW;
    Stats::Formula avgRdBWSys;
    Stats::Formula avgWrBWSys;
    Stats::Formula peakBW;
    Stats::Formula busUtil;
    Stats::Formula busUtilRead;
    Stats::Formula busUtilWrite;

    // Average queue lengths
    Stats::Average avgRdQLen;
    Stats::Average avgWrQLen;

    // Row hit count and rate
    Stats::Scalar readRowHits;
    Stats::Scalar writeRowHits;
    Stats::Formula readRowHitRate;
    Stats::Formula writeRowHitRate;
    Stats::Formula avgGap;

    // DRAM Power Calculation
    Stats::Formula pageHitRate;
    Stats::Vector pwrStateTime;

    //Command energies
    Stats::Vector actEnergy;
    Stats::Vector preEnergy;
    Stats::Vector readEnergy;
    Stats::Vector writeEnergy;
    Stats::Vector refreshEnergy;
    //Active Background Energy
    Stats::Vector actBackEnergy;
    //Precharge Background Energy
    Stats::Vector preBackEnergy;
    Stats::Vector totalEnergy;
    //Power Consumed
    Stats::Vector averagePower;

    // Track when we transitioned to the current power state
    Tick pwrStateTick;

    // To track number of banks which are currently active
    unsigned int numBanksActive;

    // Holds the value of the rank of burst issued
    uint8_t activeRank;

    // timestamp offset
    uint64_t timeStampOffset;

    /** @todo this is a temporary workaround until the 4-phase code is
     * committed. upstream caches needs this packet until true is returned, so
     * hold onto it for deletion until a subsequent call
     */
    std::vector<PacketPtr> pendingDelete;

    // One DRAMPower instance per rank
    std::vector<DRAMPower> rankPower;

    /**
     * This function increments the energy when called. If stats are
     * dumped periodically, note accumulated energy values will
     * appear in the stats (even if the stats are reset). This is a
     * result of the energy values coming from DRAMPower, and there
     * is currently no support for resetting the state.
     *
     * @param rank Current rank
     */
    void updatePowerStats(uint8_t rank);

    /**
     * Function for sorting commands in the command list of DRAMPower.
     *
     * @param m1 Memory Command in command list of DRAMPower library
     * @param m2 Memory Command in command list of DRAMPower
     * @return true if timestamp of m1 < timestamp of m2
     */
    static bool sortTime(const Data::MemCommand& m1,
                         const Data::MemCommand& m2) {
        return m1.getTime() < m2.getTime();
    };


  public:

    void regStats();

    DRAMCtrl(const DRAMCtrlParams* p);

    unsigned int drain(DrainManager* dm);

    virtual BaseSlavePort& getSlavePort(const std::string& if_name,
                                        PortID idx = InvalidPortID);

    virtual void init();
    virtual void startup();

  protected:

    Tick recvAtomic(PacketPtr pkt);
    void recvFunctional(PacketPtr pkt);
    bool recvTimingReq(PacketPtr pkt);

};

#endif //__MEM_DRAM_CTRL_HH__