// dram_ctrl.hh revision 10208:c249f7660eb7
/*
 * Copyright (c) 2012-2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 *          Ani Udipi
 *          Neha Agarwal
 */

/**
 * @file
 * DRAMCtrl declaration
 */

#ifndef __MEM_DRAM_CTRL_HH__
#define __MEM_DRAM_CTRL_HH__

#include <deque>

#include "base/statistics.hh"
#include "enums/AddrMap.hh"
#include "enums/MemSched.hh"
#include "enums/PageManage.hh"
#include "mem/abstract_mem.hh"
#include "mem/qport.hh"
#include "params/DRAMCtrl.hh"
#include "sim/eventq.hh"

/**
 * The DRAM controller is a basic single-channel memory controller
 * aiming to mimic a high-level DRAM controller and the most important
 * timing constraints associated with the DRAM. The focus is really on
 * modelling the impact on the system rather than the DRAM itself,
 * hence the focus is on the controller model and not on the
 * memory. By adhering to the correct timing constraints, ultimately
 * there is no need for a memory model in addition to the controller
 * model.
 *
 * As a basic design principle, this controller is not cycle callable,
 * but instead uses events to decide when new decisions can be made,
 * when resources become available, when things are to be considered
 * done, and when to send things back. Through these simple
 * principles, we achieve a performant model that is not
 * cycle-accurate, but enables us to evaluate the system impact of a
 * wide range of memory technologies, and also collect statistics
 * about the use of the memory.
 */
class DRAMCtrl : public AbstractMemory
{

  private:

    // For now, make use of a queued slave port to avoid dealing with
    // flow control for the responses being sent back
    class MemoryPort : public QueuedSlavePort
    {

        /** Packet queue holding responses until they can be sent back */
        SlavePacketQueue queue;

        /** The controller that owns this port */
        DRAMCtrl& memory;

      public:

        MemoryPort(const std::string& name, DRAMCtrl& _memory);

      protected:

        Tick recvAtomic(PacketPtr pkt);

        void recvFunctional(PacketPtr pkt);

        bool recvTimingReq(PacketPtr);

        virtual AddrRangeList getAddrRanges() const;

    };

    /**
     * Our incoming port, for a multi-ported controller add a crossbar
     * in front of it
     */
    MemoryPort port;

    /**
     * Remember if we have to retry a request when available.
     */
    bool retryRdReq;
    bool retryWrReq;

    /**
     * Remember that a row buffer hit occurred
     */
    bool rowHitFlag;

    /**
     * Bus state used to control the read/write switching and drive
     * the scheduling of the next request.
     */
    enum BusState {
        READ = 0,
        READ_TO_WRITE,
        WRITE,
        WRITE_TO_READ
    };

    BusState busState;

    /** List to keep track of activate ticks */
    std::vector<std::deque<Tick>> actTicks;

    /**
     * A basic class to track the bank state indirectly via times
     * "freeAt" and "tRASDoneAt" and what page is currently open. The
     * bank also keeps track of how many bytes have been accessed in
     * the open row since it was opened.
     */
    class Bank
    {

      public:

        // Sentinel row value used when no row is open (all-ones in
        // uint32_t, from the -1 conversion)
        static const uint32_t NO_ROW = -1;

        /** Currently open row, or NO_ROW if the bank is precharged */
        uint32_t openRow;

        /** When the bank is available for a new access */
        Tick freeAt;
        /** When tRAS expires for the current activation */
        Tick tRASDoneAt;
        /** Earliest tick an activate is allowed (activation window limit) */
        Tick actAllowedAt;

        /** Column accesses to the open row since it was activated */
        uint32_t rowAccesses;
        /** Bytes accessed in the open row since it was activated */
        uint32_t bytesAccessed;

        Bank() :
            openRow(NO_ROW), freeAt(0), tRASDoneAt(0), actAllowedAt(0),
            rowAccesses(0), bytesAccessed(0)
        { }
    };

    /**
     * A burst helper helps organize and manage a packet that is larger than
     * the DRAM burst size. A system packet that is larger than the burst size
     * is split into multiple DRAM packets and all those DRAM packets point to
     * a single burst helper such that we know when the whole packet is served.
     */
    class BurstHelper {

      public:

        /** Number of DRAM bursts required for a system packet **/
        const unsigned int burstCount;

        /** Number of DRAM bursts serviced so far for a system packet **/
        unsigned int burstsServiced;

        BurstHelper(unsigned int _burstCount)
            : burstCount(_burstCount), burstsServiced(0)
        { }
    };

    /**
     * A DRAM packet stores packets along with the timestamp of when
     * the packet entered the queue, and also the decoded address.
     */
    class DRAMPacket {

      public:

        /** When did request enter the controller */
        const Tick entryTime;

        /** When will request leave the controller */
        Tick readyTime;

        /** This comes from the outside world */
        const PacketPtr pkt;

        /** True for a read access, false for a write */
        const bool isRead;

        /** Will be populated by address decoder */
        const uint8_t rank;
        const uint8_t bank;
        const uint16_t row;

        /**
         * Bank id is calculated considering banks in all the ranks
         * eg: 2 ranks each with 8 banks, then bankId = 0 --> rank0, bank0 and
         * bankId = 8 --> rank1, bank0
         */
        const uint16_t bankId;

        /**
         * The starting address of the DRAM packet.
         * This address could be unaligned to burst size boundaries. The
         * reason is to keep the address offset so we can accurately check
         * incoming read packets with packets in the write queue.
         */
        Addr addr;

        /**
         * The size of this dram packet in bytes
         * It is always equal or smaller than DRAM burst size
         */
        unsigned int size;

        /**
         * A pointer to the BurstHelper if this DRAMPacket is a split packet
         * If not a split packet (common case), this is set to NULL
         */
        BurstHelper* burstHelper;

        /** Reference to the bank this packet targets */
        Bank& bankRef;

        DRAMPacket(PacketPtr _pkt, bool is_read, uint8_t _rank, uint8_t _bank,
                   uint16_t _row, uint16_t bank_id, Addr _addr,
                   unsigned int _size, Bank& bank_ref)
            : entryTime(curTick()), readyTime(curTick()),
              pkt(_pkt), isRead(is_read), rank(_rank), bank(_bank), row(_row),
              bankId(bank_id), addr(_addr), size(_size), burstHelper(NULL),
              bankRef(bank_ref)
        { }

    };

    /**
     * Bunch of things required to set up "events" in gem5.
     * When event "respondEvent" occurs for example, the method
     * processRespondEvent is called; no parameters are allowed
     * in these methods
     */
    void processNextReqEvent();
    EventWrapper<DRAMCtrl,&DRAMCtrl::processNextReqEvent> nextReqEvent;

    void processRespondEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processRespondEvent> respondEvent;

    void processActivateEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processActivateEvent> activateEvent;

    void processPrechargeEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processPrechargeEvent> prechargeEvent;

    void processRefreshEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processRefreshEvent> refreshEvent;

    void processPowerEvent();
    EventWrapper<DRAMCtrl,&DRAMCtrl::processPowerEvent> powerEvent;

    /**
     * Check if the read queue has room for more entries
     *
     * @param pktCount The number of entries needed in the read queue
     * @return true if read queue is full, false otherwise
     */
    bool readQueueFull(unsigned int pktCount) const;

    /**
     * Check if the write queue has room for more entries
     *
     * @param pktCount The number of entries needed in the write queue
     * @return true if write queue is full, false otherwise
     */
    bool writeQueueFull(unsigned int pktCount) const;

    /**
     * When a new read comes in, first check if the write queue has a
     * pending request to the same address. If not, decode the
     * address to populate rank/bank/row, create one or multiple
     * "dram_pkt", and push them to the back of the read queue.
     * If this is the only
     * read request in the system, schedule an event to start
     * servicing it.
     *
     * @param pkt The request packet from the outside world
     * @param pktCount The number of DRAM bursts the pkt
     * translates to. If pkt size is larger than one full burst,
     * then pktCount is greater than one.
     */
    void addToReadQueue(PacketPtr pkt, unsigned int pktCount);

    /**
     * Decode the incoming pkt, create a dram_pkt and push to the
     * back of the write queue. If the write queue length is more than
     * the threshold specified by the user, ie the queue is beginning
     * to get full, stop reads, and start draining writes.
     *
     * @param pkt The request packet from the outside world
     * @param pktCount The number of DRAM bursts the pkt
     * translates to. If pkt size is larger than one full burst,
     * then pktCount is greater than one.
     */
    void addToWriteQueue(PacketPtr pkt, unsigned int pktCount);

    /**
     * Actually do the DRAM access - figure out the latency it
     * will take to service the req based on bank state, channel state etc
     * and then update those states to account for this request. Based
     * on this, update the packet's "readyTime" and move it to the
     * response q from where it will eventually go back to the outside
     * world.
     *
     * @param dram_pkt The DRAM packet created from the outside world pkt
     */
    void doDRAMAccess(DRAMPacket* dram_pkt);

    /**
     * When a packet reaches its "readyTime" in the response Q,
     * use the "access()" method in AbstractMemory to actually
     * create the response packet, and send it back to the outside
     * world requestor.
     *
     * @param pkt The packet from the outside world
     * @param static_latency Static latency to add before sending the packet
     */
    void accessAndRespond(PacketPtr pkt, Tick static_latency);

    /**
     * Address decoder to figure out physical mapping onto ranks,
     * banks, and rows. This function is called multiple times on the same
     * system packet if the packet is larger than burst of the memory. The
     * dramPktAddr is used for the offset within the packet.
     *
     * @param pkt The packet from the outside world
     * @param dramPktAddr The starting address of the DRAM packet
     * @param size The size of the DRAM packet in bytes
     * @param isRead Is the request for a read or a write to DRAM
     * @return A DRAMPacket pointer with the decoded information
     */
    DRAMPacket* decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned int size,
                           bool isRead);

    /**
     * The memory scheduler/arbiter - picks which request needs to
     * go next, based on the specified policy such as FCFS or FR-FCFS
     * and moves it to the head of the queue.
     */
    void chooseNext(std::deque<DRAMPacket*>& queue);

    /**
     * Looks at the state of the banks, channels, row buffer hits etc
     * to estimate how long a request will take to complete.
     *
     * @param dram_pkt The request for which we want to estimate latency
     * @param inTime The tick at which you want to probe the memory
     *
     * @return A pair of ticks, one indicating how many ticks *after*
     * inTime the request require, and the other indicating how
     * much of that was just the bank access time, ignoring the
     * ticks spent simply waiting for resources to become free
     */
    std::pair<Tick, Tick> estimateLatency(DRAMPacket* dram_pkt, Tick inTime);

    /**
     * Move the request at the head of the read queue to the response
     * queue, sorting by readyTime. If it is the only packet in the
     * response queue, schedule a respond event to send it back to the
     * outside world
     */
    void moveToRespQ();

    /**
     * For FR-FCFS policy reorder the read/write queue depending on row buffer
     * hits and earliest banks available in DRAM
     */
    void reorderQueue(std::deque<DRAMPacket*>& queue);

    /**
     * Find which are the earliest available banks for the enqueued
     * requests. Assumes maximum of 64 banks per DIMM
     *
     * @param queue Queued requests to consider
     * @return One-hot encoded mask of bank indices
     */
    uint64_t minBankFreeAt(const std::deque<DRAMPacket*>& queue) const;

    /**
     * Keep track of when row activations happen, in order to enforce
     * the maximum number of activations in the activation window. The
     * method updates the time that the banks become available based
     * on the current limits.
     */
    void recordActivate(Tick act_tick, uint8_t rank, uint8_t bank,
                        uint16_t row);

    /**
     * Precharge a given bank and also update when the precharge is
     * done. This will also deal with any stats related to the
     * accesses to the open page.
     *
     * @param bank The bank to precharge
     * @param free_at Time when the precharge is done
     */
    void prechargeBank(Bank& bank, Tick free_at);

    // Print the configured controller parameters (debug aid)
    void printParams() const;

    /**
     * Used for debugging to observe the contents of the queues.
     */
    void printQs() const;

    /**
     * The controller's main read and write queues
     */
    std::deque<DRAMPacket*> readQueue;
    std::deque<DRAMPacket*> writeQueue;

    /**
     * Response queue where read packets wait after we're done working
     * with them, but it's not time to send the response yet. The
     * responses are stored separately mostly to keep the code clean
     * and help with events scheduling. For all logical purposes such
     * as sizing the read queue, this and the main read queue need to
     * be added together.
     */
    std::deque<DRAMPacket*> respQueue;

    /**
     * If we need to drain, keep the drain manager around until we're
     * done here.
     */
    DrainManager *drainManager;

    /**
     * Multi-dimensional vector of banks, first dimension is ranks,
     * second is bank
     */
    std::vector<std::vector<Bank> > banks;

    /**
     * The following are basic design parameters of the memory
     * controller, and are initialized based on parameter values.
     * The rowsPerBank is determined based on the capacity, number of
     * ranks and banks, the burst size, and the row buffer size.
     */
    const uint32_t deviceBusWidth;
    const uint32_t burstLength;
    const uint32_t deviceRowBufferSize;
    const uint32_t devicesPerRank;
    const uint32_t burstSize;
    const uint32_t rowBufferSize;
    const uint32_t columnsPerRowBuffer;
    const uint32_t ranksPerChannel;
    const uint32_t banksPerRank;
    const uint32_t channels;
    uint32_t rowsPerBank;
    const uint32_t readBufferSize;
    const uint32_t writeBufferSize;
    const uint32_t writeHighThreshold;
    const uint32_t writeLowThreshold;
    const uint32_t minWritesPerSwitch;
    uint32_t writesThisTime;
    uint32_t readsThisTime;

    /**
     * Basic memory timing parameters initialized based on parameter
     * values.
     */
    const Tick tWTR;
    const Tick tRTW;
    const Tick tBURST;
    const Tick tRCD;
    const Tick tCL;
    const Tick tRP;
    const Tick tRAS;
    const Tick tRFC;
    const Tick tREFI;
    const Tick tRRD;
    const Tick tXAW;
    const uint32_t activationLimit;

    /**
     * Memory controller configuration initialized based on parameter
     * values.
     */
    Enums::MemSched memSchedPolicy;
    Enums::AddrMap addrMapping;
    Enums::PageManage pageMgmt;

    /**
     * Max column accesses (read and write) per row, before forcefully
     * closing it.
     */
    const uint32_t maxAccessesPerRow;

    /**
     * Pipeline latency of the controller frontend. The frontend
     * contribution is added to writes (that complete when they are in
     * the write buffer) and reads that are serviced by the write buffer.
     */
    const Tick frontendLatency;

    /**
     * Pipeline latency of the backend and PHY. Along with the
     * frontend contribution, this latency is added to reads serviced
     * by the DRAM.
     */
    const Tick backendLatency;

    /**
     * Till when has the main data bus been spoken for already?
     */
    Tick busBusyUntil;

    /**
     * Keep track of when a refresh is due.
     */
    Tick refreshDueAt;

    /**
     * The refresh state is used to control the progress of the
     * refresh scheduling. When normal operation is in progress the
     * refresh state is idle. From there, it progresses to the refresh
     * drain state once tREFI has passed. The refresh drain state
     * captures the DRAM row active state, as it will stay there until
     * all ongoing accesses complete. Thereafter all banks are
     * precharged, and lastly, the DRAM is refreshed.
     */
    enum RefreshState {
        REF_IDLE = 0,
        REF_DRAIN,
        REF_PRE,
        REF_RUN
    };

    RefreshState refreshState;

    /**
     * The power state captures the different operational states of
     * the DRAM and interacts with the bus read/write state machine,
     * and the refresh state machine. In the idle state all banks are
     * precharged. From there we either go to an auto refresh (as
     * determined by the refresh state machine), or to a precharge
     * power down mode. From idle the memory can also go to the active
     * state (with one or more banks active), and in turn from there
     * to active power down. At the moment we do not capture the deep
     * power down and self-refresh state.
     */
    enum PowerState {
        PWR_IDLE = 0,
        PWR_REF,
        PWR_PRE_PDN,
        PWR_ACT,
        PWR_ACT_PDN
    };

    /**
     * Since we are taking decisions out of order, we need to keep
     * track of what power transition is happening at what time, such
     * that we can go back in time and change history. For example, if
     * we precharge all banks and schedule going to the idle state, we
     * might at a later point decide to activate a bank before the
     * transition to idle would have taken place.
     */
    PowerState pwrStateTrans;

    /**
     * Current power state.
     */
    PowerState pwrState;

    /**
     * Schedule a power state transition in the future, and
     * potentially override an already scheduled transition.
     *
     * @param pwr_state Power state to transition to
     * @param tick Tick when transition should take place
     */
    void schedulePowerEvent(PowerState pwr_state, Tick tick);

    /** Tick of the previous request arrival, used for the gap stats */
    Tick prevArrival;

    /**
     * The soonest you have to start thinking about the next request
     * is the longest access time that can occur before
     * busBusyUntil. Assuming you need to precharge, open a new row,
     * and access, it is tRP + tRCD + tCL.
     */
    Tick nextReqTime;

    // All statistics that the model needs to capture
    Stats::Scalar readReqs;
    Stats::Scalar writeReqs;
    Stats::Scalar readBursts;
    Stats::Scalar writeBursts;
    Stats::Scalar bytesReadDRAM;
    Stats::Scalar bytesReadWrQ;
    Stats::Scalar bytesWritten;
    Stats::Scalar bytesReadSys;
    Stats::Scalar bytesWrittenSys;
    Stats::Scalar servicedByWrQ;
    Stats::Scalar mergedWrBursts;
    Stats::Scalar neitherReadNorWrite;
    Stats::Vector perBankRdBursts;
    Stats::Vector perBankWrBursts;
    Stats::Scalar numRdRetry;
    Stats::Scalar numWrRetry;
    Stats::Scalar totGap;
    Stats::Vector readPktSize;
    Stats::Vector writePktSize;
    Stats::Vector rdQLenPdf;
    Stats::Vector wrQLenPdf;
    Stats::Histogram bytesPerActivate;
    Stats::Histogram rdPerTurnAround;
    Stats::Histogram wrPerTurnAround;

    // Latencies summed over all requests
    Stats::Scalar totQLat;
    Stats::Scalar totMemAccLat;
    Stats::Scalar totBusLat;
    Stats::Scalar totBankLat;

    // Average latencies per request
    Stats::Formula avgQLat;
    Stats::Formula avgBankLat;
    Stats::Formula avgBusLat;
    Stats::Formula avgMemAccLat;

    // Average bandwidth
    Stats::Formula avgRdBW;
    Stats::Formula avgWrBW;
    Stats::Formula avgRdBWSys;
    Stats::Formula avgWrBWSys;
    Stats::Formula peakBW;
    Stats::Formula busUtil;
    Stats::Formula busUtilRead;
    Stats::Formula busUtilWrite;

    // Average queue lengths
    Stats::Average avgRdQLen;
    Stats::Average avgWrQLen;

    // Row hit count and rate
    Stats::Scalar readRowHits;
    Stats::Scalar writeRowHits;
    Stats::Formula readRowHitRate;
    Stats::Formula writeRowHitRate;
    Stats::Formula avgGap;

    // DRAM Power Calculation
    Stats::Formula pageHitRate;
    Stats::Vector pwrStateTime;

    // Track when we transitioned to the current power state
    Tick pwrStateTick;

    // To track number of banks which are currently active
    unsigned int numBanksActive;

    /** @todo this is a temporary workaround until the 4-phase code is
     * committed. upstream caches needs this packet until true is returned, so
     * hold onto it for deletion until a subsequent call
     */
    std::vector<PacketPtr> pendingDelete;

  public:

    void regStats();

    DRAMCtrl(const DRAMCtrlParams* p);

    unsigned int drain(DrainManager* dm);

    virtual BaseSlavePort& getSlavePort(const std::string& if_name,
                                        PortID idx = InvalidPortID);

    virtual void init();
    virtual void startup();

  protected:

    Tick recvAtomic(PacketPtr pkt);
    void recvFunctional(PacketPtr pkt);
    bool recvTimingReq(PacketPtr pkt);

};

#endif //__MEM_DRAM_CTRL_HH__