1/* 2 * Copyright (c) 2012-2014 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2013 Amin Farmahini-Farahani 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Andreas Hansson 41 * Ani Udipi 42 * Neha Agarwal 43 */ 44 45/** 46 * @file 47 * DRAMCtrl declaration 48 */ 49 50#ifndef __MEM_DRAM_CTRL_HH__ 51#define __MEM_DRAM_CTRL_HH__ 52 53#include <deque> 54 55#include "base/statistics.hh" 56#include "enums/AddrMap.hh" 57#include "enums/MemSched.hh" 58#include "enums/PageManage.hh" 59#include "mem/abstract_mem.hh" 60#include "mem/qport.hh" 61#include "params/DRAMCtrl.hh" 62#include "sim/eventq.hh" 63 64/**
| 1/* 2 * Copyright (c) 2012-2014 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2013 Amin Farmahini-Farahani 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Andreas Hansson 41 * Ani Udipi 42 * Neha Agarwal 43 */ 44 45/** 46 * @file 47 * DRAMCtrl declaration 48 */ 49 50#ifndef __MEM_DRAM_CTRL_HH__ 51#define __MEM_DRAM_CTRL_HH__ 52 53#include <deque> 54 55#include "base/statistics.hh" 56#include "enums/AddrMap.hh" 57#include "enums/MemSched.hh" 58#include "enums/PageManage.hh" 59#include "mem/abstract_mem.hh" 60#include "mem/qport.hh" 61#include "params/DRAMCtrl.hh" 62#include "sim/eventq.hh" 63 64/**
|
 */
class DRAMCtrl : public AbstractMemory
{

  private:

    // For now, make use of a queued slave port to avoid dealing with
    // flow control for the responses being sent back
    class MemoryPort : public QueuedSlavePort
    {

        SlavePacketQueue queue;
        DRAMCtrl& memory;

      public:

        MemoryPort(const std::string& name, DRAMCtrl& _memory);

      protected:

        Tick recvAtomic(PacketPtr pkt);

        void recvFunctional(PacketPtr pkt);

        bool recvTimingReq(PacketPtr);

        virtual AddrRangeList getAddrRanges() const;

    };

    /**
     * Our incoming port, for a multi-ported controller add a crossbar
     * in front of it
     */
    MemoryPort port;

    /**
     * Remember if we have to retry a request when available.
     */
    bool retryRdReq;
    bool retryWrReq;

    /**
     * Bus state used to control the read/write switching and drive
     * the scheduling of the next request.
     */
    enum BusState {
        READ = 0,
        READ_TO_WRITE,
        WRITE,
        WRITE_TO_READ
    };

    BusState busState;

    /** List to keep track of activate ticks */
    std::vector<std::deque<Tick>> actTicks;

    /**
     * A basic class to track the bank state, i.e. what row is
     * currently open (if any), when is the bank free to accept a new
     * column (read/write) command, when can it be precharged, and
     * when can it be activated.
     *
     * The bank also keeps track of how many bytes have been accessed
     * in the open row since it was opened.
     */
    class Bank
    {

      public:

        static const uint32_t NO_ROW = -1;

        uint32_t openRow;
        uint8_t rank;
        uint8_t bank;

        Tick colAllowedAt;
        Tick preAllowedAt;
        Tick actAllowedAt;

        uint32_t rowAccesses;
        uint32_t bytesAccessed;

        Bank() :
            openRow(NO_ROW), rank(0), bank(0),
            colAllowedAt(0), preAllowedAt(0), actAllowedAt(0),
            rowAccesses(0), bytesAccessed(0)
        { }
    };

    /**
     * A burst helper helps organize and manage a packet that is larger than
     * the DRAM burst size. A system packet that is larger than the burst size
     * is split into multiple DRAM packets and all those DRAM packets point to
     * a single burst helper such that we know when the whole packet is served.
     */
    class BurstHelper {

      public:

        /** Number of DRAM bursts required for a system packet **/
        const unsigned int burstCount;

        /** Number of DRAM bursts serviced so far for a system packet **/
        unsigned int burstsServiced;

        BurstHelper(unsigned int _burstCount)
            : burstCount(_burstCount), burstsServiced(0)
        { }
    };

    /**
     * A DRAM packet stores packets along with the timestamp of when
     * the packet entered the queue, and also the decoded address.
     */
    class DRAMPacket {

      public:

        /** When did request enter the controller */
        const Tick entryTime;

        /** When will request leave the controller */
        Tick readyTime;

        /** This comes from the outside world */
        const PacketPtr pkt;

        const bool isRead;

        /** Will be populated by address decoder */
        const uint8_t rank;
        const uint8_t bank;
        const uint32_t row;

        /**
         * Bank id is calculated considering banks in all the ranks
         * eg: 2 ranks each with 8 banks, then bankId = 0 --> rank0, bank0 and
         * bankId = 8 --> rank1, bank0
         */
        const uint16_t bankId;

        /**
         * The starting address of the DRAM packet.
         * This address could be unaligned to burst size boundaries. The
         * reason is to keep the address offset so we can accurately check
         * incoming read packets with packets in the write queue.
         */
        Addr addr;

        /**
         * The size of this dram packet in bytes
         * It is always equal or smaller than the DRAM burst size
         */
        unsigned int size;

        /**
         * A pointer to the BurstHelper if this DRAMPacket is a split packet
         * If not a split packet (common case), this is set to NULL
         */
        BurstHelper* burstHelper;
        Bank& bankRef;

        DRAMPacket(PacketPtr _pkt, bool is_read, uint8_t _rank, uint8_t _bank,
                   uint32_t _row, uint16_t bank_id, Addr _addr,
                   unsigned int _size, Bank& bank_ref)
            : entryTime(curTick()), readyTime(curTick()),
              pkt(_pkt), isRead(is_read), rank(_rank), bank(_bank), row(_row),
              bankId(bank_id), addr(_addr), size(_size), burstHelper(NULL),
              bankRef(bank_ref)
        { }

    };

    /**
     * Bunch of things required to set up "events" in gem5
     * When event "respondEvent" occurs for example, the method
     * processRespondEvent is called; no parameters are allowed
     * in these methods
     */
    void processNextReqEvent();
    EventWrapper<DRAMCtrl,&DRAMCtrl::processNextReqEvent> nextReqEvent;

    void processRespondEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processRespondEvent> respondEvent;

    void processActivateEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processActivateEvent> activateEvent;

    void processPrechargeEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processPrechargeEvent> prechargeEvent;

    void processRefreshEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processRefreshEvent> refreshEvent;

    void processPowerEvent();
    EventWrapper<DRAMCtrl,&DRAMCtrl::processPowerEvent> powerEvent;

    /**
     * Check if the read queue has room for more entries
     *
     * @param pktCount The number of entries needed in the read queue
     * @return true if read queue is full, false otherwise
     */
    bool readQueueFull(unsigned int pktCount) const;

    /**
     * Check if the write queue has room for more entries
     *
     * @param pktCount The number of entries needed in the write queue
     * @return true if write queue is full, false otherwise
     */
    bool writeQueueFull(unsigned int pktCount) const;

    /**
     * When a new read comes in, first check if the write q has a
     * pending request to the same address. If not, decode the
     * address to populate rank/bank/row, create one or multiple
     * "dram_pkt", and push them to the back of the read queue.
     * If this is the only
     * read request in the system, schedule an event to start
     * servicing it.
     *
     * @param pkt The request packet from the outside world
     * @param pktCount The number of DRAM bursts the pkt
     * translate to. If pkt size is larger than one full burst,
     * then pktCount is greater than one.
     */
    void addToReadQueue(PacketPtr pkt, unsigned int pktCount);

    /**
     * Decode the incoming pkt, create a dram_pkt and push to the
     * back of the write queue. If the write q length is more than
     * the threshold specified by the user, ie the queue is beginning
     * to get full, stop reads, and start draining writes.
     *
     * @param pkt The request packet from the outside world
     * @param pktCount The number of DRAM bursts the pkt
     * translate to. If pkt size is larger than one full burst,
     * then pktCount is greater than one.
     */
    void addToWriteQueue(PacketPtr pkt, unsigned int pktCount);

    /**
     * Actually do the DRAM access - figure out the latency it
     * will take to service the req based on bank state, channel state etc
     * and then update those states to account for this request. Based
     * on this, update the packet's "readyTime" and move it to the
     * response q from where it will eventually go back to the outside
     * world.
     *
     * @param pkt The DRAM packet created from the outside world pkt
     */
    void doDRAMAccess(DRAMPacket* dram_pkt);

    /**
     * When a packet reaches its "readyTime" in the response Q,
     * use the "access()" method in AbstractMemory to actually
     * create the response packet, and send it back to the outside
     * world requestor.
     *
     * @param pkt The packet from the outside world
     * @param static_latency Static latency to add before sending the packet
     */
    void accessAndRespond(PacketPtr pkt, Tick static_latency);

    /**
     * Address decoder to figure out physical mapping onto ranks,
     * banks, and rows. This function is called multiple times on the same
     * system packet if the packet is larger than burst of the memory. The
     * dramPktAddr is used for the offset within the packet.
     *
     * @param pkt The packet from the outside world
     * @param dramPktAddr The starting address of the DRAM packet
     * @param size The size of the DRAM packet in bytes
     * @param isRead Is the request for a read or a write to DRAM
     * @return A DRAMPacket pointer with the decoded information
     */
    DRAMPacket* decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned int size,
                           bool isRead);

    /**
     * The memory scheduler/arbiter - picks which request needs to
     * go next, based on the specified policy such as FCFS or FR-FCFS
     * and moves it to the head of the queue.
     */
    void chooseNext(std::deque<DRAMPacket*>& queue);

    /**
     * For FR-FCFS policy reorder the read/write queue depending on row buffer
     * hits and earliest banks available in DRAM
     */
    void reorderQueue(std::deque<DRAMPacket*>& queue);

    /**
     * Find which are the earliest banks ready to issue an activate
     * for the enqueued requests. Assumes maximum of 64 banks per DIMM
     *
     * @param queue Queued requests to consider
     * @return One-hot encoded mask of bank indices
     */
    uint64_t minBankActAt(const std::deque<DRAMPacket*>& queue) const;

    /**
     * Keep track of when row activations happen, in order to enforce
     * the maximum number of activations in the activation window. The
     * method updates the time that the banks become available based
     * on the current limits.
     *
     * @param bank Reference to the bank
     * @param act_tick Time when the activation takes place
     * @param row Index of the row
     */
    void activateBank(Bank& bank, Tick act_tick, uint32_t row);

    /**
     * Precharge a given bank and also update when the precharge is
     * done. This will also deal with any stats related to the
     * accesses to the open page.
     *
     * @param bank_ref The bank to precharge
     * @param pre_at Time when the precharge takes place
     * @param trace Is this an auto precharge then do not add to trace
     */
    void prechargeBank(Bank& bank_ref, Tick pre_at, bool trace = true);

    /**
     * Used for debugging to observe the contents of the queues.
     */
    void printQs() const;

    /**
     * The controller's main read and write queues
     */
    std::deque<DRAMPacket*> readQueue;
    std::deque<DRAMPacket*> writeQueue;

    /**
     * Response queue where read packets wait after we're done working
     * with them, but it's not time to send the response yet. The
     * responses are stored separately mostly to keep the code clean
     * and help with events scheduling. For all logical purposes such
     * as sizing the read queue, this and the main read queue need to
     * be added together.
     */
    std::deque<DRAMPacket*> respQueue;

    /**
     * If we need to drain, keep the drain manager around until we're
     * done here.
     */
    DrainManager *drainManager;

    /**
     * Multi-dimensional vector of banks, first dimension is ranks,
     * second is bank
     */
    std::vector<std::vector<Bank> > banks;

    /**
     * The following are basic design parameters of the memory
     * controller, and are initialized based on parameter values.
     * The rowsPerBank is determined based on the capacity, number of
     * ranks and banks, the burst size, and the row buffer size.
     */
    const uint32_t deviceBusWidth;
    const uint32_t burstLength;
    const uint32_t deviceRowBufferSize;
    const uint32_t devicesPerRank;
    const uint32_t burstSize;
    const uint32_t rowBufferSize;
    const uint32_t columnsPerRowBuffer;
    const uint32_t columnsPerStripe;
    const uint32_t ranksPerChannel;
    const uint32_t banksPerRank;
    const uint32_t channels;
    uint32_t rowsPerBank;
    const uint32_t readBufferSize;
    const uint32_t writeBufferSize;
    const uint32_t writeHighThreshold;
    const uint32_t writeLowThreshold;
    const uint32_t minWritesPerSwitch;
    uint32_t writesThisTime;
    uint32_t readsThisTime;

    /**
     * Basic memory timing parameters initialized based on parameter
     * values.
     */
    const Tick M5_CLASS_VAR_USED tCK;
    const Tick tWTR;
    const Tick tRTW;
    const Tick tBURST;
    const Tick tRCD;
    const Tick tCL;
    const Tick tRP;
    const Tick tRAS;
    const Tick tWR;
    const Tick tRTP;
    const Tick tRFC;
    const Tick tREFI;
    const Tick tRRD;
    const Tick tXAW;
    const uint32_t activationLimit;

    /**
     * Memory controller configuration initialized based on parameter
     * values.
     */
    Enums::MemSched memSchedPolicy;
    Enums::AddrMap addrMapping;
    Enums::PageManage pageMgmt;

    /**
     * Max column accesses (read and write) per row, before forcefully
     * closing it.
     */
    const uint32_t maxAccessesPerRow;

    /**
     * Pipeline latency of the controller frontend. The frontend
     * contribution is added to writes (that complete when they are in
     * the write buffer) and reads that are serviced by the write buffer.
     */
    const Tick frontendLatency;

    /**
     * Pipeline latency of the backend and PHY. Along with the
     * frontend contribution, this latency is added to reads serviced
     * by the DRAM.
     */
    const Tick backendLatency;

    /**
     * Till when has the main data bus been spoken for already?
     */
    Tick busBusyUntil;

    /**
     * Keep track of when a refresh is due.
     */
    Tick refreshDueAt;

    /**
     * The refresh state is used to control the progress of the
     * refresh scheduling. When normal operation is in progress the
     * refresh state is idle. From there, it progresses to the refresh
     * drain state once tREFI has passed. The refresh drain state
     * captures the DRAM row active state, as it will stay there until
     * all ongoing accesses complete. Thereafter all banks are
     * precharged, and lastly, the DRAM is refreshed.
     */
    enum RefreshState {
        REF_IDLE = 0,
        REF_DRAIN,
        REF_PRE,
        REF_RUN
    };

    RefreshState refreshState;

    /**
     * The power state captures the different operational states of
     * the DRAM and interacts with the bus read/write state machine,
     * and the refresh state machine. In the idle state all banks are
     * precharged. From there we either go to an auto refresh (as
     * determined by the refresh state machine), or to a precharge
     * power down mode. From idle the memory can also go to the active
     * state (with one or more banks active), and in turn from there
     * to active power down. At the moment we do not capture the deep
     * power down and self-refresh state.
     */
    enum PowerState {
        PWR_IDLE = 0,
        PWR_REF,
        PWR_PRE_PDN,
        PWR_ACT,
        PWR_ACT_PDN
    };

    /**
     * Since we are taking decisions out of order, we need to keep
     * track of what power transition is happening at what time, such
     * that we can go back in time and change history. For example, if
     * we precharge all banks and schedule going to the idle state, we
     * might at a later point decide to activate a bank before the
     * transition to idle would have taken place.
     */
    PowerState pwrStateTrans;

    /**
     * Current power state.
     */
    PowerState pwrState;

    /**
     * Schedule a power state transition in the future, and
     * potentially override an already scheduled transition.
     *
     * @param pwr_state Power state to transition to
     * @param tick Tick when transition should take place
     */
    void schedulePowerEvent(PowerState pwr_state, Tick tick);

    Tick prevArrival;

    /**
     * The soonest you have to start thinking about the next request
     * is the longest access time that can occur before
     * busBusyUntil. Assuming you need to precharge, open a new row,
     * and access, it is tRP + tRCD + tCL.
     */
    Tick nextReqTime;

    // All statistics that the model needs to capture
    Stats::Scalar readReqs;
    Stats::Scalar writeReqs;
    Stats::Scalar readBursts;
    Stats::Scalar writeBursts;
    Stats::Scalar bytesReadDRAM;
    Stats::Scalar bytesReadWrQ;
    Stats::Scalar bytesWritten;
    Stats::Scalar bytesReadSys;
    Stats::Scalar bytesWrittenSys;
    Stats::Scalar servicedByWrQ;
    Stats::Scalar mergedWrBursts;
    Stats::Scalar neitherReadNorWrite;
    Stats::Vector perBankRdBursts;
    Stats::Vector perBankWrBursts;
    Stats::Scalar numRdRetry;
    Stats::Scalar numWrRetry;
    Stats::Scalar totGap;
    Stats::Vector readPktSize;
    Stats::Vector writePktSize;
    Stats::Vector rdQLenPdf;
    Stats::Vector wrQLenPdf;
    Stats::Histogram bytesPerActivate;
    Stats::Histogram rdPerTurnAround;
    Stats::Histogram wrPerTurnAround;

    // Latencies summed over all requests
    Stats::Scalar totQLat;
    Stats::Scalar totMemAccLat;
    Stats::Scalar totBusLat;

    // Average latencies per request
    Stats::Formula avgQLat;
    Stats::Formula avgBusLat;
    Stats::Formula avgMemAccLat;

    // Average bandwidth
    Stats::Formula avgRdBW;
    Stats::Formula avgWrBW;
    Stats::Formula avgRdBWSys;
    Stats::Formula avgWrBWSys;
    Stats::Formula peakBW;
    Stats::Formula busUtil;
    Stats::Formula busUtilRead;
    Stats::Formula busUtilWrite;

    // Average queue lengths
    Stats::Average avgRdQLen;
    Stats::Average avgWrQLen;

    // Row hit count and rate
    Stats::Scalar readRowHits;
    Stats::Scalar writeRowHits;
    Stats::Formula readRowHitRate;
    Stats::Formula writeRowHitRate;
    Stats::Formula avgGap;

    // DRAM Power Calculation
    Stats::Formula pageHitRate;
    Stats::Vector pwrStateTime;

    // Track when we transitioned to the current power state
    Tick pwrStateTick;

    // To track number of banks which are currently active
    unsigned int numBanksActive;

    /** @todo this is a temporary workaround until the 4-phase code is
     * committed. upstream caches needs this packet until true is returned, so
     * hold onto it for deletion until a subsequent call
     */
    std::vector<PacketPtr> pendingDelete;

  public:

    void regStats();

    DRAMCtrl(const DRAMCtrlParams* p);

    unsigned int drain(DrainManager* dm);

    virtual BaseSlavePort& getSlavePort(const std::string& if_name,
                                        PortID idx = InvalidPortID);

    virtual void init();
    virtual void startup();

  protected:

    Tick recvAtomic(PacketPtr pkt);
    void recvFunctional(PacketPtr pkt);
    bool recvTimingReq(PacketPtr pkt);

};

#endif //__MEM_DRAM_CTRL_HH__
| 84 */ 85class DRAMCtrl : public AbstractMemory 86{ 87 88 private: 89 90 // For now, make use of a queued slave port to avoid dealing with 91 // flow control for the responses being sent back 92 class MemoryPort : public QueuedSlavePort 93 { 94 95 SlavePacketQueue queue; 96 DRAMCtrl& memory; 97 98 public: 99 100 MemoryPort(const std::string& name, DRAMCtrl& _memory); 101 102 protected: 103 104 Tick recvAtomic(PacketPtr pkt); 105 106 void recvFunctional(PacketPtr pkt); 107 108 bool recvTimingReq(PacketPtr); 109 110 virtual AddrRangeList getAddrRanges() const; 111 112 }; 113 114 /** 115 * Our incoming port, for a multi-ported controller add a crossbar 116 * in front of it 117 */ 118 MemoryPort port; 119 120 /** 121 * Remember if we have to retry a request when available. 122 */ 123 bool retryRdReq; 124 bool retryWrReq; 125 126 /** 127 * Bus state used to control the read/write switching and drive 128 * the scheduling of the next request. 129 */ 130 enum BusState { 131 READ = 0, 132 READ_TO_WRITE, 133 WRITE, 134 WRITE_TO_READ 135 }; 136 137 BusState busState; 138 139 /** List to keep track of activate ticks */ 140 std::vector<std::deque<Tick>> actTicks; 141 142 /** 143 * A basic class to track the bank state, i.e. what row is 144 * currently open (if any), when is the bank free to accept a new 145 * column (read/write) command, when can it be precharged, and 146 * when can it be activated. 147 * 148 * The bank also keeps track of how many bytes have been accessed 149 * in the open row since it was opened. 
150 */ 151 class Bank 152 { 153 154 public: 155 156 static const uint32_t NO_ROW = -1; 157 158 uint32_t openRow; 159 uint8_t rank; 160 uint8_t bank; 161 162 Tick colAllowedAt; 163 Tick preAllowedAt; 164 Tick actAllowedAt; 165 166 uint32_t rowAccesses; 167 uint32_t bytesAccessed; 168 169 Bank() : 170 openRow(NO_ROW), rank(0), bank(0), 171 colAllowedAt(0), preAllowedAt(0), actAllowedAt(0), 172 rowAccesses(0), bytesAccessed(0) 173 { } 174 }; 175 176 /** 177 * A burst helper helps organize and manage a packet that is larger than 178 * the DRAM burst size. A system packet that is larger than the burst size 179 * is split into multiple DRAM packets and all those DRAM packets point to 180 * a single burst helper such that we know when the whole packet is served. 181 */ 182 class BurstHelper { 183 184 public: 185 186 /** Number of DRAM bursts requred for a system packet **/ 187 const unsigned int burstCount; 188 189 /** Number of DRAM bursts serviced so far for a system packet **/ 190 unsigned int burstsServiced; 191 192 BurstHelper(unsigned int _burstCount) 193 : burstCount(_burstCount), burstsServiced(0) 194 { } 195 }; 196 197 /** 198 * A DRAM packet stores packets along with the timestamp of when 199 * the packet entered the queue, and also the decoded address. 200 */ 201 class DRAMPacket { 202 203 public: 204 205 /** When did request enter the controller */ 206 const Tick entryTime; 207 208 /** When will request leave the controller */ 209 Tick readyTime; 210 211 /** This comes from the outside world */ 212 const PacketPtr pkt; 213 214 const bool isRead; 215 216 /** Will be populated by address decoder */ 217 const uint8_t rank; 218 const uint8_t bank; 219 const uint32_t row; 220 221 /** 222 * Bank id is calculated considering banks in all the ranks 223 * eg: 2 ranks each with 8 banks, then bankId = 0 --> rank0, bank0 and 224 * bankId = 8 --> rank1, bank0 225 */ 226 const uint16_t bankId; 227 228 /** 229 * The starting address of the DRAM packet. 
230 * This address could be unaligned to burst size boundaries. The 231 * reason is to keep the address offset so we can accurately check 232 * incoming read packets with packets in the write queue. 233 */ 234 Addr addr; 235 236 /** 237 * The size of this dram packet in bytes 238 * It is always equal or smaller than DRAM burst size 239 */ 240 unsigned int size; 241 242 /** 243 * A pointer to the BurstHelper if this DRAMPacket is a split packet 244 * If not a split packet (common case), this is set to NULL 245 */ 246 BurstHelper* burstHelper; 247 Bank& bankRef; 248 249 DRAMPacket(PacketPtr _pkt, bool is_read, uint8_t _rank, uint8_t _bank, 250 uint32_t _row, uint16_t bank_id, Addr _addr, 251 unsigned int _size, Bank& bank_ref) 252 : entryTime(curTick()), readyTime(curTick()), 253 pkt(_pkt), isRead(is_read), rank(_rank), bank(_bank), row(_row), 254 bankId(bank_id), addr(_addr), size(_size), burstHelper(NULL), 255 bankRef(bank_ref) 256 { } 257 258 }; 259 260 /** 261 * Bunch of things requires to setup "events" in gem5 262 * When event "respondEvent" occurs for example, the method 263 * processRespondEvent is called; no parameters are allowed 264 * in these methods 265 */ 266 void processNextReqEvent(); 267 EventWrapper<DRAMCtrl,&DRAMCtrl::processNextReqEvent> nextReqEvent; 268 269 void processRespondEvent(); 270 EventWrapper<DRAMCtrl, &DRAMCtrl::processRespondEvent> respondEvent; 271 272 void processActivateEvent(); 273 EventWrapper<DRAMCtrl, &DRAMCtrl::processActivateEvent> activateEvent; 274 275 void processPrechargeEvent(); 276 EventWrapper<DRAMCtrl, &DRAMCtrl::processPrechargeEvent> prechargeEvent; 277 278 void processRefreshEvent(); 279 EventWrapper<DRAMCtrl, &DRAMCtrl::processRefreshEvent> refreshEvent; 280 281 void processPowerEvent(); 282 EventWrapper<DRAMCtrl,&DRAMCtrl::processPowerEvent> powerEvent; 283 284 /** 285 * Check if the read queue has room for more entries 286 * 287 * @param pktCount The number of entries needed in the read queue 288 * @return true 
if read queue is full, false otherwise 289 */ 290 bool readQueueFull(unsigned int pktCount) const; 291 292 /** 293 * Check if the write queue has room for more entries 294 * 295 * @param pktCount The number of entries needed in the write queue 296 * @return true if write queue is full, false otherwise 297 */ 298 bool writeQueueFull(unsigned int pktCount) const; 299 300 /** 301 * When a new read comes in, first check if the write q has a 302 * pending request to the same address.\ If not, decode the 303 * address to populate rank/bank/row, create one or mutliple 304 * "dram_pkt", and push them to the back of the read queue.\ 305 * If this is the only 306 * read request in the system, schedule an event to start 307 * servicing it. 308 * 309 * @param pkt The request packet from the outside world 310 * @param pktCount The number of DRAM bursts the pkt 311 * translate to. If pkt size is larger then one full burst, 312 * then pktCount is greater than one. 313 */ 314 void addToReadQueue(PacketPtr pkt, unsigned int pktCount); 315 316 /** 317 * Decode the incoming pkt, create a dram_pkt and push to the 318 * back of the write queue. \If the write q length is more than 319 * the threshold specified by the user, ie the queue is beginning 320 * to get full, stop reads, and start draining writes. 321 * 322 * @param pkt The request packet from the outside world 323 * @param pktCount The number of DRAM bursts the pkt 324 * translate to. If pkt size is larger then one full burst, 325 * then pktCount is greater than one. 326 */ 327 void addToWriteQueue(PacketPtr pkt, unsigned int pktCount); 328 329 /** 330 * Actually do the DRAM access - figure out the latency it 331 * will take to service the req based on bank state, channel state etc 332 * and then update those states to account for this request.\ Based 333 * on this, update the packet's "readyTime" and move it to the 334 * response q from where it will eventually go back to the outside 335 * world. 
336 * 337 * @param pkt The DRAM packet created from the outside world pkt 338 */ 339 void doDRAMAccess(DRAMPacket* dram_pkt); 340 341 /** 342 * When a packet reaches its "readyTime" in the response Q, 343 * use the "access()" method in AbstractMemory to actually 344 * create the response packet, and send it back to the outside 345 * world requestor. 346 * 347 * @param pkt The packet from the outside world 348 * @param static_latency Static latency to add before sending the packet 349 */ 350 void accessAndRespond(PacketPtr pkt, Tick static_latency); 351 352 /** 353 * Address decoder to figure out physical mapping onto ranks, 354 * banks, and rows. This function is called multiple times on the same 355 * system packet if the pakcet is larger than burst of the memory. The 356 * dramPktAddr is used for the offset within the packet. 357 * 358 * @param pkt The packet from the outside world 359 * @param dramPktAddr The starting address of the DRAM packet 360 * @param size The size of the DRAM packet in bytes 361 * @param isRead Is the request for a read or a write to DRAM 362 * @return A DRAMPacket pointer with the decoded information 363 */ 364 DRAMPacket* decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned int size, 365 bool isRead); 366 367 /** 368 * The memory schduler/arbiter - picks which request needs to 369 * go next, based on the specified policy such as FCFS or FR-FCFS 370 * and moves it to the head of the queue. 371 */ 372 void chooseNext(std::deque<DRAMPacket*>& queue); 373 374 /** 375 * For FR-FCFS policy reorder the read/write queue depending on row buffer 376 * hits and earliest banks available in DRAM 377 */ 378 void reorderQueue(std::deque<DRAMPacket*>& queue); 379 380 /** 381 * Find which are the earliest banks ready to issue an activate 382 * for the enqueued requests. 
     * Assumes a maximum of 64 banks per DIMM.
     *
     * @param queue Queued requests to consider
     * @return One-hot encoded mask of bank indices
     */
    uint64_t minBankActAt(const std::deque<DRAMPacket*>& queue) const;

    /**
     * Keep track of when row activations happen, in order to enforce
     * the maximum number of activations in the activation window. The
     * method updates the time that the banks become available based
     * on the current limits.
     *
     * @param bank Reference to the bank
     * @param act_tick Time when the activation takes place
     * @param row Index of the row
     */
    void activateBank(Bank& bank, Tick act_tick, uint32_t row);

    /**
     * Precharge a given bank and also update when the precharge is
     * done. This will also deal with any stats related to the
     * accesses to the open page.
     *
     * @param bank_ref The bank to precharge
     * @param pre_at Time when the precharge takes place
     * @param trace If this is an auto precharge, then do not add to trace
     */
    void prechargeBank(Bank& bank_ref, Tick pre_at, bool trace = true);

    /**
     * Used for debugging to observe the contents of the queues.
     */
    void printQs() const;

    /**
     * The controller's main read and write queues
     */
    std::deque<DRAMPacket*> readQueue;
    std::deque<DRAMPacket*> writeQueue;

    /**
     * Response queue where read packets wait after we're done working
     * with them, but it's not time to send the response yet. The
     * responses are stored separately mostly to keep the code clean
     * and help with events scheduling. For all logical purposes such
     * as sizing the read queue, this and the main read queue need to
     * be added together.
     */
    std::deque<DRAMPacket*> respQueue;

    /**
     * If we need to drain, keep the drain manager around until we're
     * done here.
     */
    DrainManager *drainManager;

    /**
     * Multi-dimensional vector of banks, first dimension is ranks,
     * second is bank
     */
    std::vector<std::vector<Bank> > banks;

    /**
     * The following are basic design parameters of the memory
     * controller, and are initialized based on parameter values.
     * The rowsPerBank is determined based on the capacity, number of
     * ranks and banks, the burst size, and the row buffer size.
     */
    const uint32_t deviceBusWidth;
    const uint32_t burstLength;
    const uint32_t deviceRowBufferSize;
    const uint32_t devicesPerRank;
    const uint32_t burstSize;
    const uint32_t rowBufferSize;
    const uint32_t columnsPerRowBuffer;
    const uint32_t columnsPerStripe;
    const uint32_t ranksPerChannel;
    const uint32_t banksPerRank;
    const uint32_t channels;
    uint32_t rowsPerBank;
    const uint32_t readBufferSize;
    const uint32_t writeBufferSize;
    const uint32_t writeHighThreshold;
    const uint32_t writeLowThreshold;
    const uint32_t minWritesPerSwitch;
    uint32_t writesThisTime;
    uint32_t readsThisTime;

    /**
     * Basic memory timing parameters initialized based on parameter
     * values.
     */
    const Tick M5_CLASS_VAR_USED tCK;
    const Tick tWTR;
    const Tick tRTW;
    const Tick tBURST;
    const Tick tRCD;
    const Tick tCL;
    const Tick tRP;
    const Tick tRAS;
    const Tick tWR;
    const Tick tRTP;
    const Tick tRFC;
    const Tick tREFI;
    const Tick tRRD;
    const Tick tXAW;
    const uint32_t activationLimit;

    /**
     * Memory controller configuration initialized based on parameter
     * values.
     */
    Enums::MemSched memSchedPolicy;
    Enums::AddrMap addrMapping;
    Enums::PageManage pageMgmt;

    /**
     * Max column accesses (read and write) per row, before forcefully
     * closing it.
     */
    const uint32_t maxAccessesPerRow;

    /**
     * Pipeline latency of the controller frontend.
     * The frontend
     * contribution is added to writes (that complete when they are in
     * the write buffer) and reads that are serviced by the write buffer.
     */
    const Tick frontendLatency;

    /**
     * Pipeline latency of the backend and PHY. Along with the
     * frontend contribution, this latency is added to reads serviced
     * by the DRAM.
     */
    const Tick backendLatency;

    /**
     * Till when has the main data bus been spoken for already?
     */
    Tick busBusyUntil;

    /**
     * Keep track of when a refresh is due.
     */
    Tick refreshDueAt;

    /**
     * The refresh state is used to control the progress of the
     * refresh scheduling. When normal operation is in progress the
     * refresh state is idle. From there, it progresses to the refresh
     * drain state once tREFI has passed. The refresh drain state
     * captures the DRAM row active state, as it will stay there until
     * all ongoing accesses complete. Thereafter all banks are
     * precharged, and lastly, the DRAM is refreshed.
     */
    enum RefreshState {
        REF_IDLE = 0,
        REF_DRAIN,
        REF_PRE,
        REF_RUN
    };

    RefreshState refreshState;

    /**
     * The power state captures the different operational states of
     * the DRAM and interacts with the bus read/write state machine,
     * and the refresh state machine. In the idle state all banks are
     * precharged. From there we either go to an auto refresh (as
     * determined by the refresh state machine), or to a precharge
     * power down mode. From idle the memory can also go to the active
     * state (with one or more banks active), and in turn from there
     * to active power down. At the moment we do not capture the deep
     * power down and self-refresh state.
     */
    enum PowerState {
        PWR_IDLE = 0,
        PWR_REF,
        PWR_PRE_PDN,
        PWR_ACT,
        PWR_ACT_PDN
    };

    /**
     * Since we are taking decisions out of order, we need to keep
     * track of what power transition is happening at what time, such
     * that we can go back in time and change history. For example, if
     * we precharge all banks and schedule going to the idle state, we
     * might at a later point decide to activate a bank before the
     * transition to idle would have taken place.
     */
    PowerState pwrStateTrans;

    /**
     * Current power state.
     */
    PowerState pwrState;

    /**
     * Schedule a power state transition in the future, and
     * potentially override an already scheduled transition.
     *
     * @param pwr_state Power state to transition to
     * @param tick Tick when transition should take place
     */
    void schedulePowerEvent(PowerState pwr_state, Tick tick);

    /**
     * Tick of the most recent request arrival.
     * NOTE(review): presumably used for the inter-arrival gap
     * statistics (totGap/avgGap) — confirm against the implementation.
     */
    Tick prevArrival;

    /**
     * The soonest you have to start thinking about the next request
     * is the longest access time that can occur before
     * busBusyUntil. Assuming you need to precharge, open a new row,
     * and access, it is tRP + tRCD + tCL.
     */
    Tick nextReqTime;

    // All statistics that the model needs to capture
    Stats::Scalar readReqs;
    Stats::Scalar writeReqs;
    Stats::Scalar readBursts;
    Stats::Scalar writeBursts;
    Stats::Scalar bytesReadDRAM;
    Stats::Scalar bytesReadWrQ;
    Stats::Scalar bytesWritten;
    Stats::Scalar bytesReadSys;
    Stats::Scalar bytesWrittenSys;
    Stats::Scalar servicedByWrQ;
    Stats::Scalar mergedWrBursts;
    Stats::Scalar neitherReadNorWrite;
    Stats::Vector perBankRdBursts;
    Stats::Vector perBankWrBursts;
    Stats::Scalar numRdRetry;
    Stats::Scalar numWrRetry;
    Stats::Scalar totGap;
    Stats::Vector readPktSize;
    Stats::Vector writePktSize;
    Stats::Vector rdQLenPdf;
    Stats::Vector wrQLenPdf;
    Stats::Histogram bytesPerActivate;
    Stats::Histogram rdPerTurnAround;
    Stats::Histogram wrPerTurnAround;

    // Latencies summed over all requests
    Stats::Scalar totQLat;
    Stats::Scalar totMemAccLat;
    Stats::Scalar totBusLat;

    // Average latencies per request
    Stats::Formula avgQLat;
    Stats::Formula avgBusLat;
    Stats::Formula avgMemAccLat;

    // Average bandwidth
    Stats::Formula avgRdBW;
    Stats::Formula avgWrBW;
    Stats::Formula avgRdBWSys;
    Stats::Formula avgWrBWSys;
    Stats::Formula peakBW;
    Stats::Formula busUtil;
    Stats::Formula busUtilRead;
    Stats::Formula busUtilWrite;

    // Average queue lengths
    Stats::Average avgRdQLen;
    Stats::Average avgWrQLen;

    // Row hit count and rate
    Stats::Scalar readRowHits;
    Stats::Scalar writeRowHits;
    Stats::Formula readRowHitRate;
    Stats::Formula writeRowHitRate;
    Stats::Formula avgGap;

    // DRAM Power Calculation
    Stats::Formula pageHitRate;
    Stats::Vector pwrStateTime;

    // Track when we transitioned to the current power state
    Tick pwrStateTick;

    // To track number of banks which are currently active
    unsigned int numBanksActive;

    /** @todo this is a temporary workaround until the 4-phase code is
     * committed. Upstream caches need this packet until true is returned, so
     * hold onto it for deletion until a subsequent call
     */
    std::vector<PacketPtr> pendingDelete;

  public:

    void regStats();

    DRAMCtrl(const DRAMCtrlParams* p);

    unsigned int drain(DrainManager* dm);

    virtual BaseSlavePort& getSlavePort(const std::string& if_name,
                                        PortID idx = InvalidPortID);

    virtual void init();
    virtual void startup();

  protected:

    Tick recvAtomic(PacketPtr pkt);
    void recvFunctional(PacketPtr pkt);
    bool recvTimingReq(PacketPtr pkt);

};

#endif //__MEM_DRAM_CTRL_HH__
|