// dram_ctrl.hh revision 9967
/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 *          Ani Udipi
 *          Neha Agarwal
 */

/**
 * @file
 * SimpleDRAM declaration
 */

#ifndef __MEM_SIMPLE_DRAM_HH__
#define __MEM_SIMPLE_DRAM_HH__

#include <deque>

#include "base/statistics.hh"
#include "enums/AddrMap.hh"
#include "enums/MemSched.hh"
#include "enums/PageManage.hh"
#include "mem/abstract_mem.hh"
#include "mem/qport.hh"
#include "params/SimpleDRAM.hh"
#include "sim/eventq.hh"

/**
 * The simple DRAM is a basic single-channel memory controller aiming
 * to mimic a high-level DRAM controller and the most important timing
 * constraints associated with the DRAM. The focus is really on
 * modelling the impact on the system rather than the DRAM itself,
 * hence the focus is on the controller model and not on the
 * memory. By adhering to the correct timing constraints, ultimately
 * there is no need for a memory model in addition to the controller
 * model.
 *
 * As a basic design principle, this controller is not cycle callable,
 * but instead uses events to decide when new decisions can be made,
 * when resources become available, when things are to be considered
 * done, and when to send things back.
Through these simple 78 * principles, we achieve a performant model that is not 79 * cycle-accurate, but enables us to evaluate the system impact of a 80 * wide range of memory technologies, and also collect statistics 81 * about the use of the memory. 82 */ 83class SimpleDRAM : public AbstractMemory 84{ 85 86 private: 87 88 // For now, make use of a queued slave port to avoid dealing with 89 // flow control for the responses being sent back 90 class MemoryPort : public QueuedSlavePort 91 { 92 93 SlavePacketQueue queue; 94 SimpleDRAM& memory; 95 96 public: 97 98 MemoryPort(const std::string& name, SimpleDRAM& _memory); 99 100 protected: 101 102 Tick recvAtomic(PacketPtr pkt); 103 104 void recvFunctional(PacketPtr pkt); 105 106 bool recvTimingReq(PacketPtr); 107 108 virtual AddrRangeList getAddrRanges() const; 109 110 }; 111 112 /** 113 * Our incoming port, for a multi-ported controller add a crossbar 114 * in front of it 115 */ 116 MemoryPort port; 117 118 /** 119 * Remember if we have to retry a request when available. 120 */ 121 bool retryRdReq; 122 bool retryWrReq; 123 124 /** 125 * Remember that a row buffer hit occured 126 */ 127 bool rowHitFlag; 128 129 /** 130 * Use this flag to shutoff reads, i.e. do not schedule any reads 131 * beyond those already done so that we can turn the bus around 132 * and do a few writes, or refresh, or whatever 133 */ 134 bool stopReads; 135 136 /** List to keep track of activate ticks */ 137 std::deque<Tick> actTicks; 138 139 /** 140 * A basic class to track the bank state indirectly via times 141 * "freeAt" and "tRASDoneAt" and what page is currently open. The 142 * bank also keeps track of how many bytes have been accessed in 143 * the open row since it was opened. 
144 */ 145 class Bank 146 { 147 148 public: 149 150 static const uint32_t INVALID_ROW = -1; 151 152 uint32_t openRow; 153 154 Tick freeAt; 155 Tick tRASDoneAt; 156 157 uint32_t bytesAccessed; 158 159 Bank() : 160 openRow(INVALID_ROW), freeAt(0), tRASDoneAt(0), bytesAccessed(0) 161 { } 162 }; 163 164 /** 165 * A burst helper helps organize and manage a packet that is larger than 166 * the DRAM burst size. A system packet that is larger than the burst size 167 * is split into multiple DRAM packets and all those DRAM packets point to 168 * a single burst helper such that we know when the whole packet is served. 169 */ 170 class BurstHelper { 171 172 public: 173 174 /** Number of DRAM bursts requred for a system packet **/ 175 const unsigned int burstCount; 176 177 /** Number of DRAM bursts serviced so far for a system packet **/ 178 unsigned int burstsServiced; 179 180 BurstHelper(unsigned int _burstCount) 181 : burstCount(_burstCount), burstsServiced(0) 182 { } 183 }; 184 185 /** 186 * A DRAM packet stores packets along with the timestamp of when 187 * the packet entered the queue, and also the decoded address. 188 */ 189 class DRAMPacket { 190 191 public: 192 193 /** When did request enter the controller */ 194 const Tick entryTime; 195 196 /** When will request leave the controller */ 197 Tick readyTime; 198 199 /** This comes from the outside world */ 200 const PacketPtr pkt; 201 202 const bool isRead; 203 204 /** Will be populated by address decoder */ 205 const uint8_t rank; 206 const uint8_t bank; 207 const uint16_t row; 208 209 /** 210 * Bank id is calculated considering banks in all the ranks 211 * eg: 2 ranks each with 8 banks, then bankId = 0 --> rank0, bank0 and 212 * bankId = 8 --> rank1, bank0 213 */ 214 const uint16_t bankId; 215 216 /** 217 * The starting address of the DRAM packet. 218 * This address could be unaligned to burst size boundaries. 
The 219 * reason is to keep the address offset so we can accurately check 220 * incoming read packets with packets in the write queue. 221 */ 222 Addr addr; 223 224 /** 225 * The size of this dram packet in bytes 226 * It is always equal or smaller than DRAM burst size 227 */ 228 unsigned int size; 229 230 /** 231 * A pointer to the BurstHelper if this DRAMPacket is a split packet 232 * If not a split packet (common case), this is set to NULL 233 */ 234 BurstHelper* burstHelper; 235 Bank& bankRef; 236 237 DRAMPacket(PacketPtr _pkt, bool is_read, uint8_t _rank, uint8_t _bank, 238 uint16_t _row, uint16_t bank_id, Addr _addr, 239 unsigned int _size, Bank& bank_ref) 240 : entryTime(curTick()), readyTime(curTick()), 241 pkt(_pkt), isRead(is_read), rank(_rank), bank(_bank), row(_row), 242 bankId(bank_id), addr(_addr), size(_size), burstHelper(NULL), 243 bankRef(bank_ref) 244 { } 245 246 }; 247 248 /** 249 * Bunch of things requires to setup "events" in gem5 250 * When event "writeEvent" occurs for example, the method 251 * processWriteEvent is called; no parameters are allowed 252 * in these methods 253 */ 254 void processWriteEvent(); 255 EventWrapper<SimpleDRAM, &SimpleDRAM::processWriteEvent> writeEvent; 256 257 void processRespondEvent(); 258 EventWrapper<SimpleDRAM, &SimpleDRAM::processRespondEvent> respondEvent; 259 260 void processRefreshEvent(); 261 EventWrapper<SimpleDRAM, &SimpleDRAM::processRefreshEvent> refreshEvent; 262 263 void processNextReqEvent(); 264 EventWrapper<SimpleDRAM,&SimpleDRAM::processNextReqEvent> nextReqEvent; 265 266 267 /** 268 * Check if the read queue has room for more entries 269 * 270 * @param pktCount The number of entries needed in the read queue 271 * @return true if read queue is full, false otherwise 272 */ 273 bool readQueueFull(unsigned int pktCount) const; 274 275 /** 276 * Check if the write queue has room for more entries 277 * 278 * @param pktCount The number of entries needed in the write queue 279 * @return true if write 
queue is full, false otherwise 280 */ 281 bool writeQueueFull(unsigned int pktCount) const; 282 283 /** 284 * When a new read comes in, first check if the write q has a 285 * pending request to the same address.\ If not, decode the 286 * address to populate rank/bank/row, create one or mutliple 287 * "dram_pkt", and push them to the back of the read queue.\ 288 * If this is the only 289 * read request in the system, schedule an event to start 290 * servicing it. 291 * 292 * @param pkt The request packet from the outside world 293 * @param pktCount The number of DRAM bursts the pkt 294 * translate to. If pkt size is larger then one full burst, 295 * then pktCount is greater than one. 296 */ 297 void addToReadQueue(PacketPtr pkt, unsigned int pktCount); 298 299 /** 300 * Decode the incoming pkt, create a dram_pkt and push to the 301 * back of the write queue. \If the write q length is more than 302 * the threshold specified by the user, ie the queue is beginning 303 * to get full, stop reads, and start draining writes. 304 * 305 * @param pkt The request packet from the outside world 306 * @param pktCount The number of DRAM bursts the pkt 307 * translate to. If pkt size is larger then one full burst, 308 * then pktCount is greater than one. 309 */ 310 void addToWriteQueue(PacketPtr pkt, unsigned int pktCount); 311 312 /** 313 * Actually do the DRAM access - figure out the latency it 314 * will take to service the req based on bank state, channel state etc 315 * and then update those states to account for this request.\ Based 316 * on this, update the packet's "readyTime" and move it to the 317 * response q from where it will eventually go back to the outside 318 * world. 319 * 320 * @param pkt The DRAM packet created from the outside world pkt 321 */ 322 void doDRAMAccess(DRAMPacket* dram_pkt); 323 324 /** 325 * Check when the channel is free to turnaround, add turnaround 326 * delay and schedule a whole bunch of writes. 
327 */ 328 void triggerWrites(); 329 330 /** 331 * When a packet reaches its "readyTime" in the response Q, 332 * use the "access()" method in AbstractMemory to actually 333 * create the response packet, and send it back to the outside 334 * world requestor. 335 * 336 * @param pkt The packet from the outside world 337 * @param static_latency Static latency to add before sending the packet 338 */ 339 void accessAndRespond(PacketPtr pkt, Tick static_latency); 340 341 /** 342 * Address decoder to figure out physical mapping onto ranks, 343 * banks, and rows. This function is called multiple times on the same 344 * system packet if the pakcet is larger than burst of the memory. The 345 * dramPktAddr is used for the offset within the packet. 346 * 347 * @param pkt The packet from the outside world 348 * @param dramPktAddr The starting address of the DRAM packet 349 * @param size The size of the DRAM packet in bytes 350 * @param isRead Is the request for a read or a write to DRAM 351 * @return A DRAMPacket pointer with the decoded information 352 */ 353 DRAMPacket* decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned int size, bool isRead); 354 355 /** 356 * The memory schduler/arbiter - picks which read request needs to 357 * go next, based on the specified policy such as FCFS or FR-FCFS 358 * and moves it to the head of the read queue. 359 * 360 * @return True if a request was chosen and false if queue is empty 361 */ 362 bool chooseNextRead(); 363 364 /** 365 * Calls chooseNextReq() to pick the right request, then calls 366 * doDRAMAccess on that request in order to actually service 367 * that request 368 */ 369 void scheduleNextReq(); 370 371 /** 372 *Looks at the state of the banks, channels, row buffer hits etc 373 * to estimate how long a request will take to complete. 
374 * 375 * @param dram_pkt The request for which we want to estimate latency 376 * @param inTime The tick at which you want to probe the memory 377 * 378 * @return A pair of ticks, one indicating how many ticks *after* 379 * inTime the request require, and the other indicating how 380 * much of that was just the bank access time, ignoring the 381 * ticks spent simply waiting for resources to become free 382 */ 383 std::pair<Tick, Tick> estimateLatency(DRAMPacket* dram_pkt, Tick inTime); 384 385 /** 386 * Move the request at the head of the read queue to the response 387 * queue, sorting by readyTime.\ If it is the only packet in the 388 * response queue, schedule a respond event to send it back to the 389 * outside world 390 */ 391 void moveToRespQ(); 392 393 /** 394 * Scheduling policy within the write queue 395 */ 396 void chooseNextWrite(); 397 398 /** 399 * Looking at all banks, determine the moment in time when they 400 * are all free. 401 * 402 * @return The tick when all banks are free 403 */ 404 Tick maxBankFreeAt() const; 405 406 /** 407 * Find which are the earliest available banks for the enqueued 408 * requests. Assumes maximum of 64 banks per DIMM 409 * 410 * @param Queued requests to consider 411 * @return One-hot encoded mask of bank indices 412 */ 413 uint64_t minBankFreeAt(const std::deque<DRAMPacket*>& queue) const; 414 415 /** 416 * Keep track of when row activations happen, in order to enforce 417 * the maximum number of activations in the activation window. The 418 * method updates the time that the banks become available based 419 * on the current limits. 
420 */ 421 void recordActivate(Tick act_tick); 422 423 void printParams() const; 424 void printQs() const; 425 426 /** 427 * The controller's main read and write queues 428 */ 429 std::deque<DRAMPacket*> readQueue; 430 std::deque<DRAMPacket*> writeQueue; 431 432 /** 433 * Response queue where read packets wait after we're done working 434 * with them, but it's not time to send the response yet. The 435 * responses are stored seperately mostly to keep the code clean 436 * and help with events scheduling. For all logical purposes such 437 * as sizing the read queue, this and the main read queue need to 438 * be added together. 439 */ 440 std::deque<DRAMPacket*> respQueue; 441 442 /** 443 * If we need to drain, keep the drain manager around until we're 444 * done here. 445 */ 446 DrainManager *drainManager; 447 448 /** 449 * Multi-dimensional vector of banks, first dimension is ranks, 450 * second is bank 451 */ 452 std::vector<std::vector<Bank> > banks; 453 454 /** 455 * The following are basic design parameters of the memory 456 * controller, and are initialized based on parameter values. 457 * The rowsPerBank is determined based on the capacity, number of 458 * ranks and banks, the burst size, and the row buffer size. 459 */ 460 const uint32_t deviceBusWidth; 461 const uint32_t burstLength; 462 const uint32_t deviceRowBufferSize; 463 const uint32_t devicesPerRank; 464 const uint32_t burstSize; 465 const uint32_t rowBufferSize; 466 const uint32_t ranksPerChannel; 467 const uint32_t banksPerRank; 468 const uint32_t channels; 469 uint32_t rowsPerBank; 470 uint32_t columnsPerRowBuffer; 471 const uint32_t readBufferSize; 472 const uint32_t writeBufferSize; 473 const double writeThresholdPerc; 474 uint32_t writeThreshold; 475 476 /** 477 * Basic memory timing parameters initialized based on parameter 478 * values. 
479 */ 480 const Tick tWTR; 481 const Tick tBURST; 482 const Tick tRCD; 483 const Tick tCL; 484 const Tick tRP; 485 const Tick tRAS; 486 const Tick tRFC; 487 const Tick tREFI; 488 const Tick tXAW; 489 const uint32_t activationLimit; 490 491 /** 492 * Memory controller configuration initialized based on parameter 493 * values. 494 */ 495 Enums::MemSched memSchedPolicy; 496 Enums::AddrMap addrMapping; 497 Enums::PageManage pageMgmt; 498 499 /** 500 * Pipeline latency of the controller frontend. The frontend 501 * contribution is added to writes (that complete when they are in 502 * the write buffer) and reads that are serviced the write buffer. 503 */ 504 const Tick frontendLatency; 505 506 /** 507 * Pipeline latency of the backend and PHY. Along with the 508 * frontend contribution, this latency is added to reads serviced 509 * by the DRAM. 510 */ 511 const Tick backendLatency; 512 513 /** 514 * Till when has the main data bus been spoken for already? 515 */ 516 Tick busBusyUntil; 517 518 Tick writeStartTime; 519 Tick prevArrival; 520 int numReqs; 521 522 // All statistics that the model needs to capture 523 Stats::Scalar readReqs; 524 Stats::Scalar writeReqs; 525 Stats::Scalar readBursts; 526 Stats::Scalar writeBursts; 527 Stats::Scalar bytesRead; 528 Stats::Scalar bytesWritten; 529 Stats::Scalar bytesConsumedRd; 530 Stats::Scalar bytesConsumedWr; 531 Stats::Scalar servicedByWrQ; 532 Stats::Scalar neitherReadNorWrite; 533 Stats::Vector perBankRdReqs; 534 Stats::Vector perBankWrReqs; 535 Stats::Scalar numRdRetry; 536 Stats::Scalar numWrRetry; 537 Stats::Scalar totGap; 538 Stats::Vector readPktSize; 539 Stats::Vector writePktSize; 540 Stats::Vector rdQLenPdf; 541 Stats::Vector wrQLenPdf; 542 Stats::Histogram bytesPerActivate; 543 544 // Latencies summed over all requests 545 Stats::Scalar totQLat; 546 Stats::Scalar totMemAccLat; 547 Stats::Scalar totBusLat; 548 Stats::Scalar totBankLat; 549 550 // Average latencies per request 551 Stats::Formula avgQLat; 552 
Stats::Formula avgBankLat; 553 Stats::Formula avgBusLat; 554 Stats::Formula avgMemAccLat; 555 556 // Average bandwidth 557 Stats::Formula avgRdBW; 558 Stats::Formula avgWrBW; 559 Stats::Formula avgConsumedRdBW; 560 Stats::Formula avgConsumedWrBW; 561 Stats::Formula peakBW; 562 Stats::Formula busUtil; 563 564 // Average queue lengths 565 Stats::Average avgRdQLen; 566 Stats::Average avgWrQLen; 567 568 // Row hit count and rate 569 Stats::Scalar readRowHits; 570 Stats::Scalar writeRowHits; 571 Stats::Formula readRowHitRate; 572 Stats::Formula writeRowHitRate; 573 Stats::Formula avgGap; 574 575 /** @todo this is a temporary workaround until the 4-phase code is 576 * committed. upstream caches needs this packet until true is returned, so 577 * hold onto it for deletion until a subsequent call 578 */ 579 std::vector<PacketPtr> pendingDelete; 580 581 public: 582 583 void regStats(); 584 585 SimpleDRAM(const SimpleDRAMParams* p); 586 587 unsigned int drain(DrainManager* dm); 588 589 virtual BaseSlavePort& getSlavePort(const std::string& if_name, 590 PortID idx = InvalidPortID); 591 592 virtual void init(); 593 virtual void startup(); 594 595 protected: 596 597 Tick recvAtomic(PacketPtr pkt); 598 void recvFunctional(PacketPtr pkt); 599 bool recvTimingReq(PacketPtr pkt); 600 601}; 602 603#endif //__MEM_SIMPLE_DRAM_HH__ 604