// dram_ctrl.hh revision 10393:0fafa62b6c01
/*
 * Copyright (c) 2012-2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 *          Ani Udipi
 *          Neha Agarwal
 */

/**
 * @file
 * DRAMCtrl declaration
 */

#ifndef __MEM_DRAM_CTRL_HH__
#define __MEM_DRAM_CTRL_HH__

#include <deque>

#include "base/statistics.hh"
#include "enums/AddrMap.hh"
#include "enums/MemSched.hh"
#include "enums/PageManage.hh"
#include "mem/abstract_mem.hh"
#include "mem/qport.hh"
#include "params/DRAMCtrl.hh"
#include "sim/eventq.hh"

/**
 * The DRAM controller is a single-channel memory controller capturing
 * the most important timing constraints associated with a
 * contemporary DRAM. For multi-channel memory systems, the controller
 * is combined with a crossbar model, with the channel address
 * interleaving taking part in the crossbar.
 *
 * As a basic design principle, this controller
 * model is not cycle callable, but instead uses events to: 1) decide
 * when new decisions can be made, 2) when resources become available,
 * 3) when things are to be considered done, and 4) when to send
 * things back. Through these simple principles, the model delivers
 * high performance, and lots of flexibility, allowing users to
 * evaluate the system impact of a wide range of memory technologies,
 * such as DDR3/4, LPDDR2/3/4, WideIO1/2, HBM and HMC.
 *
 * For more details, please see Hansson et al, "Simulating DRAM
 * controllers for future system architecture exploration",
 * Proc. ISPASS, 2014. If you use this model as part of your research
 * please cite the paper.
 */
class DRAMCtrl : public AbstractMemory
{

  private:

    // For now, make use of a queued slave port to avoid dealing with
    // flow control for the responses being sent back
    class MemoryPort : public QueuedSlavePort
    {

        // Queue holding responses until they can be sent back
        SlavePacketQueue queue;
        // The controller that owns this port
        DRAMCtrl& memory;

      public:

        MemoryPort(const std::string& name, DRAMCtrl& _memory);

      protected:

        Tick recvAtomic(PacketPtr pkt);

        void recvFunctional(PacketPtr pkt);

        bool recvTimingReq(PacketPtr);

        virtual AddrRangeList getAddrRanges() const;

    };

    /**
     * Our incoming port, for a multi-ported controller add a crossbar
     * in front of it
     */
    MemoryPort port;

    /**
     * Remember if we have to retry a request when available.
     */
    bool retryRdReq;
    bool retryWrReq;

    /**
     * Bus state used to control the read/write switching and drive
     * the scheduling of the next request.
     */
    enum BusState {
        READ = 0,
        READ_TO_WRITE,
        WRITE,
        WRITE_TO_READ
    };

    // Current state of the bus state machine above
    BusState busState;

    /** List to keep track of activate ticks */
    std::vector<std::deque<Tick>> actTicks;

    /**
     * A basic class to track the bank state, i.e. what row is
     * currently open (if any), when is the bank free to accept a new
     * column (read/write) command, when can it be precharged, and
     * when can it be activated.
     *
     * The bank also keeps track of how many bytes have been accessed
     * in the open row since it was opened.
     */
    class Bank
    {

      public:

        // Sentinel used in openRow to indicate that no row is open
        static const uint32_t NO_ROW = -1;

        uint32_t openRow;
        uint8_t rank;
        uint8_t bank;

        // Earliest ticks at which the next column, precharge and
        // activate commands, respectively, are allowed
        Tick colAllowedAt;
        Tick preAllowedAt;
        Tick actAllowedAt;

        // Accesses and bytes seen in the currently open row
        uint32_t rowAccesses;
        uint32_t bytesAccessed;

        Bank() :
            openRow(NO_ROW), rank(0), bank(0),
            colAllowedAt(0), preAllowedAt(0), actAllowedAt(0),
            rowAccesses(0), bytesAccessed(0)
        { }
    };

    /**
     * A burst helper helps organize and manage a packet that is larger than
     * the DRAM burst size. A system packet that is larger than the burst size
     * is split into multiple DRAM packets and all those DRAM packets point to
     * a single burst helper such that we know when the whole packet is served.
     */
    class BurstHelper {

      public:

        /** Number of DRAM bursts required for a system packet **/
        const unsigned int burstCount;

        /** Number of DRAM bursts serviced so far for a system packet **/
        unsigned int burstsServiced;

        BurstHelper(unsigned int _burstCount)
            : burstCount(_burstCount), burstsServiced(0)
        { }
    };

    /**
     * A DRAM packet stores packets along with the timestamp of when
     * the packet entered the queue, and also the decoded address.
     */
    class DRAMPacket {

      public:

        /** When did request enter the controller */
        const Tick entryTime;

        /** When will request leave the controller */
        Tick readyTime;

        /** This comes from the outside world */
        const PacketPtr pkt;

        // True for a read, false for a write
        const bool isRead;

        /** Will be populated by address decoder */
        const uint8_t rank;
        const uint8_t bank;
        const uint32_t row;

        /**
         * Bank id is calculated considering banks in all the ranks
         * eg: 2 ranks each with 8 banks, then bankId = 0 --> rank0, bank0 and
         * bankId = 8 --> rank1, bank0
         */
        const uint16_t bankId;

        /**
         * The starting address of the DRAM packet.
         * This address could be unaligned to burst size boundaries. The
         * reason is to keep the address offset so we can accurately check
         * incoming read packets with packets in the write queue.
         */
        Addr addr;

        /**
         * The size of this dram packet in bytes
         * It is always equal or smaller than DRAM burst size
         */
        unsigned int size;

        /**
         * A pointer to the BurstHelper if this DRAMPacket is a split packet
         * If not a split packet (common case), this is set to NULL
         */
        BurstHelper* burstHelper;
        // Reference to the bank this packet maps to
        Bank& bankRef;

        DRAMPacket(PacketPtr _pkt, bool is_read, uint8_t _rank, uint8_t _bank,
                   uint32_t _row, uint16_t bank_id, Addr _addr,
                   unsigned int _size, Bank& bank_ref)
            : entryTime(curTick()), readyTime(curTick()),
              pkt(_pkt), isRead(is_read), rank(_rank), bank(_bank), row(_row),
              bankId(bank_id), addr(_addr), size(_size), burstHelper(NULL),
              bankRef(bank_ref)
        { }

    };

    /**
     * Bunch of things required to set up "events" in gem5
     * When event "respondEvent" occurs for example, the method
     * processRespondEvent is called; no parameters are allowed
     * in these methods
     */
    void processNextReqEvent();
    EventWrapper<DRAMCtrl,&DRAMCtrl::processNextReqEvent> nextReqEvent;

    void processRespondEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processRespondEvent> respondEvent;

    void processActivateEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processActivateEvent> activateEvent;

    void processPrechargeEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processPrechargeEvent> prechargeEvent;

    void processRefreshEvent();
    EventWrapper<DRAMCtrl, &DRAMCtrl::processRefreshEvent> refreshEvent;

    void processPowerEvent();
    EventWrapper<DRAMCtrl,&DRAMCtrl::processPowerEvent> powerEvent;

    /**
     * Check if the read queue has room for more entries
     *
     * @param pktCount The number of entries needed in the read queue
     * @return true if read queue is full, false otherwise
     */
    bool readQueueFull(unsigned int pktCount) const;

    /**
     * Check if the write queue has room for more entries
     *
     * @param pktCount The number of entries needed in the write queue
     * @return true if write queue is full, false otherwise
     */
    bool writeQueueFull(unsigned int pktCount) const;

    /**
     * When a new read comes in, first check if the write q has a
     * pending request to the same address. If not, decode the
     * address to populate rank/bank/row, create one or multiple
     * "dram_pkt", and push them to the back of the read queue.
     * If this is the only
     * read request in the system, schedule an event to start
     * servicing it.
     *
     * @param pkt The request packet from the outside world
     * @param pktCount The number of DRAM bursts the pkt
     * translates to. If pkt size is larger than one full burst,
     * then pktCount is greater than one.
     */
    void addToReadQueue(PacketPtr pkt, unsigned int pktCount);

    /**
     * Decode the incoming pkt, create a dram_pkt and push to the
     * back of the write queue. If the write q length is more than
     * the threshold specified by the user, ie the queue is beginning
     * to get full, stop reads, and start draining writes.
     *
     * @param pkt The request packet from the outside world
     * @param pktCount The number of DRAM bursts the pkt
     * translates to. If pkt size is larger than one full burst,
     * then pktCount is greater than one.
     */
    void addToWriteQueue(PacketPtr pkt, unsigned int pktCount);

    /**
     * Actually do the DRAM access - figure out the latency it
     * will take to service the req based on bank state, channel state etc
     * and then update those states to account for this request. Based
     * on this, update the packet's "readyTime" and move it to the
     * response q from where it will eventually go back to the outside
     * world.
     *
     * @param dram_pkt The DRAM packet created from the outside world pkt
     */
    void doDRAMAccess(DRAMPacket* dram_pkt);

    /**
     * When a packet reaches its "readyTime" in the response Q,
     * use the "access()" method in AbstractMemory to actually
     * create the response packet, and send it back to the outside
     * world requestor.
     *
     * @param pkt The packet from the outside world
     * @param static_latency Static latency to add before sending the packet
     */
    void accessAndRespond(PacketPtr pkt, Tick static_latency);

    /**
     * Address decoder to figure out physical mapping onto ranks,
     * banks, and rows. This function is called multiple times on the same
     * system packet if the packet is larger than burst of the memory. The
     * dramPktAddr is used for the offset within the packet.
     *
     * @param pkt The packet from the outside world
     * @param dramPktAddr The starting address of the DRAM packet
     * @param size The size of the DRAM packet in bytes
     * @param isRead Is the request for a read or a write to DRAM
     * @return A DRAMPacket pointer with the decoded information
     */
    DRAMPacket* decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned int size,
                           bool isRead);

    /**
     * The memory scheduler/arbiter - picks which request needs to
     * go next, based on the specified policy such as FCFS or FR-FCFS
     * and moves it to the head of the queue.
     * Prioritizes accesses to the same rank as previous burst unless
     * controller is switching command type.
     *
     * @param queue Queued requests to consider
     * @param switched_cmd_type Command type is changing
     */
    void chooseNext(std::deque<DRAMPacket*>& queue, bool switched_cmd_type);

    /**
     * For FR-FCFS policy reorder the read/write queue depending on row buffer
     * hits and earliest banks available in DRAM
     * Prioritizes accesses to the same rank as previous burst unless
     * controller is switching command type.
     *
     * @param queue Queued requests to consider
     * @param switched_cmd_type Command type is changing
     */
    void reorderQueue(std::deque<DRAMPacket*>& queue, bool switched_cmd_type);

    /**
     * Find which are the earliest banks ready to issue an activate
     * for the enqueued requests. Assumes maximum of 64 banks per DIMM
     * Also checks if the bank is already prepped.
     *
     * @param queue Queued requests to consider
     * @param switched_cmd_type Command type is changing
     * @return One-hot encoded mask of bank indices
     */
    uint64_t minBankPrep(const std::deque<DRAMPacket*>& queue,
                         bool switched_cmd_type) const;

    /**
     * Keep track of when row activations happen, in order to enforce
     * the maximum number of activations in the activation window. The
     * method updates the time that the banks become available based
     * on the current limits.
     *
     * @param bank Reference to the bank
     * @param act_tick Time when the activation takes place
     * @param row Index of the row
     */
    void activateBank(Bank& bank, Tick act_tick, uint32_t row);

    /**
     * Precharge a given bank and also update when the precharge is
     * done. This will also deal with any stats related to the
     * accesses to the open page.
     *
     * @param bank_ref The bank to precharge
     * @param pre_at Time when the precharge takes place
     * @param trace If false (e.g. for an auto precharge) do not add
     * the command to the trace
     */
    void prechargeBank(Bank& bank_ref, Tick pre_at, bool trace = true);

    /**
     * Used for debugging to observe the contents of the queues.
     */
    void printQs() const;

    /**
     * The controller's main read and write queues
     */
    std::deque<DRAMPacket*> readQueue;
    std::deque<DRAMPacket*> writeQueue;

    /**
     * Response queue where read packets wait after we're done working
     * with them, but it's not time to send the response yet. The
     * responses are stored separately mostly to keep the code clean
     * and help with events scheduling. For all logical purposes such
     * as sizing the read queue, this and the main read queue need to
     * be added together.
     */
    std::deque<DRAMPacket*> respQueue;

    /**
     * If we need to drain, keep the drain manager around until we're
     * done here.
     */
    DrainManager *drainManager;

    /**
     * Multi-dimensional vector of banks, first dimension is ranks,
     * second is bank
     */
    std::vector<std::vector<Bank> > banks;

    /**
     * The following are basic design parameters of the memory
     * controller, and are initialized based on parameter values.
     * The rowsPerBank is determined based on the capacity, number of
     * ranks and banks, the burst size, and the row buffer size.
     */
    const uint32_t deviceBusWidth;
    const uint32_t burstLength;
    const uint32_t deviceRowBufferSize;
    const uint32_t devicesPerRank;
    const uint32_t burstSize;
    const uint32_t rowBufferSize;
    const uint32_t columnsPerRowBuffer;
    const uint32_t columnsPerStripe;
    const uint32_t ranksPerChannel;
    const uint32_t banksPerRank;
    const uint32_t channels;
    // Derived after construction, hence not const like the others
    uint32_t rowsPerBank;
    const uint32_t readBufferSize;
    const uint32_t writeBufferSize;
    const uint32_t writeHighThreshold;
    const uint32_t writeLowThreshold;
    const uint32_t minWritesPerSwitch;
    // Counters for the reads/writes issued since the last bus turnaround
    uint32_t writesThisTime;
    uint32_t readsThisTime;

    /**
     * Basic memory timing parameters initialized based on parameter
     * values.
     */
    const Tick M5_CLASS_VAR_USED tCK;
    const Tick tWTR;
    const Tick tRTW;
    const Tick tCS;
    const Tick tBURST;
    const Tick tRCD;
    const Tick tCL;
    const Tick tRP;
    const Tick tRAS;
    const Tick tWR;
    const Tick tRTP;
    const Tick tRFC;
    const Tick tREFI;
    const Tick tRRD;
    const Tick tXAW;
    const uint32_t activationLimit;

    /**
     * Memory controller configuration initialized based on parameter
     * values.
     */
    Enums::MemSched memSchedPolicy;
    Enums::AddrMap addrMapping;
    Enums::PageManage pageMgmt;

    /**
     * Max column accesses (read and write) per row, before forcefully
     * closing it.
     */
    const uint32_t maxAccessesPerRow;

    /**
     * Pipeline latency of the controller frontend. The frontend
     * contribution is added to writes (that complete when they are in
     * the write buffer) and reads that are serviced by the write buffer.
     */
    const Tick frontendLatency;

    /**
     * Pipeline latency of the backend and PHY. Along with the
     * frontend contribution, this latency is added to reads serviced
     * by the DRAM.
     */
    const Tick backendLatency;

    /**
     * Till when has the main data bus been spoken for already?
     */
    Tick busBusyUntil;

    /**
     * Keep track of when a refresh is due.
     */
    Tick refreshDueAt;

    /**
     * The refresh state is used to control the progress of the
     * refresh scheduling. When normal operation is in progress the
     * refresh state is idle. From there, it progresses to the refresh
     * drain state once tREFI has passed. The refresh drain state
     * captures the DRAM row active state, as it will stay there until
     * all ongoing accesses complete. Thereafter all banks are
     * precharged, and lastly, the DRAM is refreshed.
     */
    enum RefreshState {
        REF_IDLE = 0,
        REF_DRAIN,
        REF_PRE,
        REF_RUN
    };

    // Current state of the refresh state machine above
    RefreshState refreshState;

    /**
     * The power state captures the different operational states of
     * the DRAM and interacts with the bus read/write state machine,
     * and the refresh state machine. In the idle state all banks are
     * precharged. From there we either go to an auto refresh (as
     * determined by the refresh state machine), or to a precharge
     * power down mode. From idle the memory can also go to the active
     * state (with one or more banks active), and in turn from there
     * to active power down. At the moment we do not capture the deep
     * power down and self-refresh state.
     */
    enum PowerState {
        PWR_IDLE = 0,
        PWR_REF,
        PWR_PRE_PDN,
        PWR_ACT,
        PWR_ACT_PDN
    };

    /**
     * Since we are taking decisions out of order, we need to keep
     * track of what power transition is happening at what time, such
     * that we can go back in time and change history. For example, if
     * we precharge all banks and schedule going to the idle state, we
     * might at a later point decide to activate a bank before the
     * transition to idle would have taken place.
     */
    PowerState pwrStateTrans;

    /**
     * Current power state.
     */
    PowerState pwrState;

    /**
     * Schedule a power state transition in the future, and
     * potentially override an already scheduled transition.
     *
     * @param pwr_state Power state to transition to
     * @param tick Tick when transition should take place
     */
    void schedulePowerEvent(PowerState pwr_state, Tick tick);

    // Tick of the last request arrival, used for the gap statistics
    Tick prevArrival;

    /**
     * The soonest you have to start thinking about the next request
     * is the longest access time that can occur before
     * busBusyUntil. Assuming you need to precharge, open a new row,
     * and access, it is tRP + tRCD + tCL.
     */
    Tick nextReqTime;

    // All statistics that the model needs to capture
    Stats::Scalar readReqs;
    Stats::Scalar writeReqs;
    Stats::Scalar readBursts;
    Stats::Scalar writeBursts;
    Stats::Scalar bytesReadDRAM;
    Stats::Scalar bytesReadWrQ;
    Stats::Scalar bytesWritten;
    Stats::Scalar bytesReadSys;
    Stats::Scalar bytesWrittenSys;
    Stats::Scalar servicedByWrQ;
    Stats::Scalar mergedWrBursts;
    Stats::Scalar neitherReadNorWrite;
    Stats::Vector perBankRdBursts;
    Stats::Vector perBankWrBursts;
    Stats::Scalar numRdRetry;
    Stats::Scalar numWrRetry;
    Stats::Scalar totGap;
    Stats::Vector readPktSize;
    Stats::Vector writePktSize;
    Stats::Vector rdQLenPdf;
    Stats::Vector wrQLenPdf;
    Stats::Histogram bytesPerActivate;
    Stats::Histogram rdPerTurnAround;
    Stats::Histogram wrPerTurnAround;

    // Latencies summed over all requests
    Stats::Scalar totQLat;
    Stats::Scalar totMemAccLat;
    Stats::Scalar totBusLat;

    // Average latencies per request
    Stats::Formula avgQLat;
    Stats::Formula avgBusLat;
    Stats::Formula avgMemAccLat;

    // Average bandwidth
    Stats::Formula avgRdBW;
    Stats::Formula avgWrBW;
    Stats::Formula avgRdBWSys;
    Stats::Formula avgWrBWSys;
    Stats::Formula peakBW;
    Stats::Formula busUtil;
    Stats::Formula busUtilRead;
    Stats::Formula busUtilWrite;

    // Average queue lengths
    Stats::Average avgRdQLen;
    Stats::Average avgWrQLen;

    // Row hit count and rate
    Stats::Scalar readRowHits;
    Stats::Scalar writeRowHits;
    Stats::Formula readRowHitRate;
    Stats::Formula writeRowHitRate;
    Stats::Formula avgGap;

    // DRAM Power Calculation
    Stats::Formula pageHitRate;
    Stats::Vector pwrStateTime;

    // Track when we transitioned to the current power state
    Tick pwrStateTick;

    // To track number of banks which are currently active
    unsigned int numBanksActive;

    // Holds the value of the rank of burst issued
    uint8_t activeRank;

    /** @todo this is a temporary workaround until the 4-phase code is
     * committed. upstream caches needs this packet until true is returned, so
     * hold onto it for deletion until a subsequent call
     */
    std::vector<PacketPtr> pendingDelete;

  public:

    void regStats();

    DRAMCtrl(const DRAMCtrlParams* p);

    unsigned int drain(DrainManager* dm);

    virtual BaseSlavePort& getSlavePort(const std::string& if_name,
                                        PortID idx = InvalidPortID);

    virtual void init();
    virtual void startup();

  protected:

    Tick recvAtomic(PacketPtr pkt);
    void recvFunctional(PacketPtr pkt);
    bool recvTimingReq(PacketPtr pkt);

};

#endif //__MEM_DRAM_CTRL_HH__