dram_ctrl.cc revision 10619:6dd27a0e0d23
1/* 2 * Copyright (c) 2010-2014 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2013 Amin Farmahini-Farahani 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Andreas Hansson 41 * Ani Udipi 42 * Neha Agarwal 43 * Omar Naji 44 */ 45 46#include "base/bitfield.hh" 47#include "base/trace.hh" 48#include "debug/DRAM.hh" 49#include "debug/DRAMPower.hh" 50#include "debug/DRAMState.hh" 51#include "debug/Drain.hh" 52#include "mem/dram_ctrl.hh" 53#include "sim/system.hh" 54 55using namespace std; 56using namespace Data; 57 58DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) : 59 AbstractMemory(p), 60 port(name() + ".port", *this), isTimingMode(false), 61 retryRdReq(false), retryWrReq(false), 62 busState(READ), 63 nextReqEvent(this), respondEvent(this), 64 drainManager(NULL), 65 deviceSize(p->device_size), 66 deviceBusWidth(p->device_bus_width), burstLength(p->burst_length), 67 deviceRowBufferSize(p->device_rowbuffer_size), 68 devicesPerRank(p->devices_per_rank), 69 burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8), 70 rowBufferSize(devicesPerRank * deviceRowBufferSize), 71 columnsPerRowBuffer(rowBufferSize / burstSize), 72 columnsPerStripe(range.granularity() / burstSize), 73 ranksPerChannel(p->ranks_per_channel), 74 bankGroupsPerRank(p->bank_groups_per_rank), 75 bankGroupArch(p->bank_groups_per_rank > 0), 76 banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0), 77 readBufferSize(p->read_buffer_size), 78 writeBufferSize(p->write_buffer_size), 79 writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0), 80 writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0), 81 minWritesPerSwitch(p->min_writes_per_switch), 82 writesThisTime(0), readsThisTime(0), 83 tCK(p->tCK), tWTR(p->tWTR), tRTW(p->tRTW), tCS(p->tCS), tBURST(p->tBURST), 84 tCCD_L(p->tCCD_L), tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS), 85 tWR(p->tWR), tRTP(p->tRTP), tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD), 86 tRRD_L(p->tRRD_L), tXAW(p->tXAW), activationLimit(p->activation_limit), 87 memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping), 88 pageMgmt(p->page_policy), 89 maxAccessesPerRow(p->max_accesses_per_row), 90 frontendLatency(p->static_frontend_latency), 91 backendLatency(p->static_backend_latency), 92 busBusyUntil(0), prevArrival(0), 93 nextReqTime(0), activeRank(0), timeStampOffset(0) 94{ 95 for (int i = 0; i < ranksPerChannel; i++) { 96 Rank* rank = new Rank(*this, p); 97 ranks.push_back(rank); 98 99 rank->actTicks.resize(activationLimit, 0); 100 rank->banks.resize(banksPerRank); 101 rank->rank = i; 102 103 for (int b = 0; b < banksPerRank; b++) { 104 rank->banks[b].bank = b; 105 // GDDR addressing of banks to BG is linear. 106 // Here we assume that all DRAM generations address bank groups as 107 // follows: 108 if (bankGroupArch) { 109 // Simply assign lower bits to bank group in order to 110 // rotate across bank groups as banks are incremented 111 // e.g. with 4 banks per bank group and 16 banks total: 112 // banks 0,4,8,12 are in bank group 0 113 // banks 1,5,9,13 are in bank group 1 114 // banks 2,6,10,14 are in bank group 2 115 // banks 3,7,11,15 are in bank group 3 116 rank->banks[b].bankgr = b % bankGroupsPerRank; 117 } else { 118 // No bank groups; simply assign to bank number 119 rank->banks[b].bankgr = b; 120 } 121 } 122 } 123 124 // perform a basic check of the write thresholds 125 if (p->write_low_thresh_perc >= p->write_high_thresh_perc) 126 fatal("Write buffer low threshold %d must be smaller than the " 127 "high threshold %d\n", p->write_low_thresh_perc, 128 p->write_high_thresh_perc); 129 130 // determine the rows per bank by looking at the total capacity 131 uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size()); 132 133 // determine the dram actual capacity from the DRAM config in Mbytes 134 uint64_t deviceCapacity = deviceSize / (1024 * 1024) * devicesPerRank * 135 ranksPerChannel; 136 137 // if actual DRAM size does not match memory capacity in system warn! 138 if (deviceCapacity != capacity / (1024 * 1024)) 139 warn("DRAM device capacity (%d Mbytes) does not match the " 140 "address range assigned (%d Mbytes)\n", deviceCapacity, 141 capacity / (1024 * 1024)); 142 143 DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity, 144 AbstractMemory::size()); 145 146 DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n", 147 rowBufferSize, columnsPerRowBuffer); 148 149 rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel); 150 151 // a bit of sanity checks on the interleaving 152 if (range.interleaved()) { 153 if (channels != range.stripes()) 154 fatal("%s has %d interleaved address stripes but %d channel(s)\n", 155 name(), range.stripes(), channels); 156 157 if (addrMapping == Enums::RoRaBaChCo) { 158 if (rowBufferSize != range.granularity()) { 159 fatal("Channel interleaving of %s doesn't match RoRaBaChCo " 160 "address map\n", name()); 161 } 162 } else if (addrMapping == Enums::RoRaBaCoCh || 163 addrMapping == Enums::RoCoRaBaCh) { 164 // for the interleavings with channel bits in the bottom, 165 // if the system uses a channel striping granularity that 166 // is larger than the DRAM burst size, then map the 167 // sequential accesses within a stripe to a number of 168 // columns in the DRAM, effectively placing some of the 169 // lower-order column bits as the least-significant bits 170 // of the address (above the ones denoting the burst size) 171 assert(columnsPerStripe >= 1); 172 173 // channel striping has to be done at a granularity that 174 // is equal or larger to a cache line 175 if (system()->cacheLineSize() > range.granularity()) { 176 fatal("Channel interleaving of %s must be at least as large " 177 "as the cache line size\n", name()); 178 } 179 180 // ...and equal or smaller than the row-buffer size 181 if (rowBufferSize < range.granularity()) { 182 fatal("Channel interleaving of %s must be at most as large " 183 "as the row-buffer size\n", name()); 184 } 185 // this is essentially the check above, so just to be sure 186 assert(columnsPerStripe <= columnsPerRowBuffer); 187 } 188 } 189 190 // some basic sanity checks 191 if (tREFI <= tRP || tREFI <= tRFC) { 192 fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n", 193 tREFI, tRP, tRFC); 194 } 195 196 // basic bank group architecture checks -> 197 if (bankGroupArch) { 198 // must have at least one bank per bank group 199 if (bankGroupsPerRank > banksPerRank) { 200 fatal("banks per rank (%d) must be equal to or larger than " 201 "banks groups per rank (%d)\n", 202 banksPerRank, bankGroupsPerRank); 203 } 204 // must have same number of banks in each bank group 205 if ((banksPerRank % bankGroupsPerRank) != 0) { 206 fatal("Banks per rank (%d) must be evenly divisible by bank groups " 207 "per rank (%d) for equal banks per bank group\n", 208 banksPerRank, bankGroupsPerRank); 209 } 210 // tCCD_L should be greater than minimal, back-to-back burst delay 211 if (tCCD_L <= tBURST) { 212 fatal("tCCD_L (%d) should be larger than tBURST (%d) when " 213 "bank groups per rank (%d) is greater than 1\n", 214 tCCD_L, tBURST, bankGroupsPerRank); 215 } 216 // tRRD_L is greater than minimal, same bank group ACT-to-ACT delay 217 // some datasheets might specify it equal to tRRD 218 if (tRRD_L < tRRD) { 219 fatal("tRRD_L (%d) should be larger than tRRD (%d) when " 220 "bank groups per rank (%d) is greater than 1\n", 221 tRRD_L, tRRD, bankGroupsPerRank); 222 } 223 } 224 225} 226 227void 228DRAMCtrl::init() 229{ 230 AbstractMemory::init(); 231 232 if (!port.isConnected()) { 233 fatal("DRAMCtrl %s is unconnected!\n", name()); 234 } else { 235 port.sendRangeChange(); 236 } 237} 238 239void 240DRAMCtrl::startup() 241{ 242 // remember the memory system mode of operation 243 isTimingMode = system()->isTimingMode(); 244 245 if (isTimingMode) { 246 // timestamp offset should be in clock cycles for DRAMPower 247 timeStampOffset = divCeil(curTick(), tCK); 248 249 // update the start tick for the precharge accounting to the 250 // current tick 251 for (auto r : ranks) { 252 r->startup(curTick() + tREFI - tRP); 253 } 254 255 // shift the bus busy time sufficiently far ahead that we never 256 // have to worry about negative values when computing the time for 257 // the next request, this will add an insignificant bubble at the 258 // start of simulation 259 busBusyUntil = curTick() + tRP + tRCD + tCL; 260 } 261} 262 263Tick 264DRAMCtrl::recvAtomic(PacketPtr pkt) 265{ 266 DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr()); 267 268 // do the actual memory access and turn the packet into a response 269 access(pkt); 270 271 Tick latency = 0; 272 if (!pkt->memInhibitAsserted() && pkt->hasData()) { 273 // this value is not supposed to be accurate, just enough to 274 // keep things going, mimic a closed page 275 latency = tRP + tRCD + tCL; 276 } 277 return latency; 278} 279 280bool 281DRAMCtrl::readQueueFull(unsigned int neededEntries) const 282{ 283 DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n", 284 readBufferSize, readQueue.size() + respQueue.size(), 285 neededEntries); 286 287 return 288 (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize; 289} 290 291bool 292DRAMCtrl::writeQueueFull(unsigned int neededEntries) const 293{ 294 DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n", 295 writeBufferSize, writeQueue.size(), neededEntries); 296 return (writeQueue.size() + neededEntries) > writeBufferSize; 297} 298 299DRAMCtrl::DRAMPacket* 300DRAMCtrl::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size, 301 bool isRead) 302{ 303 // decode the address based on the address mapping scheme, with 304 // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and 305 // channel, respectively 306 uint8_t rank; 307 uint8_t bank; 308 // use a 64-bit unsigned during the computations as the row is 309 // always the top bits, and check before creating the DRAMPacket 310 uint64_t row; 311 312 // truncate the address to a DRAM burst, which makes it unique to 313 // a specific column, row, bank, rank and channel 314 Addr addr = dramPktAddr / burstSize; 315 316 // we have removed the lowest order address bits that denote the 317 // position within the column 318 if (addrMapping == Enums::RoRaBaChCo) { 319 // the lowest order bits denote the column to ensure that 320 // sequential cache lines occupy the same row 321 addr = addr / columnsPerRowBuffer; 322 323 // take out the channel part of the address 324 addr = addr / channels; 325 326 // after the channel bits, get the bank bits to interleave 327 // over the banks 328 bank = addr % banksPerRank; 329 addr = addr / banksPerRank; 330 331 // after the bank, we get the rank bits which thus interleaves 332 // over the ranks 333 rank = addr % ranksPerChannel; 334 addr = addr / ranksPerChannel; 335 336 // lastly, get the row bits 337 row = addr % rowsPerBank; 338 addr = addr / rowsPerBank; 339 } else if (addrMapping == Enums::RoRaBaCoCh) { 340 // take out the lower-order column bits 341 addr = addr / columnsPerStripe; 342 343 // take out the channel part of the address 344 addr = addr / channels; 345 346 // next, the higher-order column bites 347 addr = addr / (columnsPerRowBuffer / columnsPerStripe); 348 349 // after the column bits, we get the bank bits to interleave 350 // over the banks 351 bank = addr % banksPerRank; 352 addr = addr / banksPerRank; 353 354 // after the bank, we get the rank bits which thus interleaves 355 // over the ranks 356 rank = addr % ranksPerChannel; 357 addr = addr / ranksPerChannel; 358 359 // lastly, get the row bits 360 row = addr % rowsPerBank; 361 addr = addr / rowsPerBank; 362 } else if (addrMapping == Enums::RoCoRaBaCh) { 363 // optimise for closed page mode and utilise maximum 364 // parallelism of the DRAM (at the cost of power) 365 366 // take out the lower-order column bits 367 addr = addr / columnsPerStripe; 368 369 // take out the channel part of the address, not that this has 370 // to match with how accesses are interleaved between the 371 // controllers in the address mapping 372 addr = addr / channels; 373 374 // start with the bank bits, as this provides the maximum 375 // opportunity for parallelism between requests 376 bank = addr % banksPerRank; 377 addr = addr / banksPerRank; 378 379 // next get the rank bits 380 rank = addr % ranksPerChannel; 381 addr = addr / ranksPerChannel; 382 383 // next, the higher-order column bites 384 addr = addr / (columnsPerRowBuffer / columnsPerStripe); 385 386 // lastly, get the row bits 387 row = addr % rowsPerBank; 388 addr = addr / rowsPerBank; 389 } else 390 panic("Unknown address mapping policy chosen!"); 391 392 assert(rank < ranksPerChannel); 393 assert(bank < banksPerRank); 394 assert(row < rowsPerBank); 395 assert(row < Bank::NO_ROW); 396 397 DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n", 398 dramPktAddr, rank, bank, row); 399 400 // create the corresponding DRAM packet with the entry time and 401 // ready time set to the current tick, the latter will be updated 402 // later 403 uint16_t bank_id = banksPerRank * rank + bank; 404 return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr, 405 size, ranks[rank]->banks[bank], *ranks[rank]); 406} 407 408void 409DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount) 410{ 411 // only add to the read queue here. whenever the request is 412 // eventually done, set the readyTime, and call schedule() 413 assert(!pkt->isWrite()); 414 415 assert(pktCount != 0); 416 417 // if the request size is larger than burst size, the pkt is split into 418 // multiple DRAM packets 419 // Note if the pkt starting address is not aligened to burst size, the 420 // address of first DRAM packet is kept unaliged. Subsequent DRAM packets 421 // are aligned to burst size boundaries. This is to ensure we accurately 422 // check read packets against packets in write queue. 423 Addr addr = pkt->getAddr(); 424 unsigned pktsServicedByWrQ = 0; 425 BurstHelper* burst_helper = NULL; 426 for (int cnt = 0; cnt < pktCount; ++cnt) { 427 unsigned size = std::min((addr | (burstSize - 1)) + 1, 428 pkt->getAddr() + pkt->getSize()) - addr; 429 readPktSize[ceilLog2(size)]++; 430 readBursts++; 431 432 // First check write buffer to see if the data is already at 433 // the controller 434 bool foundInWrQ = false; 435 for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) { 436 // check if the read is subsumed in the write entry we are 437 // looking at 438 if ((*i)->addr <= addr && 439 (addr + size) <= ((*i)->addr + (*i)->size)) { 440 foundInWrQ = true; 441 servicedByWrQ++; 442 pktsServicedByWrQ++; 443 DPRINTF(DRAM, "Read to addr %lld with size %d serviced by " 444 "write queue\n", addr, size); 445 bytesReadWrQ += burstSize; 446 break; 447 } 448 } 449 450 // If not found in the write q, make a DRAM packet and 451 // push it onto the read queue 452 if (!foundInWrQ) { 453 454 // Make the burst helper for split packets 455 if (pktCount > 1 && burst_helper == NULL) { 456 DPRINTF(DRAM, "Read to addr %lld translates to %d " 457 "dram requests\n", pkt->getAddr(), pktCount); 458 burst_helper = new BurstHelper(pktCount); 459 } 460 461 DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true); 462 dram_pkt->burstHelper = burst_helper; 463 464 assert(!readQueueFull(1)); 465 rdQLenPdf[readQueue.size() + respQueue.size()]++; 466 467 DPRINTF(DRAM, "Adding to read queue\n"); 468 469 readQueue.push_back(dram_pkt); 470 471 // Update stats 472 avgRdQLen = readQueue.size() + respQueue.size(); 473 } 474 475 // Starting address of next dram pkt (aligend to burstSize boundary) 476 addr = (addr | (burstSize - 1)) + 1; 477 } 478 479 // If all packets are serviced by write queue, we send the repsonse back 480 if (pktsServicedByWrQ == pktCount) { 481 accessAndRespond(pkt, frontendLatency); 482 return; 483 } 484 485 // Update how many split packets are serviced by write queue 486 if (burst_helper != NULL) 487 burst_helper->burstsServiced = pktsServicedByWrQ; 488 489 // If we are not already scheduled to get a request out of the 490 // queue, do so now 491 if (!nextReqEvent.scheduled()) { 492 DPRINTF(DRAM, "Request scheduled immediately\n"); 493 schedule(nextReqEvent, curTick()); 494 } 495} 496 497void 498DRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pktCount) 499{ 500 // only add to the write queue here. whenever the request is 501 // eventually done, set the readyTime, and call schedule() 502 assert(pkt->isWrite()); 503 504 // if the request size is larger than burst size, the pkt is split into 505 // multiple DRAM packets 506 Addr addr = pkt->getAddr(); 507 for (int cnt = 0; cnt < pktCount; ++cnt) { 508 unsigned size = std::min((addr | (burstSize - 1)) + 1, 509 pkt->getAddr() + pkt->getSize()) - addr; 510 writePktSize[ceilLog2(size)]++; 511 writeBursts++; 512 513 // see if we can merge with an existing item in the write 514 // queue and keep track of whether we have merged or not so we 515 // can stop at that point and also avoid enqueueing a new 516 // request 517 bool merged = false; 518 auto w = writeQueue.begin(); 519 520 while(!merged && w != writeQueue.end()) { 521 // either of the two could be first, if they are the same 522 // it does not matter which way we go 523 if ((*w)->addr >= addr) { 524 // the existing one starts after the new one, figure 525 // out where the new one ends with respect to the 526 // existing one 527 if ((addr + size) >= ((*w)->addr + (*w)->size)) { 528 // check if the existing one is completely 529 // subsumed in the new one 530 DPRINTF(DRAM, "Merging write covering existing burst\n"); 531 merged = true; 532 // update both the address and the size 533 (*w)->addr = addr; 534 (*w)->size = size; 535 } else if ((addr + size) >= (*w)->addr && 536 ((*w)->addr + (*w)->size - addr) <= burstSize) { 537 // the new one is just before or partially 538 // overlapping with the existing one, and together 539 // they fit within a burst 540 DPRINTF(DRAM, "Merging write before existing burst\n"); 541 merged = true; 542 // the existing queue item needs to be adjusted with 543 // respect to both address and size 544 (*w)->size = (*w)->addr + (*w)->size - addr; 545 (*w)->addr = addr; 546 } 547 } else { 548 // the new one starts after the current one, figure 549 // out where the existing one ends with respect to the 550 // new one 551 if (((*w)->addr + (*w)->size) >= (addr + size)) { 552 // check if the new one is completely subsumed in the 553 // existing one 554 DPRINTF(DRAM, "Merging write into existing burst\n"); 555 merged = true; 556 // no adjustments necessary 557 } else if (((*w)->addr + (*w)->size) >= addr && 558 (addr + size - (*w)->addr) <= burstSize) { 559 // the existing one is just before or partially 560 // overlapping with the new one, and together 561 // they fit within a burst 562 DPRINTF(DRAM, "Merging write after existing burst\n"); 563 merged = true; 564 // the address is right, and only the size has 565 // to be adjusted 566 (*w)->size = addr + size - (*w)->addr; 567 } 568 } 569 ++w; 570 } 571 572 // if the item was not merged we need to create a new write 573 // and enqueue it 574 if (!merged) { 575 DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false); 576 577 assert(writeQueue.size() < writeBufferSize); 578 wrQLenPdf[writeQueue.size()]++; 579 580 DPRINTF(DRAM, "Adding to write queue\n"); 581 582 writeQueue.push_back(dram_pkt); 583 584 // Update stats 585 avgWrQLen = writeQueue.size(); 586 } else { 587 // keep track of the fact that this burst effectively 588 // disappeared as it was merged with an existing one 589 mergedWrBursts++; 590 } 591 592 // Starting address of next dram pkt (aligend to burstSize boundary) 593 addr = (addr | (burstSize - 1)) + 1; 594 } 595 596 // we do not wait for the writes to be send to the actual memory, 597 // but instead take responsibility for the consistency here and 598 // snoop the write queue for any upcoming reads 599 // @todo, if a pkt size is larger than burst size, we might need a 600 // different front end latency 601 accessAndRespond(pkt, frontendLatency); 602 603 // If we are not already scheduled to get a request out of the 604 // queue, do so now 605 if (!nextReqEvent.scheduled()) { 606 DPRINTF(DRAM, "Request scheduled immediately\n"); 607 schedule(nextReqEvent, curTick()); 608 } 609} 610 611void 612DRAMCtrl::printQs() const { 613 DPRINTF(DRAM, "===READ QUEUE===\n\n"); 614 for (auto i = readQueue.begin() ; i != readQueue.end() ; ++i) { 615 DPRINTF(DRAM, "Read %lu\n", (*i)->addr); 616 } 617 DPRINTF(DRAM, "\n===RESP QUEUE===\n\n"); 618 for (auto i = respQueue.begin() ; i != respQueue.end() ; ++i) { 619 DPRINTF(DRAM, "Response %lu\n", (*i)->addr); 620 } 621 DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n"); 622 for (auto i = writeQueue.begin() ; i != writeQueue.end() ; ++i) { 623 DPRINTF(DRAM, "Write %lu\n", (*i)->addr); 624 } 625} 626 627bool 628DRAMCtrl::recvTimingReq(PacketPtr pkt) 629{ 630 /// @todo temporary hack to deal with memory corruption issues until 631 /// 4-phase transactions are complete 632 for (int x = 0; x < pendingDelete.size(); x++) 633 delete pendingDelete[x]; 634 pendingDelete.clear(); 635 636 // This is where we enter from the outside world 637 DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n", 638 pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 639 640 // simply drop inhibited packets for now 641 if (pkt->memInhibitAsserted()) { 642 DPRINTF(DRAM, "Inhibited packet -- Dropping it now\n"); 643 pendingDelete.push_back(pkt); 644 return true; 645 } 646 647 // Calc avg gap between requests 648 if (prevArrival != 0) { 649 totGap += curTick() - prevArrival; 650 } 651 prevArrival = curTick(); 652 653 654 // Find out how many dram packets a pkt translates to 655 // If the burst size is equal or larger than the pkt size, then a pkt 656 // translates to only one dram packet. Otherwise, a pkt translates to 657 // multiple dram packets 658 unsigned size = pkt->getSize(); 659 unsigned offset = pkt->getAddr() & (burstSize - 1); 660 unsigned int dram_pkt_count = divCeil(offset + size, burstSize); 661 662 // check local buffers and do not accept if full 663 if (pkt->isRead()) { 664 assert(size != 0); 665 if (readQueueFull(dram_pkt_count)) { 666 DPRINTF(DRAM, "Read queue full, not accepting\n"); 667 // remember that we have to retry this port 668 retryRdReq = true; 669 numRdRetry++; 670 return false; 671 } else { 672 addToReadQueue(pkt, dram_pkt_count); 673 readReqs++; 674 bytesReadSys += size; 675 } 676 } else if (pkt->isWrite()) { 677 assert(size != 0); 678 if (writeQueueFull(dram_pkt_count)) { 679 DPRINTF(DRAM, "Write queue full, not accepting\n"); 680 // remember that we have to retry this port 681 retryWrReq = true; 682 numWrRetry++; 683 return false; 684 } else { 685 addToWriteQueue(pkt, dram_pkt_count); 686 writeReqs++; 687 bytesWrittenSys += size; 688 } 689 } else { 690 DPRINTF(DRAM,"Neither read nor write, ignore timing\n"); 691 neitherReadNorWrite++; 692 accessAndRespond(pkt, 1); 693 } 694 695 return true; 696} 697 698void 699DRAMCtrl::processRespondEvent() 700{ 701 DPRINTF(DRAM, 702 "processRespondEvent(): Some req has reached its readyTime\n"); 703 704 DRAMPacket* dram_pkt = respQueue.front(); 705 706 if (dram_pkt->burstHelper) { 707 // it is a split packet 708 dram_pkt->burstHelper->burstsServiced++; 709 if (dram_pkt->burstHelper->burstsServiced == 710 dram_pkt->burstHelper->burstCount) { 711 // we have now serviced all children packets of a system packet 712 // so we can now respond to the requester 713 // @todo we probably want to have a different front end and back 714 // end latency for split packets 715 accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency); 716 delete dram_pkt->burstHelper; 717 dram_pkt->burstHelper = NULL; 718 } 719 } else { 720 // it is not a split packet 721 accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency); 722 } 723 724 delete respQueue.front(); 725 respQueue.pop_front(); 726 727 if (!respQueue.empty()) { 728 assert(respQueue.front()->readyTime >= curTick()); 729 assert(!respondEvent.scheduled()); 730 schedule(respondEvent, respQueue.front()->readyTime); 731 } else { 732 // if there is nothing left in any queue, signal a drain 733 if (writeQueue.empty() && readQueue.empty() && 734 drainManager) { 735 DPRINTF(Drain, "DRAM controller done draining\n"); 736 drainManager->signalDrainDone(); 737 drainManager = NULL; 738 } 739 } 740 741 // We have made a location in the queue available at this point, 742 // so if there is a read that was forced to wait, retry now 743 if (retryRdReq) { 744 retryRdReq = false; 745 port.sendRetry(); 746 } 747} 748 749bool 750DRAMCtrl::chooseNext(std::deque<DRAMPacket*>& queue, bool switched_cmd_type) 751{ 752 // This method does the arbitration between requests. The chosen 753 // packet is simply moved to the head of the queue. The other 754 // methods know that this is the place to look. For example, with 755 // FCFS, this method does nothing 756 assert(!queue.empty()); 757 758 // bool to indicate if a packet to an available rank is found 759 bool found_packet = false; 760 if (queue.size() == 1) { 761 DRAMPacket* dram_pkt = queue.front(); 762 // available rank corresponds to state refresh idle 763 if (ranks[dram_pkt->rank]->isAvailable()) { 764 found_packet = true; 765 DPRINTF(DRAM, "Single request, going to a free rank\n"); 766 } else { 767 DPRINTF(DRAM, "Single request, going to a busy rank\n"); 768 } 769 return found_packet; 770 } 771 772 if (memSchedPolicy == Enums::fcfs) { 773 // check if there is a packet going to a free rank 774 for(auto i = queue.begin(); i != queue.end() ; ++i) { 775 DRAMPacket* dram_pkt = *i; 776 if (ranks[dram_pkt->rank]->isAvailable()) { 777 queue.erase(i); 778 queue.push_front(dram_pkt); 779 found_packet = true; 780 break; 781 } 782 } 783 } else if (memSchedPolicy == Enums::frfcfs) { 784 found_packet = reorderQueue(queue, switched_cmd_type); 785 } else 786 panic("No scheduling policy chosen\n"); 787 return found_packet; 788} 789 790bool 791DRAMCtrl::reorderQueue(std::deque<DRAMPacket*>& queue, bool switched_cmd_type) 792{ 793 // Only determine this when needed 794 uint64_t earliest_banks = 0; 795 796 // Search for row hits first, if no row hit is found then schedule the 797 // packet to one of the earliest banks available 798 bool found_packet = false; 799 bool found_earliest_pkt = false; 800 bool found_prepped_diff_rank_pkt = false; 801 auto selected_pkt_it = queue.end(); 802 803 for (auto i = queue.begin(); i != queue.end() ; ++i) { 804 DRAMPacket* dram_pkt = *i; 805 const Bank& bank = dram_pkt->bankRef; 806 // check if rank is busy. If this is the case jump to the next packet 807 // Check if it is a row hit 808 if (dram_pkt->rankRef.isAvailable()) { 809 if (bank.openRow == dram_pkt->row) { 810 if (dram_pkt->rank == activeRank || switched_cmd_type) { 811 // FCFS within the hits, giving priority to commands 812 // that access the same rank as the previous burst 813 // to minimize bus turnaround delays 814 // Only give rank prioity when command type is 815 // not changing 816 DPRINTF(DRAM, "Row buffer hit\n"); 817 selected_pkt_it = i; 818 break; 819 } else if (!found_prepped_diff_rank_pkt) { 820 // found row hit for command on different rank 821 // than prev burst 822 selected_pkt_it = i; 823 found_prepped_diff_rank_pkt = true; 824 } 825 } else if (!found_earliest_pkt & !found_prepped_diff_rank_pkt) { 826 // packet going to a rank which is currently not waiting for a 827 // refresh, No row hit and 828 // haven't found an entry with a row hit to a new rank 829 if (earliest_banks == 0) 830 // Determine entries with earliest bank prep delay 831 // Function will give priority to commands that access the 832 // same rank as previous burst and can prep 833 // the bank seamlessly 834 earliest_banks = minBankPrep(queue, switched_cmd_type); 835 836 // FCFS - Bank is first available bank 837 if (bits(earliest_banks, dram_pkt->bankId, 838 dram_pkt->bankId)) { 839 // Remember the packet to be scheduled to one of 840 // the earliest banks available, FCFS amongst the 841 // earliest banks 842 selected_pkt_it = i; 843 //if the packet found is going to a rank that is currently 844 //not busy then update the found_packet to true 845 found_earliest_pkt = true; 846 } 847 } 848 } 849 } 850 851 if (selected_pkt_it != queue.end()) { 852 DRAMPacket* selected_pkt = *selected_pkt_it; 853 queue.erase(selected_pkt_it); 854 queue.push_front(selected_pkt); 855 found_packet = true; 856 } 857 return found_packet; 858} 859 860void 861DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency) 862{ 863 DPRINTF(DRAM, "Responding to Address %lld.. ",pkt->getAddr()); 864 865 bool needsResponse = pkt->needsResponse(); 866 // do the actual memory access which also turns the packet into a 867 // response 868 access(pkt); 869 870 // turn packet around to go back to requester if response expected 871 if (needsResponse) { 872 // access already turned the packet into a response 873 assert(pkt->isResponse()); 874 875 // @todo someone should pay for this 876 pkt->firstWordDelay = pkt->lastWordDelay = 0; 877 878 // queue the packet in the response queue to be sent out after 879 // the static latency has passed 880 port.schedTimingResp(pkt, curTick() + static_latency); 881 } else { 882 // @todo the packet is going to be deleted, and the DRAMPacket 883 // is still having a pointer to it 884 pendingDelete.push_back(pkt); 885 } 886 887 DPRINTF(DRAM, "Done\n"); 888 889 return; 890} 891 892void 893DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref, 894 Tick act_tick, uint32_t row) 895{ 896 assert(rank_ref.actTicks.size() == activationLimit); 897 898 DPRINTF(DRAM, "Activate at tick %d\n", act_tick); 899 900 // update the open row 901 assert(bank_ref.openRow == Bank::NO_ROW); 902 bank_ref.openRow = row; 903 904 // start counting anew, this covers both the case when we 905 // auto-precharged, and when this access is forced to 906 // precharge 907 bank_ref.bytesAccessed = 0; 908 bank_ref.rowAccesses = 0; 909 910 ++rank_ref.numBanksActive; 911 assert(rank_ref.numBanksActive <= banksPerRank); 912 913 DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got %d active\n", 914 bank_ref.bank, rank_ref.rank, act_tick, 915 ranks[rank_ref.rank]->numBanksActive); 916 917 rank_ref.power.powerlib.doCommand(MemCommand::ACT, bank_ref.bank, 918 divCeil(act_tick, tCK) - 919 timeStampOffset); 920 921 DPRINTF(DRAMPower, "%llu,ACT,%d,%d\n", divCeil(act_tick, tCK) - 922 timeStampOffset, bank_ref.bank, rank_ref.rank); 923 924 // The next access has to respect tRAS for this bank 925 bank_ref.preAllowedAt = act_tick + tRAS; 926 927 // Respect the row-to-column command delay 928 bank_ref.colAllowedAt = std::max(act_tick + tRCD, bank_ref.colAllowedAt); 929 930 // start by enforcing tRRD 931 for(int i = 0; i < banksPerRank; i++) { 932 // next activate to any bank in this rank must not happen 933 // before tRRD 934 if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) { 935 // bank group architecture requires longer delays between 936 // ACT commands within the same bank group. Use tRRD_L 937 // in this case 938 rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD_L, 939 rank_ref.banks[i].actAllowedAt); 940 } else { 941 // use shorter tRRD value when either 942 // 1) bank group architecture is not supportted 943 // 2) bank is in a different bank group 944 rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD, 945 rank_ref.banks[i].actAllowedAt); 946 } 947 } 948 949 // next, we deal with tXAW, if the activation limit is disabled 950 // then we directly schedule an activate power event 951 if (!rank_ref.actTicks.empty()) { 952 // sanity check 953 if (rank_ref.actTicks.back() && 954 (act_tick - rank_ref.actTicks.back()) < tXAW) { 955 panic("Got %d activates in window %d (%llu - %llu) which " 956 "is smaller than %llu\n", activationLimit, act_tick - 957 rank_ref.actTicks.back(), act_tick, 958 rank_ref.actTicks.back(), tXAW); 959 } 960 961 // shift the times used for the book keeping, the last element 962 // (highest index) is the oldest one and hence the lowest value 963 rank_ref.actTicks.pop_back(); 964 965 // record an new activation (in the future) 966 rank_ref.actTicks.push_front(act_tick); 967 968 // cannot activate more than X times in time window tXAW, push the 969 // next one (the X + 1'st activate) to be tXAW away from the 970 // oldest in our window of X 971 if (rank_ref.actTicks.back() && 972 (act_tick - rank_ref.actTicks.back()) < tXAW) { 973 DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate " 974 "no earlier than %llu\n", activationLimit, 975 rank_ref.actTicks.back() + tXAW); 976 for(int j = 0; j < banksPerRank; j++) 977 // next activate must not happen before end of window 978 rank_ref.banks[j].actAllowedAt = 979 std::max(rank_ref.actTicks.back() + tXAW, 980 rank_ref.banks[j].actAllowedAt); 981 } 982 } 983 984 // at the point when this activate takes place, make sure we 985 // transition to the active power state 986 if (!rank_ref.activateEvent.scheduled()) 987 schedule(rank_ref.activateEvent, act_tick); 988 else if (rank_ref.activateEvent.when() > act_tick) 989 // move it sooner in time 990 reschedule(rank_ref.activateEvent, act_tick); 991} 992 993void 994DRAMCtrl::prechargeBank(Rank& rank_ref, Bank& bank, Tick pre_at, bool trace) 995{ 996 // make sure the bank has an open row 997 assert(bank.openRow != Bank::NO_ROW); 998 999 // sample the bytes per activate here since we are closing 1000 // the page 1001 bytesPerActivate.sample(bank.bytesAccessed); 1002 1003 bank.openRow = Bank::NO_ROW; 1004 1005 // no precharge allowed before this one 1006 bank.preAllowedAt = pre_at; 1007 1008 Tick pre_done_at = pre_at + tRP; 1009 1010 bank.actAllowedAt = std::max(bank.actAllowedAt, pre_done_at); 1011 1012 assert(rank_ref.numBanksActive != 0); 1013 --rank_ref.numBanksActive; 1014 1015 DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got " 1016 "%d active\n", bank.bank, rank_ref.rank, pre_at, 1017 rank_ref.numBanksActive); 1018 1019 if (trace) { 1020 1021 rank_ref.power.powerlib.doCommand(MemCommand::PRE, bank.bank, 1022 divCeil(pre_at, tCK) - 1023 timeStampOffset); 1024 DPRINTF(DRAMPower, "%llu,PRE,%d,%d\n", divCeil(pre_at, tCK) - 1025 timeStampOffset, bank.bank, rank_ref.rank); 1026 } 1027 // if we look at the current number of active banks we might be 1028 // tempted to think the DRAM is now idle, however this can be 1029 // undone by an activate that is scheduled to happen before we 1030 // would have reached the idle state, so schedule an event and 1031 // rather check once we actually make it to the point in time when 1032 // the (last) precharge takes place 1033 if (!rank_ref.prechargeEvent.scheduled()) 1034 schedule(rank_ref.prechargeEvent, pre_done_at); 1035 else if (rank_ref.prechargeEvent.when() < pre_done_at) 1036 reschedule(rank_ref.prechargeEvent, pre_done_at); 1037} 1038 1039void 1040DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt) 1041{ 1042 DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n", 1043 dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row); 1044 1045 // get the rank 1046 Rank& rank = dram_pkt->rankRef; 1047 1048 // get the bank 1049 Bank& bank = dram_pkt->bankRef; 1050 1051 // for the state we need to track if it is a row hit or not 1052 bool row_hit = true; 1053 1054 // respect any constraints on the command (e.g. tRCD or tCCD) 1055 Tick cmd_at = std::max(bank.colAllowedAt, curTick()); 1056 1057 // Determine the access latency and update the bank state 1058 if (bank.openRow == dram_pkt->row) { 1059 // nothing to do 1060 } else { 1061 row_hit = false; 1062 1063 // If there is a page open, precharge it. 1064 if (bank.openRow != Bank::NO_ROW) { 1065 prechargeBank(rank, bank, std::max(bank.preAllowedAt, curTick())); 1066 } 1067 1068 // next we need to account for the delay in activating the 1069 // page 1070 Tick act_tick = std::max(bank.actAllowedAt, curTick()); 1071 1072 // Record the activation and deal with all the global timing 1073 // constraints caused be a new activation (tRRD and tXAW) 1074 activateBank(rank, bank, act_tick, dram_pkt->row); 1075 1076 // issue the command as early as possible 1077 cmd_at = bank.colAllowedAt; 1078 } 1079 1080 // we need to wait until the bus is available before we can issue 1081 // the command 1082 cmd_at = std::max(cmd_at, busBusyUntil - tCL); 1083 1084 // update the packet ready time 1085 dram_pkt->readyTime = cmd_at + tCL + tBURST; 1086 1087 // only one burst can use the bus at any one point in time 1088 assert(dram_pkt->readyTime - busBusyUntil >= tBURST); 1089 1090 // update the time for the next read/write burst for each 1091 // bank (add a max with tCCD/tCCD_L here) 1092 Tick cmd_dly; 1093 for(int j = 0; j < ranksPerChannel; j++) { 1094 for(int i = 0; i < banksPerRank; i++) { 1095 // next burst to same bank group in this rank must not happen 1096 // before tCCD_L. Different bank group timing requirement is 1097 // tBURST; Add tCS for different ranks 1098 if (dram_pkt->rank == j) { 1099 if (bankGroupArch && 1100 (bank.bankgr == ranks[j]->banks[i].bankgr)) { 1101 // bank group architecture requires longer delays between 1102 // RD/WR burst commands to the same bank group. 1103 // Use tCCD_L in this case 1104 cmd_dly = tCCD_L; 1105 } else { 1106 // use tBURST (equivalent to tCCD_S), the shorter 1107 // cas-to-cas delay value, when either: 1108 // 1) bank group architecture is not supportted 1109 // 2) bank is in a different bank group 1110 cmd_dly = tBURST; 1111 } 1112 } else { 1113 // different rank is by default in a different bank group 1114 // use tBURST (equivalent to tCCD_S), which is the shorter 1115 // cas-to-cas delay in this case 1116 // Add tCS to account for rank-to-rank bus delay requirements 1117 cmd_dly = tBURST + tCS; 1118 } 1119 ranks[j]->banks[i].colAllowedAt = std::max(cmd_at + cmd_dly, 1120 ranks[j]->banks[i].colAllowedAt); 1121 } 1122 } 1123 1124 // Save rank of current access 1125 activeRank = dram_pkt->rank; 1126 1127 // If this is a write, we also need to respect the write recovery 1128 // time before a precharge, in the case of a read, respect the 1129 // read to precharge constraint 1130 bank.preAllowedAt = std::max(bank.preAllowedAt, 1131 dram_pkt->isRead ? cmd_at + tRTP : 1132 dram_pkt->readyTime + tWR); 1133 1134 // increment the bytes accessed and the accesses per row 1135 bank.bytesAccessed += burstSize; 1136 ++bank.rowAccesses; 1137 1138 // if we reached the max, then issue with an auto-precharge 1139 bool auto_precharge = pageMgmt == Enums::close || 1140 bank.rowAccesses == maxAccessesPerRow; 1141 1142 // if we did not hit the limit, we might still want to 1143 // auto-precharge 1144 if (!auto_precharge && 1145 (pageMgmt == Enums::open_adaptive || 1146 pageMgmt == Enums::close_adaptive)) { 1147 // a twist on the open and close page policies: 1148 // 1) open_adaptive page policy does not blindly keep the 1149 // page open, but close it if there are no row hits, and there 1150 // are bank conflicts in the queue 1151 // 2) close_adaptive page policy does not blindly close the 1152 // page, but closes it only if there are no row hits in the queue. 1153 // In this case, only force an auto precharge when there 1154 // are no same page hits in the queue 1155 bool got_more_hits = false; 1156 bool got_bank_conflict = false; 1157 1158 // either look at the read queue or write queue 1159 const deque<DRAMPacket*>& queue = dram_pkt->isRead ? readQueue : 1160 writeQueue; 1161 auto p = queue.begin(); 1162 // make sure we are not considering the packet that we are 1163 // currently dealing with (which is the head of the queue) 1164 ++p; 1165 1166 // keep on looking until we have found required condition or 1167 // reached the end 1168 while (!(got_more_hits && 1169 (got_bank_conflict || pageMgmt == Enums::close_adaptive)) && 1170 p != queue.end()) { 1171 bool same_rank_bank = (dram_pkt->rank == (*p)->rank) && 1172 (dram_pkt->bank == (*p)->bank); 1173 bool same_row = dram_pkt->row == (*p)->row; 1174 got_more_hits |= same_rank_bank && same_row; 1175 got_bank_conflict |= same_rank_bank && !same_row; 1176 ++p; 1177 } 1178 1179 // auto pre-charge when either 1180 // 1) open_adaptive policy, we have not got any more hits, and 1181 // have a bank conflict 1182 // 2) close_adaptive policy and we have not got any more hits 1183 auto_precharge = !got_more_hits && 1184 (got_bank_conflict || pageMgmt == Enums::close_adaptive); 1185 } 1186 1187 // DRAMPower trace command to be written 1188 std::string mem_cmd = dram_pkt->isRead ? "RD" : "WR"; 1189 1190 // MemCommand required for DRAMPower library 1191 MemCommand::cmds command = (mem_cmd == "RD") ? MemCommand::RD : 1192 MemCommand::WR; 1193 1194 // if this access should use auto-precharge, then we are 1195 // closing the row 1196 if (auto_precharge) { 1197 // if auto-precharge push a PRE command at the correct tick to the 1198 // list used by DRAMPower library to calculate power 1199 prechargeBank(rank, bank, std::max(curTick(), bank.preAllowedAt)); 1200 1201 DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId); 1202 } 1203 1204 // Update bus state 1205 busBusyUntil = dram_pkt->readyTime; 1206 1207 DPRINTF(DRAM, "Access to %lld, ready at %lld bus busy until %lld.\n", 1208 dram_pkt->addr, dram_pkt->readyTime, busBusyUntil); 1209 1210 dram_pkt->rankRef.power.powerlib.doCommand(command, dram_pkt->bank, 1211 divCeil(cmd_at, tCK) - 1212 timeStampOffset); 1213 1214 DPRINTF(DRAMPower, "%llu,%s,%d,%d\n", divCeil(cmd_at, tCK) - 1215 timeStampOffset, mem_cmd, dram_pkt->bank, dram_pkt->rank); 1216 1217 // Update the minimum timing between the requests, this is a 1218 // conservative estimate of when we have to schedule the next 1219 // request to not introduce any unecessary bubbles. In most cases 1220 // we will wake up sooner than we have to. 1221 nextReqTime = busBusyUntil - (tRP + tRCD + tCL); 1222 1223 // Update the stats and schedule the next request 1224 if (dram_pkt->isRead) { 1225 ++readsThisTime; 1226 if (row_hit) 1227 readRowHits++; 1228 bytesReadDRAM += burstSize; 1229 perBankRdBursts[dram_pkt->bankId]++; 1230 1231 // Update latency stats 1232 totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime; 1233 totBusLat += tBURST; 1234 totQLat += cmd_at - dram_pkt->entryTime; 1235 } else { 1236 ++writesThisTime; 1237 if (row_hit) 1238 writeRowHits++; 1239 bytesWritten += burstSize; 1240 perBankWrBursts[dram_pkt->bankId]++; 1241 } 1242} 1243 1244void 1245DRAMCtrl::processNextReqEvent() 1246{ 1247 int busyRanks = 0; 1248 for (auto r : ranks) { 1249 if (!r->isAvailable()) { 1250 // rank is busy refreshing 1251 busyRanks++; 1252 1253 // let the rank know that if it was waiting to drain, it 1254 // is now done and ready to proceed 1255 r->checkDrainDone(); 1256 } 1257 } 1258 1259 if (busyRanks == ranksPerChannel) { 1260 // if all ranks are refreshing wait for them to finish 1261 // and stall this state machine without taking any further 1262 // action, and do not schedule a new nextReqEvent 1263 return; 1264 } 1265 1266 // pre-emptively set to false. Overwrite if in READ_TO_WRITE 1267 // or WRITE_TO_READ state 1268 bool switched_cmd_type = false; 1269 if (busState == READ_TO_WRITE) { 1270 DPRINTF(DRAM, "Switching to writes after %d reads with %d reads " 1271 "waiting\n", readsThisTime, readQueue.size()); 1272 1273 // sample and reset the read-related stats as we are now 1274 // transitioning to writes, and all reads are done 1275 rdPerTurnAround.sample(readsThisTime); 1276 readsThisTime = 0; 1277 1278 // now proceed to do the actual writes 1279 busState = WRITE; 1280 switched_cmd_type = true; 1281 } else if (busState == WRITE_TO_READ) { 1282 DPRINTF(DRAM, "Switching to reads after %d writes with %d writes " 1283 "waiting\n", writesThisTime, writeQueue.size()); 1284 1285 wrPerTurnAround.sample(writesThisTime); 1286 writesThisTime = 0; 1287 1288 busState = READ; 1289 switched_cmd_type = true; 1290 } 1291 1292 // when we get here it is either a read or a write 1293 if (busState == READ) { 1294 1295 // track if we should switch or not 1296 bool switch_to_writes = false; 1297 1298 if (readQueue.empty()) { 1299 // In the case there is no read request to go next, 1300 // trigger writes if we have passed the low threshold (or 1301 // if we are draining) 1302 if (!writeQueue.empty() && 1303 (drainManager || writeQueue.size() > writeLowThreshold)) { 1304 1305 switch_to_writes = true; 1306 } else { 1307 // check if we are drained 1308 if (respQueue.empty () && drainManager) { 1309 DPRINTF(Drain, "DRAM controller done draining\n"); 1310 drainManager->signalDrainDone(); 1311 drainManager = NULL; 1312 } 1313 1314 // nothing to do, not even any point in scheduling an 1315 // event for the next request 1316 return; 1317 } 1318 } else { 1319 // bool to check if there is a read to a free rank 1320 bool found_read = false; 1321 1322 // Figure out which read request goes next, and move it to the 1323 // front of the read queue 1324 found_read = chooseNext(readQueue, switched_cmd_type); 1325 1326 // if no read to an available rank is found then return 1327 // at this point. There could be writes to the available ranks 1328 // which are above the required threshold. However, to 1329 // avoid adding more complexity to the code, return and wait 1330 // for a refresh event to kick things into action again. 1331 if (!found_read) 1332 return; 1333 1334 DRAMPacket* dram_pkt = readQueue.front(); 1335 assert(dram_pkt->rankRef.isAvailable()); 1336 // here we get a bit creative and shift the bus busy time not 1337 // just the tWTR, but also a CAS latency to capture the fact 1338 // that we are allowed to prepare a new bank, but not issue a 1339 // read command until after tWTR, in essence we capture a 1340 // bubble on the data bus that is tWTR + tCL 1341 if (switched_cmd_type && dram_pkt->rank == activeRank) { 1342 busBusyUntil += tWTR + tCL; 1343 } 1344 1345 doDRAMAccess(dram_pkt); 1346 1347 // At this point we're done dealing with the request 1348 readQueue.pop_front(); 1349 1350 // sanity check 1351 assert(dram_pkt->size <= burstSize); 1352 assert(dram_pkt->readyTime >= curTick()); 1353 1354 // Insert into response queue. It will be sent back to the 1355 // requestor at its readyTime 1356 if (respQueue.empty()) { 1357 assert(!respondEvent.scheduled()); 1358 schedule(respondEvent, dram_pkt->readyTime); 1359 } else { 1360 assert(respQueue.back()->readyTime <= dram_pkt->readyTime); 1361 assert(respondEvent.scheduled()); 1362 } 1363 1364 respQueue.push_back(dram_pkt); 1365 1366 // we have so many writes that we have to transition 1367 if (writeQueue.size() > writeHighThreshold) { 1368 switch_to_writes = true; 1369 } 1370 } 1371 1372 // switching to writes, either because the read queue is empty 1373 // and the writes have passed the low threshold (or we are 1374 // draining), or because the writes hit the hight threshold 1375 if (switch_to_writes) { 1376 // transition to writing 1377 busState = READ_TO_WRITE; 1378 } 1379 } else { 1380 // bool to check if write to free rank is found 1381 bool found_write = false; 1382 1383 found_write = chooseNext(writeQueue, switched_cmd_type); 1384 1385 // if no writes to an available rank are found then return. 1386 // There could be reads to the available ranks. However, to avoid 1387 // adding more complexity to the code, return at this point and wait 1388 // for a refresh event to kick things into action again. 1389 if (!found_write) 1390 return; 1391 1392 DRAMPacket* dram_pkt = writeQueue.front(); 1393 assert(dram_pkt->rankRef.isAvailable()); 1394 // sanity check 1395 assert(dram_pkt->size <= burstSize); 1396 1397 // add a bubble to the data bus, as defined by the 1398 // tRTW when access is to the same rank as previous burst 1399 // Different rank timing is handled with tCS, which is 1400 // applied to colAllowedAt 1401 if (switched_cmd_type && dram_pkt->rank == activeRank) { 1402 busBusyUntil += tRTW; 1403 } 1404 1405 doDRAMAccess(dram_pkt); 1406 1407 writeQueue.pop_front(); 1408 delete dram_pkt; 1409 1410 // If we emptied the write queue, or got sufficiently below the 1411 // threshold (using the minWritesPerSwitch as the hysteresis) and 1412 // are not draining, or we have reads waiting and have done enough 1413 // writes, then switch to reads. 1414 if (writeQueue.empty() || 1415 (writeQueue.size() + minWritesPerSwitch < writeLowThreshold && 1416 !drainManager) || 1417 (!readQueue.empty() && writesThisTime >= minWritesPerSwitch)) { 1418 // turn the bus back around for reads again 1419 busState = WRITE_TO_READ; 1420 1421 // note that the we switch back to reads also in the idle 1422 // case, which eventually will check for any draining and 1423 // also pause any further scheduling if there is really 1424 // nothing to do 1425 } 1426 } 1427 // It is possible that a refresh to another rank kicks things back into 1428 // action before reaching this point. 1429 if (!nextReqEvent.scheduled()) 1430 schedule(nextReqEvent, std::max(nextReqTime, curTick())); 1431 1432 // If there is space available and we have writes waiting then let 1433 // them retry. This is done here to ensure that the retry does not 1434 // cause a nextReqEvent to be scheduled before we do so as part of 1435 // the next request processing 1436 if (retryWrReq && writeQueue.size() < writeBufferSize) { 1437 retryWrReq = false; 1438 port.sendRetry(); 1439 } 1440} 1441 1442uint64_t 1443DRAMCtrl::minBankPrep(const deque<DRAMPacket*>& queue, 1444 bool switched_cmd_type) const 1445{ 1446 uint64_t bank_mask = 0; 1447 Tick min_act_at = MaxTick; 1448 1449 uint64_t bank_mask_same_rank = 0; 1450 Tick min_act_at_same_rank = MaxTick; 1451 1452 // Give precedence to commands that access same rank as previous command 1453 bool same_rank_match = false; 1454 1455 // determine if we have queued transactions targetting the 1456 // bank in question 1457 vector<bool> got_waiting(ranksPerChannel * banksPerRank, false); 1458 for (const auto& p : queue) { 1459 if(p->rankRef.isAvailable()) 1460 got_waiting[p->bankId] = true; 1461 } 1462 1463 for (int i = 0; i < ranksPerChannel; i++) { 1464 for (int j = 0; j < banksPerRank; j++) { 1465 uint16_t bank_id = i * banksPerRank + j; 1466 1467 // if we have waiting requests for the bank, and it is 1468 // amongst the first available, update the mask 1469 if (got_waiting[bank_id]) { 1470 // make sure this rank is not currently refreshing. 1471 assert(ranks[i]->isAvailable()); 1472 // simplistic approximation of when the bank can issue 1473 // an activate, ignoring any rank-to-rank switching 1474 // cost in this calculation 1475 Tick act_at = ranks[i]->banks[j].openRow == Bank::NO_ROW ? 1476 ranks[i]->banks[j].actAllowedAt : 1477 std::max(ranks[i]->banks[j].preAllowedAt, curTick()) + tRP; 1478 1479 // prioritize commands that access the 1480 // same rank as previous burst 1481 // Calculate bank mask separately for the case and 1482 // evaluate after loop iterations complete 1483 if (i == activeRank && ranksPerChannel > 1) { 1484 if (act_at <= min_act_at_same_rank) { 1485 // reset same rank bank mask if new minimum is found 1486 // and previous minimum could not immediately send ACT 1487 if (act_at < min_act_at_same_rank && 1488 min_act_at_same_rank > curTick()) 1489 bank_mask_same_rank = 0; 1490 1491 // Set flag indicating that a same rank 1492 // opportunity was found 1493 same_rank_match = true; 1494 1495 // set the bit corresponding to the available bank 1496 replaceBits(bank_mask_same_rank, bank_id, bank_id, 1); 1497 min_act_at_same_rank = act_at; 1498 } 1499 } else { 1500 if (act_at <= min_act_at) { 1501 // reset bank mask if new minimum is found 1502 // and either previous minimum could not immediately send ACT 1503 if (act_at < min_act_at && min_act_at > curTick()) 1504 bank_mask = 0; 1505 // set the bit corresponding to the available bank 1506 replaceBits(bank_mask, bank_id, bank_id, 1); 1507 min_act_at = act_at; 1508 } 1509 } 1510 } 1511 } 1512 } 1513 1514 // Determine the earliest time when the next burst can issue based 1515 // on the current busBusyUntil delay. 1516 // Offset by tRCD to correlate with ACT timing variables 1517 Tick min_cmd_at = busBusyUntil - tCL - tRCD; 1518 1519 // if we have multiple ranks and all 1520 // waiting packets are accessing a rank which was previously active 1521 // then bank_mask_same_rank will be set to a value while bank_mask will 1522 // remain 0. In this case, the function should return the value of 1523 // bank_mask_same_rank. 1524 // else if waiting packets access a rank which was previously active and 1525 // other ranks, prioritize same rank accesses that can issue B2B 1526 // Only optimize for same ranks when the command type 1527 // does not change; do not want to unnecessarily incur tWTR 1528 // 1529 // Resulting FCFS prioritization Order is: 1530 // 1) Commands that access the same rank as previous burst 1531 // and can prep the bank seamlessly. 1532 // 2) Commands (any rank) with earliest bank prep 1533 if ((bank_mask == 0) || (!switched_cmd_type && same_rank_match && 1534 min_act_at_same_rank <= min_cmd_at)) { 1535 bank_mask = bank_mask_same_rank; 1536 } 1537 1538 return bank_mask; 1539} 1540 1541DRAMCtrl::Rank::Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p) 1542 : EventManager(&_memory), memory(_memory), 1543 pwrStateTrans(PWR_IDLE), pwrState(PWR_IDLE), pwrStateTick(0), 1544 refreshState(REF_IDLE), refreshDueAt(0), 1545 power(_p, false), numBanksActive(0), 1546 activateEvent(*this), prechargeEvent(*this), 1547 refreshEvent(*this), powerEvent(*this) 1548{ } 1549 1550void 1551DRAMCtrl::Rank::startup(Tick ref_tick) 1552{ 1553 assert(ref_tick > curTick()); 1554 1555 pwrStateTick = curTick(); 1556 1557 // kick off the refresh, and give ourselves enough time to 1558 // precharge 1559 schedule(refreshEvent, ref_tick); 1560} 1561 1562void 1563DRAMCtrl::Rank::suspend() 1564{ 1565 deschedule(refreshEvent); 1566} 1567 1568void 1569DRAMCtrl::Rank::checkDrainDone() 1570{ 1571 // if this rank was waiting to drain it is now able to proceed to 1572 // precharge 1573 if (refreshState == REF_DRAIN) { 1574 DPRINTF(DRAM, "Refresh drain done, now precharging\n"); 1575 1576 refreshState = REF_PRE; 1577 1578 // hand control back to the refresh event loop 1579 schedule(refreshEvent, curTick()); 1580 } 1581} 1582 1583void 1584DRAMCtrl::Rank::processActivateEvent() 1585{ 1586 // we should transition to the active state as soon as any bank is active 1587 if (pwrState != PWR_ACT) 1588 // note that at this point numBanksActive could be back at 1589 // zero again due to a precharge scheduled in the future 1590 schedulePowerEvent(PWR_ACT, curTick()); 1591} 1592 1593void 1594DRAMCtrl::Rank::processPrechargeEvent() 1595{ 1596 // if we reached zero, then special conditions apply as we track 1597 // if all banks are precharged for the power models 1598 if (numBanksActive == 0) { 1599 // we should transition to the idle state when the last bank 1600 // is precharged 1601 schedulePowerEvent(PWR_IDLE, curTick()); 1602 } 1603} 1604 1605void 1606DRAMCtrl::Rank::processRefreshEvent() 1607{ 1608 // when first preparing the refresh, remember when it was due 1609 if (refreshState == REF_IDLE) { 1610 // remember when the refresh is due 1611 refreshDueAt = curTick(); 1612 1613 // proceed to drain 1614 refreshState = REF_DRAIN; 1615 1616 DPRINTF(DRAM, "Refresh due\n"); 1617 } 1618 1619 // let any scheduled read or write to the same rank go ahead, 1620 // after which it will 1621 // hand control back to this event loop 1622 if (refreshState == REF_DRAIN) { 1623 // if a request is at the moment being handled and this request is 1624 // accessing the current rank then wait for it to finish 1625 if ((rank == memory.activeRank) 1626 && (memory.nextReqEvent.scheduled())) { 1627 // hand control over to the request loop until it is 1628 // evaluated next 1629 DPRINTF(DRAM, "Refresh awaiting draining\n"); 1630 1631 return; 1632 } else { 1633 refreshState = REF_PRE; 1634 } 1635 } 1636 1637 // at this point, ensure that all banks are precharged 1638 if (refreshState == REF_PRE) { 1639 // precharge any active bank if we are not already in the idle 1640 // state 1641 if (pwrState != PWR_IDLE) { 1642 // at the moment, we use a precharge all even if there is 1643 // only a single bank open 1644 DPRINTF(DRAM, "Precharging all\n"); 1645 1646 // first determine when we can precharge 1647 Tick pre_at = curTick(); 1648 1649 for (auto &b : banks) { 1650 // respect both causality and any existing bank 1651 // constraints, some banks could already have a 1652 // (auto) precharge scheduled 1653 pre_at = std::max(b.preAllowedAt, pre_at); 1654 } 1655 1656 // make sure all banks per rank are precharged, and for those that 1657 // already are, update their availability 1658 Tick act_allowed_at = pre_at + memory.tRP; 1659 1660 for (auto &b : banks) { 1661 if (b.openRow != Bank::NO_ROW) { 1662 memory.prechargeBank(*this, b, pre_at, false); 1663 } else { 1664 b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at); 1665 b.preAllowedAt = std::max(b.preAllowedAt, pre_at); 1666 } 1667 } 1668 1669 // precharge all banks in rank 1670 power.powerlib.doCommand(MemCommand::PREA, 0, 1671 divCeil(pre_at, memory.tCK) - 1672 memory.timeStampOffset); 1673 1674 DPRINTF(DRAMPower, "%llu,PREA,0,%d\n", 1675 divCeil(pre_at, memory.tCK) - 1676 memory.timeStampOffset, rank); 1677 } else { 1678 DPRINTF(DRAM, "All banks already precharged, starting refresh\n"); 1679 1680 // go ahead and kick the power state machine into gear if 1681 // we are already idle 1682 schedulePowerEvent(PWR_REF, curTick()); 1683 } 1684 1685 refreshState = REF_RUN; 1686 assert(numBanksActive == 0); 1687 1688 // wait for all banks to be precharged, at which point the 1689 // power state machine will transition to the idle state, and 1690 // automatically move to a refresh, at that point it will also 1691 // call this method to get the refresh event loop going again 1692 return; 1693 } 1694 1695 // last but not least we perform the actual refresh 1696 if (refreshState == REF_RUN) { 1697 // should never get here with any banks active 1698 assert(numBanksActive == 0); 1699 assert(pwrState == PWR_REF); 1700 1701 Tick ref_done_at = curTick() + memory.tRFC; 1702 1703 for (auto &b : banks) { 1704 b.actAllowedAt = ref_done_at; 1705 } 1706 1707 // at the moment this affects all ranks 1708 power.powerlib.doCommand(MemCommand::REF, 0, 1709 divCeil(curTick(), memory.tCK) - 1710 memory.timeStampOffset); 1711 1712 // at the moment sort the list of commands and update the counters 1713 // for DRAMPower libray when doing a refresh 1714 sort(power.powerlib.cmdList.begin(), 1715 power.powerlib.cmdList.end(), DRAMCtrl::sortTime); 1716 1717 // update the counters for DRAMPower, passing false to 1718 // indicate that this is not the last command in the 1719 // list. DRAMPower requires this information for the 1720 // correct calculation of the background energy at the end 1721 // of the simulation. Ideally we would want to call this 1722 // function with true once at the end of the 1723 // simulation. However, the discarded energy is extremly 1724 // small and does not effect the final results. 1725 power.powerlib.updateCounters(false); 1726 1727 // call the energy function 1728 power.powerlib.calcEnergy(); 1729 1730 // Update the stats 1731 updatePowerStats(); 1732 1733 DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), memory.tCK) - 1734 memory.timeStampOffset, rank); 1735 1736 // make sure we did not wait so long that we cannot make up 1737 // for it 1738 if (refreshDueAt + memory.tREFI < ref_done_at) { 1739 fatal("Refresh was delayed so long we cannot catch up\n"); 1740 } 1741 1742 // compensate for the delay in actually performing the refresh 1743 // when scheduling the next one 1744 schedule(refreshEvent, refreshDueAt + memory.tREFI - memory.tRP); 1745 1746 assert(!powerEvent.scheduled()); 1747 1748 // move to the idle power state once the refresh is done, this 1749 // will also move the refresh state machine to the refresh 1750 // idle state 1751 schedulePowerEvent(PWR_IDLE, ref_done_at); 1752 1753 DPRINTF(DRAMState, "Refresh done at %llu and next refresh at %llu\n", 1754 ref_done_at, refreshDueAt + memory.tREFI); 1755 } 1756} 1757 1758void 1759DRAMCtrl::Rank::schedulePowerEvent(PowerState pwr_state, Tick tick) 1760{ 1761 // respect causality 1762 assert(tick >= curTick()); 1763 1764 if (!powerEvent.scheduled()) { 1765 DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n", 1766 tick, pwr_state); 1767 1768 // insert the new transition 1769 pwrStateTrans = pwr_state; 1770 1771 schedule(powerEvent, tick); 1772 } else { 1773 panic("Scheduled power event at %llu to state %d, " 1774 "with scheduled event at %llu to %d\n", tick, pwr_state, 1775 powerEvent.when(), pwrStateTrans); 1776 } 1777} 1778 1779void 1780DRAMCtrl::Rank::processPowerEvent() 1781{ 1782 // remember where we were, and for how long 1783 Tick duration = curTick() - pwrStateTick; 1784 PowerState prev_state = pwrState; 1785 1786 // update the accounting 1787 pwrStateTime[prev_state] += duration; 1788 1789 pwrState = pwrStateTrans; 1790 pwrStateTick = curTick(); 1791 1792 if (pwrState == PWR_IDLE) { 1793 DPRINTF(DRAMState, "All banks precharged\n"); 1794 1795 // if we were refreshing, make sure we start scheduling requests again 1796 if (prev_state == PWR_REF) { 1797 DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration); 1798 assert(pwrState == PWR_IDLE); 1799 1800 // kick things into action again 1801 refreshState = REF_IDLE; 1802 // a request event could be already scheduled by the state 1803 // machine of the other rank 1804 if (!memory.nextReqEvent.scheduled()) 1805 schedule(memory.nextReqEvent, curTick()); 1806 } else { 1807 assert(prev_state == PWR_ACT); 1808 1809 // if we have a pending refresh, and are now moving to 1810 // the idle state, direclty transition to a refresh 1811 if (refreshState == REF_RUN) { 1812 // there should be nothing waiting at this point 1813 assert(!powerEvent.scheduled()); 1814 1815 // update the state in zero time and proceed below 1816 pwrState = PWR_REF; 1817 } 1818 } 1819 } 1820 1821 // we transition to the refresh state, let the refresh state 1822 // machine know of this state update and let it deal with the 1823 // scheduling of the next power state transition as well as the 1824 // following refresh 1825 if (pwrState == PWR_REF) { 1826 DPRINTF(DRAMState, "Refreshing\n"); 1827 // kick the refresh event loop into action again, and that 1828 // in turn will schedule a transition to the idle power 1829 // state once the refresh is done 1830 assert(refreshState == REF_RUN); 1831 processRefreshEvent(); 1832 } 1833} 1834 1835void 1836DRAMCtrl::Rank::updatePowerStats() 1837{ 1838 // Get the energy and power from DRAMPower 1839 Data::MemoryPowerModel::Energy energy = 1840 power.powerlib.getEnergy(); 1841 Data::MemoryPowerModel::Power rank_power = 1842 power.powerlib.getPower(); 1843 1844 actEnergy = energy.act_energy * memory.devicesPerRank; 1845 preEnergy = energy.pre_energy * memory.devicesPerRank; 1846 readEnergy = energy.read_energy * memory.devicesPerRank; 1847 writeEnergy = energy.write_energy * memory.devicesPerRank; 1848 refreshEnergy = energy.ref_energy * memory.devicesPerRank; 1849 actBackEnergy = energy.act_stdby_energy * memory.devicesPerRank; 1850 preBackEnergy = energy.pre_stdby_energy * memory.devicesPerRank; 1851 totalEnergy = energy.total_energy * memory.devicesPerRank; 1852 averagePower = rank_power.average_power * memory.devicesPerRank; 1853} 1854 1855void 1856DRAMCtrl::Rank::regStats() 1857{ 1858 using namespace Stats; 1859 1860 pwrStateTime 1861 .init(5) 1862 .name(name() + ".memoryStateTime") 1863 .desc("Time in different power states"); 1864 pwrStateTime.subname(0, "IDLE"); 1865 pwrStateTime.subname(1, "REF"); 1866 pwrStateTime.subname(2, "PRE_PDN"); 1867 pwrStateTime.subname(3, "ACT"); 1868 pwrStateTime.subname(4, "ACT_PDN"); 1869 1870 actEnergy 1871 .name(name() + ".actEnergy") 1872 .desc("Energy for activate commands per rank (pJ)"); 1873 1874 preEnergy 1875 .name(name() + ".preEnergy") 1876 .desc("Energy for precharge commands per rank (pJ)"); 1877 1878 readEnergy 1879 .name(name() + ".readEnergy") 1880 .desc("Energy for read commands per rank (pJ)"); 1881 1882 writeEnergy 1883 .name(name() + ".writeEnergy") 1884 .desc("Energy for write commands per rank (pJ)"); 1885 1886 refreshEnergy 1887 .name(name() + ".refreshEnergy") 1888 .desc("Energy for refresh commands per rank (pJ)"); 1889 1890 actBackEnergy 1891 .name(name() + ".actBackEnergy") 1892 .desc("Energy for active background per rank (pJ)"); 1893 1894 preBackEnergy 1895 .name(name() + ".preBackEnergy") 1896 .desc("Energy for precharge background per rank (pJ)"); 1897 1898 totalEnergy 1899 .name(name() + ".totalEnergy") 1900 .desc("Total energy per rank (pJ)"); 1901 1902 averagePower 1903 .name(name() + ".averagePower") 1904 .desc("Core power per rank (mW)"); 1905} 1906void 1907DRAMCtrl::regStats() 1908{ 1909 using namespace Stats; 1910 1911 AbstractMemory::regStats(); 1912 1913 for (auto r : ranks) { 1914 r->regStats(); 1915 } 1916 1917 readReqs 1918 .name(name() + ".readReqs") 1919 .desc("Number of read requests accepted"); 1920 1921 writeReqs 1922 .name(name() + ".writeReqs") 1923 .desc("Number of write requests accepted"); 1924 1925 readBursts 1926 .name(name() + ".readBursts") 1927 .desc("Number of DRAM read bursts, " 1928 "including those serviced by the write queue"); 1929 1930 writeBursts 1931 .name(name() + ".writeBursts") 1932 .desc("Number of DRAM write bursts, " 1933 "including those merged in the write queue"); 1934 1935 servicedByWrQ 1936 .name(name() + ".servicedByWrQ") 1937 .desc("Number of DRAM read bursts serviced by the write queue"); 1938 1939 mergedWrBursts 1940 .name(name() + ".mergedWrBursts") 1941 .desc("Number of DRAM write bursts merged with an existing one"); 1942 1943 neitherReadNorWrite 1944 .name(name() + ".neitherReadNorWriteReqs") 1945 .desc("Number of requests that are neither read nor write"); 1946 1947 perBankRdBursts 1948 .init(banksPerRank * ranksPerChannel) 1949 .name(name() + ".perBankRdBursts") 1950 .desc("Per bank write bursts"); 1951 1952 perBankWrBursts 1953 .init(banksPerRank * ranksPerChannel) 1954 .name(name() + ".perBankWrBursts") 1955 .desc("Per bank write bursts"); 1956 1957 avgRdQLen 1958 .name(name() + ".avgRdQLen") 1959 .desc("Average read queue length when enqueuing") 1960 .precision(2); 1961 1962 avgWrQLen 1963 .name(name() + ".avgWrQLen") 1964 .desc("Average write queue length when enqueuing") 1965 .precision(2); 1966 1967 totQLat 1968 .name(name() + ".totQLat") 1969 .desc("Total ticks spent queuing"); 1970 1971 totBusLat 1972 .name(name() + ".totBusLat") 1973 .desc("Total ticks spent in databus transfers"); 1974 1975 totMemAccLat 1976 .name(name() + ".totMemAccLat") 1977 .desc("Total ticks spent from burst creation until serviced " 1978 "by the DRAM"); 1979 1980 avgQLat 1981 .name(name() + ".avgQLat") 1982 .desc("Average queueing delay per DRAM burst") 1983 .precision(2); 1984 1985 avgQLat = totQLat / (readBursts - servicedByWrQ); 1986 1987 avgBusLat 1988 .name(name() + ".avgBusLat") 1989 .desc("Average bus latency per DRAM burst") 1990 .precision(2); 1991 1992 avgBusLat = totBusLat / (readBursts - servicedByWrQ); 1993 1994 avgMemAccLat 1995 .name(name() + ".avgMemAccLat") 1996 .desc("Average memory access latency per DRAM burst") 1997 .precision(2); 1998 1999 avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ); 2000 2001 numRdRetry 2002 .name(name() + ".numRdRetry") 2003 .desc("Number of times read queue was full causing retry"); 2004 2005 numWrRetry 2006 .name(name() + ".numWrRetry") 2007 .desc("Number of times write queue was full causing retry"); 2008 2009 readRowHits 2010 .name(name() + ".readRowHits") 2011 .desc("Number of row buffer hits during reads"); 2012 2013 writeRowHits 2014 .name(name() + ".writeRowHits") 2015 .desc("Number of row buffer hits during writes"); 2016 2017 readRowHitRate 2018 .name(name() + ".readRowHitRate") 2019 .desc("Row buffer hit rate for reads") 2020 .precision(2); 2021 2022 readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100; 2023 2024 writeRowHitRate 2025 .name(name() + ".writeRowHitRate") 2026 .desc("Row buffer hit rate for writes") 2027 .precision(2); 2028 2029 writeRowHitRate = (writeRowHits / (writeBursts - mergedWrBursts)) * 100; 2030 2031 readPktSize 2032 .init(ceilLog2(burstSize) + 1) 2033 .name(name() + ".readPktSize") 2034 .desc("Read request sizes (log2)"); 2035 2036 writePktSize 2037 .init(ceilLog2(burstSize) + 1) 2038 .name(name() + ".writePktSize") 2039 .desc("Write request sizes (log2)"); 2040 2041 rdQLenPdf 2042 .init(readBufferSize) 2043 .name(name() + ".rdQLenPdf") 2044 .desc("What read queue length does an incoming req see"); 2045 2046 wrQLenPdf 2047 .init(writeBufferSize) 2048 .name(name() + ".wrQLenPdf") 2049 .desc("What write queue length does an incoming req see"); 2050 2051 bytesPerActivate 2052 .init(maxAccessesPerRow) 2053 .name(name() + ".bytesPerActivate") 2054 .desc("Bytes accessed per row activation") 2055 .flags(nozero); 2056 2057 rdPerTurnAround 2058 .init(readBufferSize) 2059 .name(name() + ".rdPerTurnAround") 2060 .desc("Reads before turning the bus around for writes") 2061 .flags(nozero); 2062 2063 wrPerTurnAround 2064 .init(writeBufferSize) 2065 .name(name() + ".wrPerTurnAround") 2066 .desc("Writes before turning the bus around for reads") 2067 .flags(nozero); 2068 2069 bytesReadDRAM 2070 .name(name() + ".bytesReadDRAM") 2071 .desc("Total number of bytes read from DRAM"); 2072 2073 bytesReadWrQ 2074 .name(name() + ".bytesReadWrQ") 2075 .desc("Total number of bytes read from write queue"); 2076 2077 bytesWritten 2078 .name(name() + ".bytesWritten") 2079 .desc("Total number of bytes written to DRAM"); 2080 2081 bytesReadSys 2082 .name(name() + ".bytesReadSys") 2083 .desc("Total read bytes from the system interface side"); 2084 2085 bytesWrittenSys 2086 .name(name() + ".bytesWrittenSys") 2087 .desc("Total written bytes from the system interface side"); 2088 2089 avgRdBW 2090 .name(name() + ".avgRdBW") 2091 .desc("Average DRAM read bandwidth in MiByte/s") 2092 .precision(2); 2093 2094 avgRdBW = (bytesReadDRAM / 1000000) / simSeconds; 2095 2096 avgWrBW 2097 .name(name() + ".avgWrBW") 2098 .desc("Average achieved write bandwidth in MiByte/s") 2099 .precision(2); 2100 2101 avgWrBW = (bytesWritten / 1000000) / simSeconds; 2102 2103 avgRdBWSys 2104 .name(name() + ".avgRdBWSys") 2105 .desc("Average system read bandwidth in MiByte/s") 2106 .precision(2); 2107 2108 avgRdBWSys = (bytesReadSys / 1000000) / simSeconds; 2109 2110 avgWrBWSys 2111 .name(name() + ".avgWrBWSys") 2112 .desc("Average system write bandwidth in MiByte/s") 2113 .precision(2); 2114 2115 avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds; 2116 2117 peakBW 2118 .name(name() + ".peakBW") 2119 .desc("Theoretical peak bandwidth in MiByte/s") 2120 .precision(2); 2121 2122 peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000; 2123 2124 busUtil 2125 .name(name() + ".busUtil") 2126 .desc("Data bus utilization in percentage") 2127 .precision(2); 2128 busUtil = (avgRdBW + avgWrBW) / peakBW * 100; 2129 2130 totGap 2131 .name(name() + ".totGap") 2132 .desc("Total gap between requests"); 2133 2134 avgGap 2135 .name(name() + ".avgGap") 2136 .desc("Average gap between requests") 2137 .precision(2); 2138 2139 avgGap = totGap / (readReqs + writeReqs); 2140 2141 // Stats for DRAM Power calculation based on Micron datasheet 2142 busUtilRead 2143 .name(name() + ".busUtilRead") 2144 .desc("Data bus utilization in percentage for reads") 2145 .precision(2); 2146 2147 busUtilRead = avgRdBW / peakBW * 100; 2148 2149 busUtilWrite 2150 .name(name() + ".busUtilWrite") 2151 .desc("Data bus utilization in percentage for writes") 2152 .precision(2); 2153 2154 busUtilWrite = avgWrBW / peakBW * 100; 2155 2156 pageHitRate 2157 .name(name() + ".pageHitRate") 2158 .desc("Row buffer hit rate, read and write combined") 2159 .precision(2); 2160 2161 pageHitRate = (writeRowHits + readRowHits) / 2162 (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100; 2163} 2164 2165void 2166DRAMCtrl::recvFunctional(PacketPtr pkt) 2167{ 2168 // rely on the abstract memory 2169 functionalAccess(pkt); 2170} 2171 2172BaseSlavePort& 2173DRAMCtrl::getSlavePort(const string &if_name, PortID idx) 2174{ 2175 if (if_name != "port") { 2176 return MemObject::getSlavePort(if_name, idx); 2177 } else { 2178 return port; 2179 } 2180} 2181 2182unsigned int 2183DRAMCtrl::drain(DrainManager *dm) 2184{ 2185 unsigned int count = port.drain(dm); 2186 2187 // if there is anything in any of our internal queues, keep track 2188 // of that as well 2189 if (!(writeQueue.empty() && readQueue.empty() && 2190 respQueue.empty())) { 2191 DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d," 2192 " resp: %d\n", writeQueue.size(), readQueue.size(), 2193 respQueue.size()); 2194 ++count; 2195 drainManager = dm; 2196 2197 // the only part that is not drained automatically over time 2198 // is the write queue, thus kick things into action if needed 2199 if (!writeQueue.empty() && !nextReqEvent.scheduled()) { 2200 schedule(nextReqEvent, curTick()); 2201 } 2202 } 2203 2204 if (count) 2205 setDrainState(Drainable::Draining); 2206 else 2207 setDrainState(Drainable::Drained); 2208 return count; 2209} 2210 2211void 2212DRAMCtrl::drainResume() 2213{ 2214 if (!isTimingMode && system()->isTimingMode()) { 2215 // if we switched to timing mode, kick things into action, 2216 // and behave as if we restored from a checkpoint 2217 startup(); 2218 } else if (isTimingMode && !system()->isTimingMode()) { 2219 // if we switch from timing mode, stop the refresh events to 2220 // not cause issues with KVM 2221 for (auto r : ranks) { 2222 r->suspend(); 2223 } 2224 } 2225 2226 // update the mode 2227 isTimingMode = system()->isTimingMode(); 2228} 2229 2230DRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _memory) 2231 : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this), 2232 memory(_memory) 2233{ } 2234 2235AddrRangeList 2236DRAMCtrl::MemoryPort::getAddrRanges() const 2237{ 2238 AddrRangeList ranges; 2239 ranges.push_back(memory.getAddrRange()); 2240 return ranges; 2241} 2242 2243void 2244DRAMCtrl::MemoryPort::recvFunctional(PacketPtr pkt) 2245{ 2246 pkt->pushLabel(memory.name()); 2247 2248 if (!queue.checkFunctional(pkt)) { 2249 // Default implementation of SimpleTimingPort::recvFunctional() 2250 // calls recvAtomic() and throws away the latency; we can save a 2251 // little here by just not calculating the latency. 2252 memory.recvFunctional(pkt); 2253 } 2254 2255 pkt->popLabel(); 2256} 2257 2258Tick 2259DRAMCtrl::MemoryPort::recvAtomic(PacketPtr pkt) 2260{ 2261 return memory.recvAtomic(pkt); 2262} 2263 2264bool 2265DRAMCtrl::MemoryPort::recvTimingReq(PacketPtr pkt) 2266{ 2267 // pass it to the memory controller 2268 return memory.recvTimingReq(pkt); 2269} 2270 2271DRAMCtrl* 2272DRAMCtrlParams::create() 2273{ 2274 return new DRAMCtrl(this); 2275} 2276