dram_ctrl.cc revision 10620
1/* 2 * Copyright (c) 2010-2014 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2013 Amin Farmahini-Farahani 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Andreas Hansson 41 * Ani Udipi 42 * Neha Agarwal 43 * Omar Naji 44 */ 45 46#include "base/bitfield.hh" 47#include "base/trace.hh" 48#include "debug/DRAM.hh" 49#include "debug/DRAMPower.hh" 50#include "debug/DRAMState.hh" 51#include "debug/Drain.hh" 52#include "mem/dram_ctrl.hh" 53#include "sim/system.hh" 54 55using namespace std; 56using namespace Data; 57 58DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) : 59 AbstractMemory(p), 60 port(name() + ".port", *this), isTimingMode(false), 61 retryRdReq(false), retryWrReq(false), 62 busState(READ), 63 nextReqEvent(this), respondEvent(this), 64 drainManager(NULL), 65 deviceSize(p->device_size), 66 deviceBusWidth(p->device_bus_width), burstLength(p->burst_length), 67 deviceRowBufferSize(p->device_rowbuffer_size), 68 devicesPerRank(p->devices_per_rank), 69 burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8), 70 rowBufferSize(devicesPerRank * deviceRowBufferSize), 71 columnsPerRowBuffer(rowBufferSize / burstSize), 72 columnsPerStripe(range.granularity() / burstSize), 73 ranksPerChannel(p->ranks_per_channel), 74 bankGroupsPerRank(p->bank_groups_per_rank), 75 bankGroupArch(p->bank_groups_per_rank > 0), 76 banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0), 77 readBufferSize(p->read_buffer_size), 78 writeBufferSize(p->write_buffer_size), 79 writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0), 80 writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0), 81 minWritesPerSwitch(p->min_writes_per_switch), 82 writesThisTime(0), readsThisTime(0), 83 tCK(p->tCK), tWTR(p->tWTR), tRTW(p->tRTW), tCS(p->tCS), tBURST(p->tBURST), 84 tCCD_L(p->tCCD_L), tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS), 85 tWR(p->tWR), tRTP(p->tRTP), tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD), 86 tRRD_L(p->tRRD_L), tXAW(p->tXAW), activationLimit(p->activation_limit), 87 memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping), 88 pageMgmt(p->page_policy), 89 maxAccessesPerRow(p->max_accesses_per_row), 90 frontendLatency(p->static_frontend_latency), 91 backendLatency(p->static_backend_latency), 92 busBusyUntil(0), prevArrival(0), 93 nextReqTime(0), activeRank(0), timeStampOffset(0) 94{ 95 // sanity check the ranks since we rely on bit slicing for the 96 // address decoding 97 fatal_if(!isPowerOf2(ranksPerChannel), "DRAM rank count of %d is not " 98 "allowed, must be a power of two\n", ranksPerChannel); 99 100 for (int i = 0; i < ranksPerChannel; i++) { 101 Rank* rank = new Rank(*this, p); 102 ranks.push_back(rank); 103 104 rank->actTicks.resize(activationLimit, 0); 105 rank->banks.resize(banksPerRank); 106 rank->rank = i; 107 108 for (int b = 0; b < banksPerRank; b++) { 109 rank->banks[b].bank = b; 110 // GDDR addressing of banks to BG is linear. 111 // Here we assume that all DRAM generations address bank groups as 112 // follows: 113 if (bankGroupArch) { 114 // Simply assign lower bits to bank group in order to 115 // rotate across bank groups as banks are incremented 116 // e.g. with 4 banks per bank group and 16 banks total: 117 // banks 0,4,8,12 are in bank group 0 118 // banks 1,5,9,13 are in bank group 1 119 // banks 2,6,10,14 are in bank group 2 120 // banks 3,7,11,15 are in bank group 3 121 rank->banks[b].bankgr = b % bankGroupsPerRank; 122 } else { 123 // No bank groups; simply assign to bank number 124 rank->banks[b].bankgr = b; 125 } 126 } 127 } 128 129 // perform a basic check of the write thresholds 130 if (p->write_low_thresh_perc >= p->write_high_thresh_perc) 131 fatal("Write buffer low threshold %d must be smaller than the " 132 "high threshold %d\n", p->write_low_thresh_perc, 133 p->write_high_thresh_perc); 134 135 // determine the rows per bank by looking at the total capacity 136 uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size()); 137 138 // determine the dram actual capacity from the DRAM config in Mbytes 139 uint64_t deviceCapacity = deviceSize / (1024 * 1024) * devicesPerRank * 140 ranksPerChannel; 141 142 // if actual DRAM size does not match memory capacity in system warn! 143 if (deviceCapacity != capacity / (1024 * 1024)) 144 warn("DRAM device capacity (%d Mbytes) does not match the " 145 "address range assigned (%d Mbytes)\n", deviceCapacity, 146 capacity / (1024 * 1024)); 147 148 DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity, 149 AbstractMemory::size()); 150 151 DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n", 152 rowBufferSize, columnsPerRowBuffer); 153 154 rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel); 155 156 // a bit of sanity checks on the interleaving 157 if (range.interleaved()) { 158 if (channels != range.stripes()) 159 fatal("%s has %d interleaved address stripes but %d channel(s)\n", 160 name(), range.stripes(), channels); 161 162 if (addrMapping == Enums::RoRaBaChCo) { 163 if (rowBufferSize != range.granularity()) { 164 fatal("Channel interleaving of %s doesn't match RoRaBaChCo " 165 "address map\n", name()); 166 } 167 } else if (addrMapping == Enums::RoRaBaCoCh || 168 addrMapping == Enums::RoCoRaBaCh) { 169 // for the interleavings with channel bits in the bottom, 170 // if the system uses a channel striping granularity that 171 // is larger than the DRAM burst size, then map the 172 // sequential accesses within a stripe to a number of 173 // columns in the DRAM, effectively placing some of the 174 // lower-order column bits as the least-significant bits 175 // of the address (above the ones denoting the burst size) 176 assert(columnsPerStripe >= 1); 177 178 // channel striping has to be done at a granularity that 179 // is equal or larger to a cache line 180 if (system()->cacheLineSize() > range.granularity()) { 181 fatal("Channel interleaving of %s must be at least as large " 182 "as the cache line size\n", name()); 183 } 184 185 // ...and equal or smaller than the row-buffer size 186 if (rowBufferSize < range.granularity()) { 187 fatal("Channel interleaving of %s must be at most as large " 188 "as the row-buffer size\n", name()); 189 } 190 // this is essentially the check above, so just to be sure 191 assert(columnsPerStripe <= columnsPerRowBuffer); 192 } 193 } 194 195 // some basic sanity checks 196 if (tREFI <= tRP || tREFI <= tRFC) { 197 fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n", 198 tREFI, tRP, tRFC); 199 } 200 201 // basic bank group architecture checks -> 202 if (bankGroupArch) { 203 // must have at least one bank per bank group 204 if (bankGroupsPerRank > banksPerRank) { 205 fatal("banks per rank (%d) must be equal to or larger than " 206 "banks groups per rank (%d)\n", 207 banksPerRank, bankGroupsPerRank); 208 } 209 // must have same number of banks in each bank group 210 if ((banksPerRank % bankGroupsPerRank) != 0) { 211 fatal("Banks per rank (%d) must be evenly divisible by bank groups " 212 "per rank (%d) for equal banks per bank group\n", 213 banksPerRank, bankGroupsPerRank); 214 } 215 // tCCD_L should be greater than minimal, back-to-back burst delay 216 if (tCCD_L <= tBURST) { 217 fatal("tCCD_L (%d) should be larger than tBURST (%d) when " 218 "bank groups per rank (%d) is greater than 1\n", 219 tCCD_L, tBURST, bankGroupsPerRank); 220 } 221 // tRRD_L is greater than minimal, same bank group ACT-to-ACT delay 222 // some datasheets might specify it equal to tRRD 223 if (tRRD_L < tRRD) { 224 fatal("tRRD_L (%d) should be larger than tRRD (%d) when " 225 "bank groups per rank (%d) is greater than 1\n", 226 tRRD_L, tRRD, bankGroupsPerRank); 227 } 228 } 229 230} 231 232void 233DRAMCtrl::init() 234{ 235 AbstractMemory::init(); 236 237 if (!port.isConnected()) { 238 fatal("DRAMCtrl %s is unconnected!\n", name()); 239 } else { 240 port.sendRangeChange(); 241 } 242} 243 244void 245DRAMCtrl::startup() 246{ 247 // remember the memory system mode of operation 248 isTimingMode = system()->isTimingMode(); 249 250 if (isTimingMode) { 251 // timestamp offset should be in clock cycles for DRAMPower 252 timeStampOffset = divCeil(curTick(), tCK); 253 254 // update the start tick for the precharge accounting to the 255 // current tick 256 for (auto r : ranks) { 257 r->startup(curTick() + tREFI - tRP); 258 } 259 260 // shift the bus busy time sufficiently far ahead that we never 261 // have to worry about negative values when computing the time for 262 // the next request, this will add an insignificant bubble at the 263 // start of simulation 264 busBusyUntil = curTick() + tRP + tRCD + tCL; 265 } 266} 267 268Tick 269DRAMCtrl::recvAtomic(PacketPtr pkt) 270{ 271 DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr()); 272 273 // do the actual memory access and turn the packet into a response 274 access(pkt); 275 276 Tick latency = 0; 277 if (!pkt->memInhibitAsserted() && pkt->hasData()) { 278 // this value is not supposed to be accurate, just enough to 279 // keep things going, mimic a closed page 280 latency = tRP + tRCD + tCL; 281 } 282 return latency; 283} 284 285bool 286DRAMCtrl::readQueueFull(unsigned int neededEntries) const 287{ 288 DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n", 289 readBufferSize, readQueue.size() + respQueue.size(), 290 neededEntries); 291 292 return 293 (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize; 294} 295 296bool 297DRAMCtrl::writeQueueFull(unsigned int neededEntries) const 298{ 299 DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n", 300 writeBufferSize, writeQueue.size(), neededEntries); 301 return (writeQueue.size() + neededEntries) > writeBufferSize; 302} 303 304DRAMCtrl::DRAMPacket* 305DRAMCtrl::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size, 306 bool isRead) 307{ 308 // decode the address based on the address mapping scheme, with 309 // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and 310 // channel, respectively 311 uint8_t rank; 312 uint8_t bank; 313 // use a 64-bit unsigned during the computations as the row is 314 // always the top bits, and check before creating the DRAMPacket 315 uint64_t row; 316 317 // truncate the address to a DRAM burst, which makes it unique to 318 // a specific column, row, bank, rank and channel 319 Addr addr = dramPktAddr / burstSize; 320 321 // we have removed the lowest order address bits that denote the 322 // position within the column 323 if (addrMapping == Enums::RoRaBaChCo) { 324 // the lowest order bits denote the column to ensure that 325 // sequential cache lines occupy the same row 326 addr = addr / columnsPerRowBuffer; 327 328 // take out the channel part of the address 329 addr = addr / channels; 330 331 // after the channel bits, get the bank bits to interleave 332 // over the banks 333 bank = addr % banksPerRank; 334 addr = addr / banksPerRank; 335 336 // after the bank, we get the rank bits which thus interleaves 337 // over the ranks 338 rank = addr % ranksPerChannel; 339 addr = addr / ranksPerChannel; 340 341 // lastly, get the row bits 342 row = addr % rowsPerBank; 343 addr = addr / rowsPerBank; 344 } else if (addrMapping == Enums::RoRaBaCoCh) { 345 // take out the lower-order column bits 346 addr = addr / columnsPerStripe; 347 348 // take out the channel part of the address 349 addr = addr / channels; 350 351 // next, the higher-order column bites 352 addr = addr / (columnsPerRowBuffer / columnsPerStripe); 353 354 // after the column bits, we get the bank bits to interleave 355 // over the banks 356 bank = addr % banksPerRank; 357 addr = addr / banksPerRank; 358 359 // after the bank, we get the rank bits which thus interleaves 360 // over the ranks 361 rank = addr % ranksPerChannel; 362 addr = addr / ranksPerChannel; 363 364 // lastly, get the row bits 365 row = addr % rowsPerBank; 366 addr = addr / rowsPerBank; 367 } else if (addrMapping == Enums::RoCoRaBaCh) { 368 // optimise for closed page mode and utilise maximum 369 // parallelism of the DRAM (at the cost of power) 370 371 // take out the lower-order column bits 372 addr = addr / columnsPerStripe; 373 374 // take out the channel part of the address, not that this has 375 // to match with how accesses are interleaved between the 376 // controllers in the address mapping 377 addr = addr / channels; 378 379 // start with the bank bits, as this provides the maximum 380 // opportunity for parallelism between requests 381 bank = addr % banksPerRank; 382 addr = addr / banksPerRank; 383 384 // next get the rank bits 385 rank = addr % ranksPerChannel; 386 addr = addr / ranksPerChannel; 387 388 // next, the higher-order column bites 389 addr = addr / (columnsPerRowBuffer / columnsPerStripe); 390 391 // lastly, get the row bits 392 row = addr % rowsPerBank; 393 addr = addr / rowsPerBank; 394 } else 395 panic("Unknown address mapping policy chosen!"); 396 397 assert(rank < ranksPerChannel); 398 assert(bank < banksPerRank); 399 assert(row < rowsPerBank); 400 assert(row < Bank::NO_ROW); 401 402 DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n", 403 dramPktAddr, rank, bank, row); 404 405 // create the corresponding DRAM packet with the entry time and 406 // ready time set to the current tick, the latter will be updated 407 // later 408 uint16_t bank_id = banksPerRank * rank + bank; 409 return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr, 410 size, ranks[rank]->banks[bank], *ranks[rank]); 411} 412 413void 414DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount) 415{ 416 // only add to the read queue here. whenever the request is 417 // eventually done, set the readyTime, and call schedule() 418 assert(!pkt->isWrite()); 419 420 assert(pktCount != 0); 421 422 // if the request size is larger than burst size, the pkt is split into 423 // multiple DRAM packets 424 // Note if the pkt starting address is not aligened to burst size, the 425 // address of first DRAM packet is kept unaliged. Subsequent DRAM packets 426 // are aligned to burst size boundaries. This is to ensure we accurately 427 // check read packets against packets in write queue. 428 Addr addr = pkt->getAddr(); 429 unsigned pktsServicedByWrQ = 0; 430 BurstHelper* burst_helper = NULL; 431 for (int cnt = 0; cnt < pktCount; ++cnt) { 432 unsigned size = std::min((addr | (burstSize - 1)) + 1, 433 pkt->getAddr() + pkt->getSize()) - addr; 434 readPktSize[ceilLog2(size)]++; 435 readBursts++; 436 437 // First check write buffer to see if the data is already at 438 // the controller 439 bool foundInWrQ = false; 440 for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) { 441 // check if the read is subsumed in the write entry we are 442 // looking at 443 if ((*i)->addr <= addr && 444 (addr + size) <= ((*i)->addr + (*i)->size)) { 445 foundInWrQ = true; 446 servicedByWrQ++; 447 pktsServicedByWrQ++; 448 DPRINTF(DRAM, "Read to addr %lld with size %d serviced by " 449 "write queue\n", addr, size); 450 bytesReadWrQ += burstSize; 451 break; 452 } 453 } 454 455 // If not found in the write q, make a DRAM packet and 456 // push it onto the read queue 457 if (!foundInWrQ) { 458 459 // Make the burst helper for split packets 460 if (pktCount > 1 && burst_helper == NULL) { 461 DPRINTF(DRAM, "Read to addr %lld translates to %d " 462 "dram requests\n", pkt->getAddr(), pktCount); 463 burst_helper = new BurstHelper(pktCount); 464 } 465 466 DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true); 467 dram_pkt->burstHelper = burst_helper; 468 469 assert(!readQueueFull(1)); 470 rdQLenPdf[readQueue.size() + respQueue.size()]++; 471 472 DPRINTF(DRAM, "Adding to read queue\n"); 473 474 readQueue.push_back(dram_pkt); 475 476 // Update stats 477 avgRdQLen = readQueue.size() + respQueue.size(); 478 } 479 480 // Starting address of next dram pkt (aligend to burstSize boundary) 481 addr = (addr | (burstSize - 1)) + 1; 482 } 483 484 // If all packets are serviced by write queue, we send the repsonse back 485 if (pktsServicedByWrQ == pktCount) { 486 accessAndRespond(pkt, frontendLatency); 487 return; 488 } 489 490 // Update how many split packets are serviced by write queue 491 if (burst_helper != NULL) 492 burst_helper->burstsServiced = pktsServicedByWrQ; 493 494 // If we are not already scheduled to get a request out of the 495 // queue, do so now 496 if (!nextReqEvent.scheduled()) { 497 DPRINTF(DRAM, "Request scheduled immediately\n"); 498 schedule(nextReqEvent, curTick()); 499 } 500} 501 502void 503DRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pktCount) 504{ 505 // only add to the write queue here. whenever the request is 506 // eventually done, set the readyTime, and call schedule() 507 assert(pkt->isWrite()); 508 509 // if the request size is larger than burst size, the pkt is split into 510 // multiple DRAM packets 511 Addr addr = pkt->getAddr(); 512 for (int cnt = 0; cnt < pktCount; ++cnt) { 513 unsigned size = std::min((addr | (burstSize - 1)) + 1, 514 pkt->getAddr() + pkt->getSize()) - addr; 515 writePktSize[ceilLog2(size)]++; 516 writeBursts++; 517 518 // see if we can merge with an existing item in the write 519 // queue and keep track of whether we have merged or not so we 520 // can stop at that point and also avoid enqueueing a new 521 // request 522 bool merged = false; 523 auto w = writeQueue.begin(); 524 525 while(!merged && w != writeQueue.end()) { 526 // either of the two could be first, if they are the same 527 // it does not matter which way we go 528 if ((*w)->addr >= addr) { 529 // the existing one starts after the new one, figure 530 // out where the new one ends with respect to the 531 // existing one 532 if ((addr + size) >= ((*w)->addr + (*w)->size)) { 533 // check if the existing one is completely 534 // subsumed in the new one 535 DPRINTF(DRAM, "Merging write covering existing burst\n"); 536 merged = true; 537 // update both the address and the size 538 (*w)->addr = addr; 539 (*w)->size = size; 540 } else if ((addr + size) >= (*w)->addr && 541 ((*w)->addr + (*w)->size - addr) <= burstSize) { 542 // the new one is just before or partially 543 // overlapping with the existing one, and together 544 // they fit within a burst 545 DPRINTF(DRAM, "Merging write before existing burst\n"); 546 merged = true; 547 // the existing queue item needs to be adjusted with 548 // respect to both address and size 549 (*w)->size = (*w)->addr + (*w)->size - addr; 550 (*w)->addr = addr; 551 } 552 } else { 553 // the new one starts after the current one, figure 554 // out where the existing one ends with respect to the 555 // new one 556 if (((*w)->addr + (*w)->size) >= (addr + size)) { 557 // check if the new one is completely subsumed in the 558 // existing one 559 DPRINTF(DRAM, "Merging write into existing burst\n"); 560 merged = true; 561 // no adjustments necessary 562 } else if (((*w)->addr + (*w)->size) >= addr && 563 (addr + size - (*w)->addr) <= burstSize) { 564 // the existing one is just before or partially 565 // overlapping with the new one, and together 566 // they fit within a burst 567 DPRINTF(DRAM, "Merging write after existing burst\n"); 568 merged = true; 569 // the address is right, and only the size has 570 // to be adjusted 571 (*w)->size = addr + size - (*w)->addr; 572 } 573 } 574 ++w; 575 } 576 577 // if the item was not merged we need to create a new write 578 // and enqueue it 579 if (!merged) { 580 DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false); 581 582 assert(writeQueue.size() < writeBufferSize); 583 wrQLenPdf[writeQueue.size()]++; 584 585 DPRINTF(DRAM, "Adding to write queue\n"); 586 587 writeQueue.push_back(dram_pkt); 588 589 // Update stats 590 avgWrQLen = writeQueue.size(); 591 } else { 592 // keep track of the fact that this burst effectively 593 // disappeared as it was merged with an existing one 594 mergedWrBursts++; 595 } 596 597 // Starting address of next dram pkt (aligend to burstSize boundary) 598 addr = (addr | (burstSize - 1)) + 1; 599 } 600 601 // we do not wait for the writes to be send to the actual memory, 602 // but instead take responsibility for the consistency here and 603 // snoop the write queue for any upcoming reads 604 // @todo, if a pkt size is larger than burst size, we might need a 605 // different front end latency 606 accessAndRespond(pkt, frontendLatency); 607 608 // If we are not already scheduled to get a request out of the 609 // queue, do so now 610 if (!nextReqEvent.scheduled()) { 611 DPRINTF(DRAM, "Request scheduled immediately\n"); 612 schedule(nextReqEvent, curTick()); 613 } 614} 615 616void 617DRAMCtrl::printQs() const { 618 DPRINTF(DRAM, "===READ QUEUE===\n\n"); 619 for (auto i = readQueue.begin() ; i != readQueue.end() ; ++i) { 620 DPRINTF(DRAM, "Read %lu\n", (*i)->addr); 621 } 622 DPRINTF(DRAM, "\n===RESP QUEUE===\n\n"); 623 for (auto i = respQueue.begin() ; i != respQueue.end() ; ++i) { 624 DPRINTF(DRAM, "Response %lu\n", (*i)->addr); 625 } 626 DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n"); 627 for (auto i = writeQueue.begin() ; i != writeQueue.end() ; ++i) { 628 DPRINTF(DRAM, "Write %lu\n", (*i)->addr); 629 } 630} 631 632bool 633DRAMCtrl::recvTimingReq(PacketPtr pkt) 634{ 635 /// @todo temporary hack to deal with memory corruption issues until 636 /// 4-phase transactions are complete 637 for (int x = 0; x < pendingDelete.size(); x++) 638 delete pendingDelete[x]; 639 pendingDelete.clear(); 640 641 // This is where we enter from the outside world 642 DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n", 643 pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 644 645 // simply drop inhibited packets for now 646 if (pkt->memInhibitAsserted()) { 647 DPRINTF(DRAM, "Inhibited packet -- Dropping it now\n"); 648 pendingDelete.push_back(pkt); 649 return true; 650 } 651 652 // Calc avg gap between requests 653 if (prevArrival != 0) { 654 totGap += curTick() - prevArrival; 655 } 656 prevArrival = curTick(); 657 658 659 // Find out how many dram packets a pkt translates to 660 // If the burst size is equal or larger than the pkt size, then a pkt 661 // translates to only one dram packet. Otherwise, a pkt translates to 662 // multiple dram packets 663 unsigned size = pkt->getSize(); 664 unsigned offset = pkt->getAddr() & (burstSize - 1); 665 unsigned int dram_pkt_count = divCeil(offset + size, burstSize); 666 667 // check local buffers and do not accept if full 668 if (pkt->isRead()) { 669 assert(size != 0); 670 if (readQueueFull(dram_pkt_count)) { 671 DPRINTF(DRAM, "Read queue full, not accepting\n"); 672 // remember that we have to retry this port 673 retryRdReq = true; 674 numRdRetry++; 675 return false; 676 } else { 677 addToReadQueue(pkt, dram_pkt_count); 678 readReqs++; 679 bytesReadSys += size; 680 } 681 } else if (pkt->isWrite()) { 682 assert(size != 0); 683 if (writeQueueFull(dram_pkt_count)) { 684 DPRINTF(DRAM, "Write queue full, not accepting\n"); 685 // remember that we have to retry this port 686 retryWrReq = true; 687 numWrRetry++; 688 return false; 689 } else { 690 addToWriteQueue(pkt, dram_pkt_count); 691 writeReqs++; 692 bytesWrittenSys += size; 693 } 694 } else { 695 DPRINTF(DRAM,"Neither read nor write, ignore timing\n"); 696 neitherReadNorWrite++; 697 accessAndRespond(pkt, 1); 698 } 699 700 return true; 701} 702 703void 704DRAMCtrl::processRespondEvent() 705{ 706 DPRINTF(DRAM, 707 "processRespondEvent(): Some req has reached its readyTime\n"); 708 709 DRAMPacket* dram_pkt = respQueue.front(); 710 711 if (dram_pkt->burstHelper) { 712 // it is a split packet 713 dram_pkt->burstHelper->burstsServiced++; 714 if (dram_pkt->burstHelper->burstsServiced == 715 dram_pkt->burstHelper->burstCount) { 716 // we have now serviced all children packets of a system packet 717 // so we can now respond to the requester 718 // @todo we probably want to have a different front end and back 719 // end latency for split packets 720 accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency); 721 delete dram_pkt->burstHelper; 722 dram_pkt->burstHelper = NULL; 723 } 724 } else { 725 // it is not a split packet 726 accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency); 727 } 728 729 delete respQueue.front(); 730 respQueue.pop_front(); 731 732 if (!respQueue.empty()) { 733 assert(respQueue.front()->readyTime >= curTick()); 734 assert(!respondEvent.scheduled()); 735 schedule(respondEvent, respQueue.front()->readyTime); 736 } else { 737 // if there is nothing left in any queue, signal a drain 738 if (writeQueue.empty() && readQueue.empty() && 739 drainManager) { 740 DPRINTF(Drain, "DRAM controller done draining\n"); 741 drainManager->signalDrainDone(); 742 drainManager = NULL; 743 } 744 } 745 746 // We have made a location in the queue available at this point, 747 // so if there is a read that was forced to wait, retry now 748 if (retryRdReq) { 749 retryRdReq = false; 750 port.sendRetry(); 751 } 752} 753 754bool 755DRAMCtrl::chooseNext(std::deque<DRAMPacket*>& queue, bool switched_cmd_type) 756{ 757 // This method does the arbitration between requests. The chosen 758 // packet is simply moved to the head of the queue. The other 759 // methods know that this is the place to look. For example, with 760 // FCFS, this method does nothing 761 assert(!queue.empty()); 762 763 // bool to indicate if a packet to an available rank is found 764 bool found_packet = false; 765 if (queue.size() == 1) { 766 DRAMPacket* dram_pkt = queue.front(); 767 // available rank corresponds to state refresh idle 768 if (ranks[dram_pkt->rank]->isAvailable()) { 769 found_packet = true; 770 DPRINTF(DRAM, "Single request, going to a free rank\n"); 771 } else { 772 DPRINTF(DRAM, "Single request, going to a busy rank\n"); 773 } 774 return found_packet; 775 } 776 777 if (memSchedPolicy == Enums::fcfs) { 778 // check if there is a packet going to a free rank 779 for(auto i = queue.begin(); i != queue.end() ; ++i) { 780 DRAMPacket* dram_pkt = *i; 781 if (ranks[dram_pkt->rank]->isAvailable()) { 782 queue.erase(i); 783 queue.push_front(dram_pkt); 784 found_packet = true; 785 break; 786 } 787 } 788 } else if (memSchedPolicy == Enums::frfcfs) { 789 found_packet = reorderQueue(queue, switched_cmd_type); 790 } else 791 panic("No scheduling policy chosen\n"); 792 return found_packet; 793} 794 795bool 796DRAMCtrl::reorderQueue(std::deque<DRAMPacket*>& queue, bool switched_cmd_type) 797{ 798 // Only determine this when needed 799 uint64_t earliest_banks = 0; 800 801 // Search for row hits first, if no row hit is found then schedule the 802 // packet to one of the earliest banks available 803 bool found_packet = false; 804 bool found_earliest_pkt = false; 805 bool found_prepped_diff_rank_pkt = false; 806 auto selected_pkt_it = queue.end(); 807 808 for (auto i = queue.begin(); i != queue.end() ; ++i) { 809 DRAMPacket* dram_pkt = *i; 810 const Bank& bank = dram_pkt->bankRef; 811 // check if rank is busy. If this is the case jump to the next packet 812 // Check if it is a row hit 813 if (dram_pkt->rankRef.isAvailable()) { 814 if (bank.openRow == dram_pkt->row) { 815 if (dram_pkt->rank == activeRank || switched_cmd_type) { 816 // FCFS within the hits, giving priority to commands 817 // that access the same rank as the previous burst 818 // to minimize bus turnaround delays 819 // Only give rank prioity when command type is 820 // not changing 821 DPRINTF(DRAM, "Row buffer hit\n"); 822 selected_pkt_it = i; 823 break; 824 } else if (!found_prepped_diff_rank_pkt) { 825 // found row hit for command on different rank 826 // than prev burst 827 selected_pkt_it = i; 828 found_prepped_diff_rank_pkt = true; 829 } 830 } else if (!found_earliest_pkt & !found_prepped_diff_rank_pkt) { 831 // packet going to a rank which is currently not waiting for a 832 // refresh, No row hit and 833 // haven't found an entry with a row hit to a new rank 834 if (earliest_banks == 0) 835 // Determine entries with earliest bank prep delay 836 // Function will give priority to commands that access the 837 // same rank as previous burst and can prep 838 // the bank seamlessly 839 earliest_banks = minBankPrep(queue, switched_cmd_type); 840 841 // FCFS - Bank is first available bank 842 if (bits(earliest_banks, dram_pkt->bankId, 843 dram_pkt->bankId)) { 844 // Remember the packet to be scheduled to one of 845 // the earliest banks available, FCFS amongst the 846 // earliest banks 847 selected_pkt_it = i; 848 //if the packet found is going to a rank that is currently 849 //not busy then update the found_packet to true 850 found_earliest_pkt = true; 851 } 852 } 853 } 854 } 855 856 if (selected_pkt_it != queue.end()) { 857 DRAMPacket* selected_pkt = *selected_pkt_it; 858 queue.erase(selected_pkt_it); 859 queue.push_front(selected_pkt); 860 found_packet = true; 861 } 862 return found_packet; 863} 864 865void 866DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency) 867{ 868 DPRINTF(DRAM, "Responding to Address %lld.. ",pkt->getAddr()); 869 870 bool needsResponse = pkt->needsResponse(); 871 // do the actual memory access which also turns the packet into a 872 // response 873 access(pkt); 874 875 // turn packet around to go back to requester if response expected 876 if (needsResponse) { 877 // access already turned the packet into a response 878 assert(pkt->isResponse()); 879 880 // @todo someone should pay for this 881 pkt->firstWordDelay = pkt->lastWordDelay = 0; 882 883 // queue the packet in the response queue to be sent out after 884 // the static latency has passed 885 port.schedTimingResp(pkt, curTick() + static_latency); 886 } else { 887 // @todo the packet is going to be deleted, and the DRAMPacket 888 // is still having a pointer to it 889 pendingDelete.push_back(pkt); 890 } 891 892 DPRINTF(DRAM, "Done\n"); 893 894 return; 895} 896 897void 898DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref, 899 Tick act_tick, uint32_t row) 900{ 901 assert(rank_ref.actTicks.size() == activationLimit); 902 903 DPRINTF(DRAM, "Activate at tick %d\n", act_tick); 904 905 // update the open row 906 assert(bank_ref.openRow == Bank::NO_ROW); 907 bank_ref.openRow = row; 908 909 // start counting anew, this covers both the case when we 910 // auto-precharged, and when this access is forced to 911 // precharge 912 bank_ref.bytesAccessed = 0; 913 bank_ref.rowAccesses = 0; 914 915 ++rank_ref.numBanksActive; 916 assert(rank_ref.numBanksActive <= banksPerRank); 917 918 DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got %d active\n", 919 bank_ref.bank, rank_ref.rank, act_tick, 920 ranks[rank_ref.rank]->numBanksActive); 921 922 rank_ref.power.powerlib.doCommand(MemCommand::ACT, bank_ref.bank, 923 divCeil(act_tick, tCK) - 924 timeStampOffset); 925 926 DPRINTF(DRAMPower, "%llu,ACT,%d,%d\n", divCeil(act_tick, tCK) - 927 timeStampOffset, bank_ref.bank, rank_ref.rank); 928 929 // The next access has to respect tRAS for this bank 930 bank_ref.preAllowedAt = act_tick + tRAS; 931 932 // Respect the row-to-column command delay 933 bank_ref.colAllowedAt = std::max(act_tick + tRCD, bank_ref.colAllowedAt); 934 935 // start by enforcing tRRD 936 for(int i = 0; i < banksPerRank; i++) { 937 // next activate to any bank in this rank must not happen 938 // before tRRD 939 if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) { 940 // bank group architecture requires longer delays between 941 // ACT commands within the same bank group. Use tRRD_L 942 // in this case 943 rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD_L, 944 rank_ref.banks[i].actAllowedAt); 945 } else { 946 // use shorter tRRD value when either 947 // 1) bank group architecture is not supportted 948 // 2) bank is in a different bank group 949 rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD, 950 rank_ref.banks[i].actAllowedAt); 951 } 952 } 953 954 // next, we deal with tXAW, if the activation limit is disabled 955 // then we directly schedule an activate power event 956 if (!rank_ref.actTicks.empty()) { 957 // sanity check 958 if (rank_ref.actTicks.back() && 959 (act_tick - rank_ref.actTicks.back()) < tXAW) { 960 panic("Got %d activates in window %d (%llu - %llu) which " 961 "is smaller than %llu\n", activationLimit, act_tick - 962 rank_ref.actTicks.back(), act_tick, 963 rank_ref.actTicks.back(), tXAW); 964 } 965 966 // shift the times used for the book keeping, the last element 967 // (highest index) is the oldest one and hence the lowest value 968 rank_ref.actTicks.pop_back(); 969 970 // record an new activation (in the future) 971 rank_ref.actTicks.push_front(act_tick); 972 973 // cannot activate more than X times in time window tXAW, push the 974 // next one (the X + 1'st activate) to be tXAW away from the 975 // oldest in our window of X 976 if (rank_ref.actTicks.back() && 977 (act_tick - rank_ref.actTicks.back()) < tXAW) { 978 DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate " 979 "no earlier than %llu\n", activationLimit, 980 rank_ref.actTicks.back() + tXAW); 981 for(int j = 0; j < banksPerRank; j++) 982 // next activate must not happen before end of window 983 rank_ref.banks[j].actAllowedAt = 984 std::max(rank_ref.actTicks.back() + tXAW, 985 rank_ref.banks[j].actAllowedAt); 986 } 987 } 988 989 // at the point when this activate takes place, make sure we 990 // transition to the active power state 991 if (!rank_ref.activateEvent.scheduled()) 992 schedule(rank_ref.activateEvent, act_tick); 993 else if (rank_ref.activateEvent.when() > act_tick) 994 // move it sooner in time 995 reschedule(rank_ref.activateEvent, act_tick); 996} 997 998void 999DRAMCtrl::prechargeBank(Rank& rank_ref, Bank& bank, Tick pre_at, bool trace) 1000{ 1001 // make sure the bank has an open row 1002 assert(bank.openRow != Bank::NO_ROW); 1003 1004 // sample the bytes per activate here since we are closing 1005 // the page 1006 bytesPerActivate.sample(bank.bytesAccessed); 1007 1008 bank.openRow = Bank::NO_ROW; 1009 1010 // no precharge allowed before this one 1011 bank.preAllowedAt = pre_at; 1012 1013 Tick pre_done_at = pre_at + tRP; 1014 1015 bank.actAllowedAt = std::max(bank.actAllowedAt, pre_done_at); 1016 1017 assert(rank_ref.numBanksActive != 0); 1018 --rank_ref.numBanksActive; 1019 1020 DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got " 1021 "%d active\n", bank.bank, rank_ref.rank, pre_at, 1022 rank_ref.numBanksActive); 1023 1024 if (trace) { 1025 1026 rank_ref.power.powerlib.doCommand(MemCommand::PRE, bank.bank, 1027 divCeil(pre_at, tCK) - 1028 timeStampOffset); 1029 DPRINTF(DRAMPower, "%llu,PRE,%d,%d\n", divCeil(pre_at, tCK) - 1030 timeStampOffset, bank.bank, rank_ref.rank); 1031 } 1032 // if we look at the current number of active banks we might be 1033 // tempted to think the DRAM is now idle, however this can be 1034 // undone by an activate that is scheduled to happen before we 1035 // would have reached the idle state, so schedule an event and 1036 // rather check once we actually make it to the point in time when 1037 // the (last) precharge takes place 1038 if (!rank_ref.prechargeEvent.scheduled()) 1039 schedule(rank_ref.prechargeEvent, pre_done_at); 1040 else if (rank_ref.prechargeEvent.when() < pre_done_at) 1041 reschedule(rank_ref.prechargeEvent, pre_done_at); 1042} 1043 1044void 1045DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt) 1046{ 1047 DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n", 1048 dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row); 1049 1050 // get the rank 1051 Rank& rank = dram_pkt->rankRef; 1052 1053 // get the bank 1054 Bank& bank = dram_pkt->bankRef; 1055 1056 // for the state we need to track if it is a row hit or not 1057 bool row_hit = true; 1058 1059 // respect any constraints on the command (e.g. tRCD or tCCD) 1060 Tick cmd_at = std::max(bank.colAllowedAt, curTick()); 1061 1062 // Determine the access latency and update the bank state 1063 if (bank.openRow == dram_pkt->row) { 1064 // nothing to do 1065 } else { 1066 row_hit = false; 1067 1068 // If there is a page open, precharge it. 1069 if (bank.openRow != Bank::NO_ROW) { 1070 prechargeBank(rank, bank, std::max(bank.preAllowedAt, curTick())); 1071 } 1072 1073 // next we need to account for the delay in activating the 1074 // page 1075 Tick act_tick = std::max(bank.actAllowedAt, curTick()); 1076 1077 // Record the activation and deal with all the global timing 1078 // constraints caused be a new activation (tRRD and tXAW) 1079 activateBank(rank, bank, act_tick, dram_pkt->row); 1080 1081 // issue the command as early as possible 1082 cmd_at = bank.colAllowedAt; 1083 } 1084 1085 // we need to wait until the bus is available before we can issue 1086 // the command 1087 cmd_at = std::max(cmd_at, busBusyUntil - tCL); 1088 1089 // update the packet ready time 1090 dram_pkt->readyTime = cmd_at + tCL + tBURST; 1091 1092 // only one burst can use the bus at any one point in time 1093 assert(dram_pkt->readyTime - busBusyUntil >= tBURST); 1094 1095 // update the time for the next read/write burst for each 1096 // bank (add a max with tCCD/tCCD_L here) 1097 Tick cmd_dly; 1098 for(int j = 0; j < ranksPerChannel; j++) { 1099 for(int i = 0; i < banksPerRank; i++) { 1100 // next burst to same bank group in this rank must not happen 1101 // before tCCD_L. Different bank group timing requirement is 1102 // tBURST; Add tCS for different ranks 1103 if (dram_pkt->rank == j) { 1104 if (bankGroupArch && 1105 (bank.bankgr == ranks[j]->banks[i].bankgr)) { 1106 // bank group architecture requires longer delays between 1107 // RD/WR burst commands to the same bank group. 1108 // Use tCCD_L in this case 1109 cmd_dly = tCCD_L; 1110 } else { 1111 // use tBURST (equivalent to tCCD_S), the shorter 1112 // cas-to-cas delay value, when either: 1113 // 1) bank group architecture is not supportted 1114 // 2) bank is in a different bank group 1115 cmd_dly = tBURST; 1116 } 1117 } else { 1118 // different rank is by default in a different bank group 1119 // use tBURST (equivalent to tCCD_S), which is the shorter 1120 // cas-to-cas delay in this case 1121 // Add tCS to account for rank-to-rank bus delay requirements 1122 cmd_dly = tBURST + tCS; 1123 } 1124 ranks[j]->banks[i].colAllowedAt = std::max(cmd_at + cmd_dly, 1125 ranks[j]->banks[i].colAllowedAt); 1126 } 1127 } 1128 1129 // Save rank of current access 1130 activeRank = dram_pkt->rank; 1131 1132 // If this is a write, we also need to respect the write recovery 1133 // time before a precharge, in the case of a read, respect the 1134 // read to precharge constraint 1135 bank.preAllowedAt = std::max(bank.preAllowedAt, 1136 dram_pkt->isRead ? cmd_at + tRTP : 1137 dram_pkt->readyTime + tWR); 1138 1139 // increment the bytes accessed and the accesses per row 1140 bank.bytesAccessed += burstSize; 1141 ++bank.rowAccesses; 1142 1143 // if we reached the max, then issue with an auto-precharge 1144 bool auto_precharge = pageMgmt == Enums::close || 1145 bank.rowAccesses == maxAccessesPerRow; 1146 1147 // if we did not hit the limit, we might still want to 1148 // auto-precharge 1149 if (!auto_precharge && 1150 (pageMgmt == Enums::open_adaptive || 1151 pageMgmt == Enums::close_adaptive)) { 1152 // a twist on the open and close page policies: 1153 // 1) open_adaptive page policy does not blindly keep the 1154 // page open, but close it if there are no row hits, and there 1155 // are bank conflicts in the queue 1156 // 2) close_adaptive page policy does not blindly close the 1157 // page, but closes it only if there are no row hits in the queue. 1158 // In this case, only force an auto precharge when there 1159 // are no same page hits in the queue 1160 bool got_more_hits = false; 1161 bool got_bank_conflict = false; 1162 1163 // either look at the read queue or write queue 1164 const deque<DRAMPacket*>& queue = dram_pkt->isRead ? readQueue : 1165 writeQueue; 1166 auto p = queue.begin(); 1167 // make sure we are not considering the packet that we are 1168 // currently dealing with (which is the head of the queue) 1169 ++p; 1170 1171 // keep on looking until we have found required condition or 1172 // reached the end 1173 while (!(got_more_hits && 1174 (got_bank_conflict || pageMgmt == Enums::close_adaptive)) && 1175 p != queue.end()) { 1176 bool same_rank_bank = (dram_pkt->rank == (*p)->rank) && 1177 (dram_pkt->bank == (*p)->bank); 1178 bool same_row = dram_pkt->row == (*p)->row; 1179 got_more_hits |= same_rank_bank && same_row; 1180 got_bank_conflict |= same_rank_bank && !same_row; 1181 ++p; 1182 } 1183 1184 // auto pre-charge when either 1185 // 1) open_adaptive policy, we have not got any more hits, and 1186 // have a bank conflict 1187 // 2) close_adaptive policy and we have not got any more hits 1188 auto_precharge = !got_more_hits && 1189 (got_bank_conflict || pageMgmt == Enums::close_adaptive); 1190 } 1191 1192 // DRAMPower trace command to be written 1193 std::string mem_cmd = dram_pkt->isRead ? "RD" : "WR"; 1194 1195 // MemCommand required for DRAMPower library 1196 MemCommand::cmds command = (mem_cmd == "RD") ? MemCommand::RD : 1197 MemCommand::WR; 1198 1199 // if this access should use auto-precharge, then we are 1200 // closing the row 1201 if (auto_precharge) { 1202 // if auto-precharge push a PRE command at the correct tick to the 1203 // list used by DRAMPower library to calculate power 1204 prechargeBank(rank, bank, std::max(curTick(), bank.preAllowedAt)); 1205 1206 DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId); 1207 } 1208 1209 // Update bus state 1210 busBusyUntil = dram_pkt->readyTime; 1211 1212 DPRINTF(DRAM, "Access to %lld, ready at %lld bus busy until %lld.\n", 1213 dram_pkt->addr, dram_pkt->readyTime, busBusyUntil); 1214 1215 dram_pkt->rankRef.power.powerlib.doCommand(command, dram_pkt->bank, 1216 divCeil(cmd_at, tCK) - 1217 timeStampOffset); 1218 1219 DPRINTF(DRAMPower, "%llu,%s,%d,%d\n", divCeil(cmd_at, tCK) - 1220 timeStampOffset, mem_cmd, dram_pkt->bank, dram_pkt->rank); 1221 1222 // Update the minimum timing between the requests, this is a 1223 // conservative estimate of when we have to schedule the next 1224 // request to not introduce any unecessary bubbles. In most cases 1225 // we will wake up sooner than we have to. 1226 nextReqTime = busBusyUntil - (tRP + tRCD + tCL); 1227 1228 // Update the stats and schedule the next request 1229 if (dram_pkt->isRead) { 1230 ++readsThisTime; 1231 if (row_hit) 1232 readRowHits++; 1233 bytesReadDRAM += burstSize; 1234 perBankRdBursts[dram_pkt->bankId]++; 1235 1236 // Update latency stats 1237 totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime; 1238 totBusLat += tBURST; 1239 totQLat += cmd_at - dram_pkt->entryTime; 1240 } else { 1241 ++writesThisTime; 1242 if (row_hit) 1243 writeRowHits++; 1244 bytesWritten += burstSize; 1245 perBankWrBursts[dram_pkt->bankId]++; 1246 } 1247} 1248 1249void 1250DRAMCtrl::processNextReqEvent() 1251{ 1252 int busyRanks = 0; 1253 for (auto r : ranks) { 1254 if (!r->isAvailable()) { 1255 // rank is busy refreshing 1256 busyRanks++; 1257 1258 // let the rank know that if it was waiting to drain, it 1259 // is now done and ready to proceed 1260 r->checkDrainDone(); 1261 } 1262 } 1263 1264 if (busyRanks == ranksPerChannel) { 1265 // if all ranks are refreshing wait for them to finish 1266 // and stall this state machine without taking any further 1267 // action, and do not schedule a new nextReqEvent 1268 return; 1269 } 1270 1271 // pre-emptively set to false. Overwrite if in READ_TO_WRITE 1272 // or WRITE_TO_READ state 1273 bool switched_cmd_type = false; 1274 if (busState == READ_TO_WRITE) { 1275 DPRINTF(DRAM, "Switching to writes after %d reads with %d reads " 1276 "waiting\n", readsThisTime, readQueue.size()); 1277 1278 // sample and reset the read-related stats as we are now 1279 // transitioning to writes, and all reads are done 1280 rdPerTurnAround.sample(readsThisTime); 1281 readsThisTime = 0; 1282 1283 // now proceed to do the actual writes 1284 busState = WRITE; 1285 switched_cmd_type = true; 1286 } else if (busState == WRITE_TO_READ) { 1287 DPRINTF(DRAM, "Switching to reads after %d writes with %d writes " 1288 "waiting\n", writesThisTime, writeQueue.size()); 1289 1290 wrPerTurnAround.sample(writesThisTime); 1291 writesThisTime = 0; 1292 1293 busState = READ; 1294 switched_cmd_type = true; 1295 } 1296 1297 // when we get here it is either a read or a write 1298 if (busState == READ) { 1299 1300 // track if we should switch or not 1301 bool switch_to_writes = false; 1302 1303 if (readQueue.empty()) { 1304 // In the case there is no read request to go next, 1305 // trigger writes if we have passed the low threshold (or 1306 // if we are draining) 1307 if (!writeQueue.empty() && 1308 (drainManager || writeQueue.size() > writeLowThreshold)) { 1309 1310 switch_to_writes = true; 1311 } else { 1312 // check if we are drained 1313 if (respQueue.empty () && drainManager) { 1314 DPRINTF(Drain, "DRAM controller done draining\n"); 1315 drainManager->signalDrainDone(); 1316 drainManager = NULL; 1317 } 1318 1319 // nothing to do, not even any point in scheduling an 1320 // event for the next request 1321 return; 1322 } 1323 } else { 1324 // bool to check if there is a read to a free rank 1325 bool found_read = false; 1326 1327 // Figure out which read request goes next, and move it to the 1328 // front of the read queue 1329 found_read = chooseNext(readQueue, switched_cmd_type); 1330 1331 // if no read to an available rank is found then return 1332 // at this point. There could be writes to the available ranks 1333 // which are above the required threshold. However, to 1334 // avoid adding more complexity to the code, return and wait 1335 // for a refresh event to kick things into action again. 1336 if (!found_read) 1337 return; 1338 1339 DRAMPacket* dram_pkt = readQueue.front(); 1340 assert(dram_pkt->rankRef.isAvailable()); 1341 // here we get a bit creative and shift the bus busy time not 1342 // just the tWTR, but also a CAS latency to capture the fact 1343 // that we are allowed to prepare a new bank, but not issue a 1344 // read command until after tWTR, in essence we capture a 1345 // bubble on the data bus that is tWTR + tCL 1346 if (switched_cmd_type && dram_pkt->rank == activeRank) { 1347 busBusyUntil += tWTR + tCL; 1348 } 1349 1350 doDRAMAccess(dram_pkt); 1351 1352 // At this point we're done dealing with the request 1353 readQueue.pop_front(); 1354 1355 // sanity check 1356 assert(dram_pkt->size <= burstSize); 1357 assert(dram_pkt->readyTime >= curTick()); 1358 1359 // Insert into response queue. It will be sent back to the 1360 // requestor at its readyTime 1361 if (respQueue.empty()) { 1362 assert(!respondEvent.scheduled()); 1363 schedule(respondEvent, dram_pkt->readyTime); 1364 } else { 1365 assert(respQueue.back()->readyTime <= dram_pkt->readyTime); 1366 assert(respondEvent.scheduled()); 1367 } 1368 1369 respQueue.push_back(dram_pkt); 1370 1371 // we have so many writes that we have to transition 1372 if (writeQueue.size() > writeHighThreshold) { 1373 switch_to_writes = true; 1374 } 1375 } 1376 1377 // switching to writes, either because the read queue is empty 1378 // and the writes have passed the low threshold (or we are 1379 // draining), or because the writes hit the hight threshold 1380 if (switch_to_writes) { 1381 // transition to writing 1382 busState = READ_TO_WRITE; 1383 } 1384 } else { 1385 // bool to check if write to free rank is found 1386 bool found_write = false; 1387 1388 found_write = chooseNext(writeQueue, switched_cmd_type); 1389 1390 // if no writes to an available rank are found then return. 1391 // There could be reads to the available ranks. However, to avoid 1392 // adding more complexity to the code, return at this point and wait 1393 // for a refresh event to kick things into action again. 1394 if (!found_write) 1395 return; 1396 1397 DRAMPacket* dram_pkt = writeQueue.front(); 1398 assert(dram_pkt->rankRef.isAvailable()); 1399 // sanity check 1400 assert(dram_pkt->size <= burstSize); 1401 1402 // add a bubble to the data bus, as defined by the 1403 // tRTW when access is to the same rank as previous burst 1404 // Different rank timing is handled with tCS, which is 1405 // applied to colAllowedAt 1406 if (switched_cmd_type && dram_pkt->rank == activeRank) { 1407 busBusyUntil += tRTW; 1408 } 1409 1410 doDRAMAccess(dram_pkt); 1411 1412 writeQueue.pop_front(); 1413 delete dram_pkt; 1414 1415 // If we emptied the write queue, or got sufficiently below the 1416 // threshold (using the minWritesPerSwitch as the hysteresis) and 1417 // are not draining, or we have reads waiting and have done enough 1418 // writes, then switch to reads. 1419 if (writeQueue.empty() || 1420 (writeQueue.size() + minWritesPerSwitch < writeLowThreshold && 1421 !drainManager) || 1422 (!readQueue.empty() && writesThisTime >= minWritesPerSwitch)) { 1423 // turn the bus back around for reads again 1424 busState = WRITE_TO_READ; 1425 1426 // note that the we switch back to reads also in the idle 1427 // case, which eventually will check for any draining and 1428 // also pause any further scheduling if there is really 1429 // nothing to do 1430 } 1431 } 1432 // It is possible that a refresh to another rank kicks things back into 1433 // action before reaching this point. 1434 if (!nextReqEvent.scheduled()) 1435 schedule(nextReqEvent, std::max(nextReqTime, curTick())); 1436 1437 // If there is space available and we have writes waiting then let 1438 // them retry. This is done here to ensure that the retry does not 1439 // cause a nextReqEvent to be scheduled before we do so as part of 1440 // the next request processing 1441 if (retryWrReq && writeQueue.size() < writeBufferSize) { 1442 retryWrReq = false; 1443 port.sendRetry(); 1444 } 1445} 1446 1447uint64_t 1448DRAMCtrl::minBankPrep(const deque<DRAMPacket*>& queue, 1449 bool switched_cmd_type) const 1450{ 1451 uint64_t bank_mask = 0; 1452 Tick min_act_at = MaxTick; 1453 1454 uint64_t bank_mask_same_rank = 0; 1455 Tick min_act_at_same_rank = MaxTick; 1456 1457 // Give precedence to commands that access same rank as previous command 1458 bool same_rank_match = false; 1459 1460 // determine if we have queued transactions targetting the 1461 // bank in question 1462 vector<bool> got_waiting(ranksPerChannel * banksPerRank, false); 1463 for (const auto& p : queue) { 1464 if(p->rankRef.isAvailable()) 1465 got_waiting[p->bankId] = true; 1466 } 1467 1468 for (int i = 0; i < ranksPerChannel; i++) { 1469 for (int j = 0; j < banksPerRank; j++) { 1470 uint16_t bank_id = i * banksPerRank + j; 1471 1472 // if we have waiting requests for the bank, and it is 1473 // amongst the first available, update the mask 1474 if (got_waiting[bank_id]) { 1475 // make sure this rank is not currently refreshing. 1476 assert(ranks[i]->isAvailable()); 1477 // simplistic approximation of when the bank can issue 1478 // an activate, ignoring any rank-to-rank switching 1479 // cost in this calculation 1480 Tick act_at = ranks[i]->banks[j].openRow == Bank::NO_ROW ? 1481 ranks[i]->banks[j].actAllowedAt : 1482 std::max(ranks[i]->banks[j].preAllowedAt, curTick()) + tRP; 1483 1484 // prioritize commands that access the 1485 // same rank as previous burst 1486 // Calculate bank mask separately for the case and 1487 // evaluate after loop iterations complete 1488 if (i == activeRank && ranksPerChannel > 1) { 1489 if (act_at <= min_act_at_same_rank) { 1490 // reset same rank bank mask if new minimum is found 1491 // and previous minimum could not immediately send ACT 1492 if (act_at < min_act_at_same_rank && 1493 min_act_at_same_rank > curTick()) 1494 bank_mask_same_rank = 0; 1495 1496 // Set flag indicating that a same rank 1497 // opportunity was found 1498 same_rank_match = true; 1499 1500 // set the bit corresponding to the available bank 1501 replaceBits(bank_mask_same_rank, bank_id, bank_id, 1); 1502 min_act_at_same_rank = act_at; 1503 } 1504 } else { 1505 if (act_at <= min_act_at) { 1506 // reset bank mask if new minimum is found 1507 // and either previous minimum could not immediately send ACT 1508 if (act_at < min_act_at && min_act_at > curTick()) 1509 bank_mask = 0; 1510 // set the bit corresponding to the available bank 1511 replaceBits(bank_mask, bank_id, bank_id, 1); 1512 min_act_at = act_at; 1513 } 1514 } 1515 } 1516 } 1517 } 1518 1519 // Determine the earliest time when the next burst can issue based 1520 // on the current busBusyUntil delay. 1521 // Offset by tRCD to correlate with ACT timing variables 1522 Tick min_cmd_at = busBusyUntil - tCL - tRCD; 1523 1524 // if we have multiple ranks and all 1525 // waiting packets are accessing a rank which was previously active 1526 // then bank_mask_same_rank will be set to a value while bank_mask will 1527 // remain 0. In this case, the function should return the value of 1528 // bank_mask_same_rank. 1529 // else if waiting packets access a rank which was previously active and 1530 // other ranks, prioritize same rank accesses that can issue B2B 1531 // Only optimize for same ranks when the command type 1532 // does not change; do not want to unnecessarily incur tWTR 1533 // 1534 // Resulting FCFS prioritization Order is: 1535 // 1) Commands that access the same rank as previous burst 1536 // and can prep the bank seamlessly. 1537 // 2) Commands (any rank) with earliest bank prep 1538 if ((bank_mask == 0) || (!switched_cmd_type && same_rank_match && 1539 min_act_at_same_rank <= min_cmd_at)) { 1540 bank_mask = bank_mask_same_rank; 1541 } 1542 1543 return bank_mask; 1544} 1545 1546DRAMCtrl::Rank::Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p) 1547 : EventManager(&_memory), memory(_memory), 1548 pwrStateTrans(PWR_IDLE), pwrState(PWR_IDLE), pwrStateTick(0), 1549 refreshState(REF_IDLE), refreshDueAt(0), 1550 power(_p, false), numBanksActive(0), 1551 activateEvent(*this), prechargeEvent(*this), 1552 refreshEvent(*this), powerEvent(*this) 1553{ } 1554 1555void 1556DRAMCtrl::Rank::startup(Tick ref_tick) 1557{ 1558 assert(ref_tick > curTick()); 1559 1560 pwrStateTick = curTick(); 1561 1562 // kick off the refresh, and give ourselves enough time to 1563 // precharge 1564 schedule(refreshEvent, ref_tick); 1565} 1566 1567void 1568DRAMCtrl::Rank::suspend() 1569{ 1570 deschedule(refreshEvent); 1571} 1572 1573void 1574DRAMCtrl::Rank::checkDrainDone() 1575{ 1576 // if this rank was waiting to drain it is now able to proceed to 1577 // precharge 1578 if (refreshState == REF_DRAIN) { 1579 DPRINTF(DRAM, "Refresh drain done, now precharging\n"); 1580 1581 refreshState = REF_PRE; 1582 1583 // hand control back to the refresh event loop 1584 schedule(refreshEvent, curTick()); 1585 } 1586} 1587 1588void 1589DRAMCtrl::Rank::processActivateEvent() 1590{ 1591 // we should transition to the active state as soon as any bank is active 1592 if (pwrState != PWR_ACT) 1593 // note that at this point numBanksActive could be back at 1594 // zero again due to a precharge scheduled in the future 1595 schedulePowerEvent(PWR_ACT, curTick()); 1596} 1597 1598void 1599DRAMCtrl::Rank::processPrechargeEvent() 1600{ 1601 // if we reached zero, then special conditions apply as we track 1602 // if all banks are precharged for the power models 1603 if (numBanksActive == 0) { 1604 // we should transition to the idle state when the last bank 1605 // is precharged 1606 schedulePowerEvent(PWR_IDLE, curTick()); 1607 } 1608} 1609 1610void 1611DRAMCtrl::Rank::processRefreshEvent() 1612{ 1613 // when first preparing the refresh, remember when it was due 1614 if (refreshState == REF_IDLE) { 1615 // remember when the refresh is due 1616 refreshDueAt = curTick(); 1617 1618 // proceed to drain 1619 refreshState = REF_DRAIN; 1620 1621 DPRINTF(DRAM, "Refresh due\n"); 1622 } 1623 1624 // let any scheduled read or write to the same rank go ahead, 1625 // after which it will 1626 // hand control back to this event loop 1627 if (refreshState == REF_DRAIN) { 1628 // if a request is at the moment being handled and this request is 1629 // accessing the current rank then wait for it to finish 1630 if ((rank == memory.activeRank) 1631 && (memory.nextReqEvent.scheduled())) { 1632 // hand control over to the request loop until it is 1633 // evaluated next 1634 DPRINTF(DRAM, "Refresh awaiting draining\n"); 1635 1636 return; 1637 } else { 1638 refreshState = REF_PRE; 1639 } 1640 } 1641 1642 // at this point, ensure that all banks are precharged 1643 if (refreshState == REF_PRE) { 1644 // precharge any active bank if we are not already in the idle 1645 // state 1646 if (pwrState != PWR_IDLE) { 1647 // at the moment, we use a precharge all even if there is 1648 // only a single bank open 1649 DPRINTF(DRAM, "Precharging all\n"); 1650 1651 // first determine when we can precharge 1652 Tick pre_at = curTick(); 1653 1654 for (auto &b : banks) { 1655 // respect both causality and any existing bank 1656 // constraints, some banks could already have a 1657 // (auto) precharge scheduled 1658 pre_at = std::max(b.preAllowedAt, pre_at); 1659 } 1660 1661 // make sure all banks per rank are precharged, and for those that 1662 // already are, update their availability 1663 Tick act_allowed_at = pre_at + memory.tRP; 1664 1665 for (auto &b : banks) { 1666 if (b.openRow != Bank::NO_ROW) { 1667 memory.prechargeBank(*this, b, pre_at, false); 1668 } else { 1669 b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at); 1670 b.preAllowedAt = std::max(b.preAllowedAt, pre_at); 1671 } 1672 } 1673 1674 // precharge all banks in rank 1675 power.powerlib.doCommand(MemCommand::PREA, 0, 1676 divCeil(pre_at, memory.tCK) - 1677 memory.timeStampOffset); 1678 1679 DPRINTF(DRAMPower, "%llu,PREA,0,%d\n", 1680 divCeil(pre_at, memory.tCK) - 1681 memory.timeStampOffset, rank); 1682 } else { 1683 DPRINTF(DRAM, "All banks already precharged, starting refresh\n"); 1684 1685 // go ahead and kick the power state machine into gear if 1686 // we are already idle 1687 schedulePowerEvent(PWR_REF, curTick()); 1688 } 1689 1690 refreshState = REF_RUN; 1691 assert(numBanksActive == 0); 1692 1693 // wait for all banks to be precharged, at which point the 1694 // power state machine will transition to the idle state, and 1695 // automatically move to a refresh, at that point it will also 1696 // call this method to get the refresh event loop going again 1697 return; 1698 } 1699 1700 // last but not least we perform the actual refresh 1701 if (refreshState == REF_RUN) { 1702 // should never get here with any banks active 1703 assert(numBanksActive == 0); 1704 assert(pwrState == PWR_REF); 1705 1706 Tick ref_done_at = curTick() + memory.tRFC; 1707 1708 for (auto &b : banks) { 1709 b.actAllowedAt = ref_done_at; 1710 } 1711 1712 // at the moment this affects all ranks 1713 power.powerlib.doCommand(MemCommand::REF, 0, 1714 divCeil(curTick(), memory.tCK) - 1715 memory.timeStampOffset); 1716 1717 // at the moment sort the list of commands and update the counters 1718 // for DRAMPower libray when doing a refresh 1719 sort(power.powerlib.cmdList.begin(), 1720 power.powerlib.cmdList.end(), DRAMCtrl::sortTime); 1721 1722 // update the counters for DRAMPower, passing false to 1723 // indicate that this is not the last command in the 1724 // list. DRAMPower requires this information for the 1725 // correct calculation of the background energy at the end 1726 // of the simulation. Ideally we would want to call this 1727 // function with true once at the end of the 1728 // simulation. However, the discarded energy is extremly 1729 // small and does not effect the final results. 1730 power.powerlib.updateCounters(false); 1731 1732 // call the energy function 1733 power.powerlib.calcEnergy(); 1734 1735 // Update the stats 1736 updatePowerStats(); 1737 1738 DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), memory.tCK) - 1739 memory.timeStampOffset, rank); 1740 1741 // make sure we did not wait so long that we cannot make up 1742 // for it 1743 if (refreshDueAt + memory.tREFI < ref_done_at) { 1744 fatal("Refresh was delayed so long we cannot catch up\n"); 1745 } 1746 1747 // compensate for the delay in actually performing the refresh 1748 // when scheduling the next one 1749 schedule(refreshEvent, refreshDueAt + memory.tREFI - memory.tRP); 1750 1751 assert(!powerEvent.scheduled()); 1752 1753 // move to the idle power state once the refresh is done, this 1754 // will also move the refresh state machine to the refresh 1755 // idle state 1756 schedulePowerEvent(PWR_IDLE, ref_done_at); 1757 1758 DPRINTF(DRAMState, "Refresh done at %llu and next refresh at %llu\n", 1759 ref_done_at, refreshDueAt + memory.tREFI); 1760 } 1761} 1762 1763void 1764DRAMCtrl::Rank::schedulePowerEvent(PowerState pwr_state, Tick tick) 1765{ 1766 // respect causality 1767 assert(tick >= curTick()); 1768 1769 if (!powerEvent.scheduled()) { 1770 DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n", 1771 tick, pwr_state); 1772 1773 // insert the new transition 1774 pwrStateTrans = pwr_state; 1775 1776 schedule(powerEvent, tick); 1777 } else { 1778 panic("Scheduled power event at %llu to state %d, " 1779 "with scheduled event at %llu to %d\n", tick, pwr_state, 1780 powerEvent.when(), pwrStateTrans); 1781 } 1782} 1783 1784void 1785DRAMCtrl::Rank::processPowerEvent() 1786{ 1787 // remember where we were, and for how long 1788 Tick duration = curTick() - pwrStateTick; 1789 PowerState prev_state = pwrState; 1790 1791 // update the accounting 1792 pwrStateTime[prev_state] += duration; 1793 1794 pwrState = pwrStateTrans; 1795 pwrStateTick = curTick(); 1796 1797 if (pwrState == PWR_IDLE) { 1798 DPRINTF(DRAMState, "All banks precharged\n"); 1799 1800 // if we were refreshing, make sure we start scheduling requests again 1801 if (prev_state == PWR_REF) { 1802 DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration); 1803 assert(pwrState == PWR_IDLE); 1804 1805 // kick things into action again 1806 refreshState = REF_IDLE; 1807 // a request event could be already scheduled by the state 1808 // machine of the other rank 1809 if (!memory.nextReqEvent.scheduled()) 1810 schedule(memory.nextReqEvent, curTick()); 1811 } else { 1812 assert(prev_state == PWR_ACT); 1813 1814 // if we have a pending refresh, and are now moving to 1815 // the idle state, direclty transition to a refresh 1816 if (refreshState == REF_RUN) { 1817 // there should be nothing waiting at this point 1818 assert(!powerEvent.scheduled()); 1819 1820 // update the state in zero time and proceed below 1821 pwrState = PWR_REF; 1822 } 1823 } 1824 } 1825 1826 // we transition to the refresh state, let the refresh state 1827 // machine know of this state update and let it deal with the 1828 // scheduling of the next power state transition as well as the 1829 // following refresh 1830 if (pwrState == PWR_REF) { 1831 DPRINTF(DRAMState, "Refreshing\n"); 1832 // kick the refresh event loop into action again, and that 1833 // in turn will schedule a transition to the idle power 1834 // state once the refresh is done 1835 assert(refreshState == REF_RUN); 1836 processRefreshEvent(); 1837 } 1838} 1839 1840void 1841DRAMCtrl::Rank::updatePowerStats() 1842{ 1843 // Get the energy and power from DRAMPower 1844 Data::MemoryPowerModel::Energy energy = 1845 power.powerlib.getEnergy(); 1846 Data::MemoryPowerModel::Power rank_power = 1847 power.powerlib.getPower(); 1848 1849 actEnergy = energy.act_energy * memory.devicesPerRank; 1850 preEnergy = energy.pre_energy * memory.devicesPerRank; 1851 readEnergy = energy.read_energy * memory.devicesPerRank; 1852 writeEnergy = energy.write_energy * memory.devicesPerRank; 1853 refreshEnergy = energy.ref_energy * memory.devicesPerRank; 1854 actBackEnergy = energy.act_stdby_energy * memory.devicesPerRank; 1855 preBackEnergy = energy.pre_stdby_energy * memory.devicesPerRank; 1856 totalEnergy = energy.total_energy * memory.devicesPerRank; 1857 averagePower = rank_power.average_power * memory.devicesPerRank; 1858} 1859 1860void 1861DRAMCtrl::Rank::regStats() 1862{ 1863 using namespace Stats; 1864 1865 pwrStateTime 1866 .init(5) 1867 .name(name() + ".memoryStateTime") 1868 .desc("Time in different power states"); 1869 pwrStateTime.subname(0, "IDLE"); 1870 pwrStateTime.subname(1, "REF"); 1871 pwrStateTime.subname(2, "PRE_PDN"); 1872 pwrStateTime.subname(3, "ACT"); 1873 pwrStateTime.subname(4, "ACT_PDN"); 1874 1875 actEnergy 1876 .name(name() + ".actEnergy") 1877 .desc("Energy for activate commands per rank (pJ)"); 1878 1879 preEnergy 1880 .name(name() + ".preEnergy") 1881 .desc("Energy for precharge commands per rank (pJ)"); 1882 1883 readEnergy 1884 .name(name() + ".readEnergy") 1885 .desc("Energy for read commands per rank (pJ)"); 1886 1887 writeEnergy 1888 .name(name() + ".writeEnergy") 1889 .desc("Energy for write commands per rank (pJ)"); 1890 1891 refreshEnergy 1892 .name(name() + ".refreshEnergy") 1893 .desc("Energy for refresh commands per rank (pJ)"); 1894 1895 actBackEnergy 1896 .name(name() + ".actBackEnergy") 1897 .desc("Energy for active background per rank (pJ)"); 1898 1899 preBackEnergy 1900 .name(name() + ".preBackEnergy") 1901 .desc("Energy for precharge background per rank (pJ)"); 1902 1903 totalEnergy 1904 .name(name() + ".totalEnergy") 1905 .desc("Total energy per rank (pJ)"); 1906 1907 averagePower 1908 .name(name() + ".averagePower") 1909 .desc("Core power per rank (mW)"); 1910} 1911void 1912DRAMCtrl::regStats() 1913{ 1914 using namespace Stats; 1915 1916 AbstractMemory::regStats(); 1917 1918 for (auto r : ranks) { 1919 r->regStats(); 1920 } 1921 1922 readReqs 1923 .name(name() + ".readReqs") 1924 .desc("Number of read requests accepted"); 1925 1926 writeReqs 1927 .name(name() + ".writeReqs") 1928 .desc("Number of write requests accepted"); 1929 1930 readBursts 1931 .name(name() + ".readBursts") 1932 .desc("Number of DRAM read bursts, " 1933 "including those serviced by the write queue"); 1934 1935 writeBursts 1936 .name(name() + ".writeBursts") 1937 .desc("Number of DRAM write bursts, " 1938 "including those merged in the write queue"); 1939 1940 servicedByWrQ 1941 .name(name() + ".servicedByWrQ") 1942 .desc("Number of DRAM read bursts serviced by the write queue"); 1943 1944 mergedWrBursts 1945 .name(name() + ".mergedWrBursts") 1946 .desc("Number of DRAM write bursts merged with an existing one"); 1947 1948 neitherReadNorWrite 1949 .name(name() + ".neitherReadNorWriteReqs") 1950 .desc("Number of requests that are neither read nor write"); 1951 1952 perBankRdBursts 1953 .init(banksPerRank * ranksPerChannel) 1954 .name(name() + ".perBankRdBursts") 1955 .desc("Per bank write bursts"); 1956 1957 perBankWrBursts 1958 .init(banksPerRank * ranksPerChannel) 1959 .name(name() + ".perBankWrBursts") 1960 .desc("Per bank write bursts"); 1961 1962 avgRdQLen 1963 .name(name() + ".avgRdQLen") 1964 .desc("Average read queue length when enqueuing") 1965 .precision(2); 1966 1967 avgWrQLen 1968 .name(name() + ".avgWrQLen") 1969 .desc("Average write queue length when enqueuing") 1970 .precision(2); 1971 1972 totQLat 1973 .name(name() + ".totQLat") 1974 .desc("Total ticks spent queuing"); 1975 1976 totBusLat 1977 .name(name() + ".totBusLat") 1978 .desc("Total ticks spent in databus transfers"); 1979 1980 totMemAccLat 1981 .name(name() + ".totMemAccLat") 1982 .desc("Total ticks spent from burst creation until serviced " 1983 "by the DRAM"); 1984 1985 avgQLat 1986 .name(name() + ".avgQLat") 1987 .desc("Average queueing delay per DRAM burst") 1988 .precision(2); 1989 1990 avgQLat = totQLat / (readBursts - servicedByWrQ); 1991 1992 avgBusLat 1993 .name(name() + ".avgBusLat") 1994 .desc("Average bus latency per DRAM burst") 1995 .precision(2); 1996 1997 avgBusLat = totBusLat / (readBursts - servicedByWrQ); 1998 1999 avgMemAccLat 2000 .name(name() + ".avgMemAccLat") 2001 .desc("Average memory access latency per DRAM burst") 2002 .precision(2); 2003 2004 avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ); 2005 2006 numRdRetry 2007 .name(name() + ".numRdRetry") 2008 .desc("Number of times read queue was full causing retry"); 2009 2010 numWrRetry 2011 .name(name() + ".numWrRetry") 2012 .desc("Number of times write queue was full causing retry"); 2013 2014 readRowHits 2015 .name(name() + ".readRowHits") 2016 .desc("Number of row buffer hits during reads"); 2017 2018 writeRowHits 2019 .name(name() + ".writeRowHits") 2020 .desc("Number of row buffer hits during writes"); 2021 2022 readRowHitRate 2023 .name(name() + ".readRowHitRate") 2024 .desc("Row buffer hit rate for reads") 2025 .precision(2); 2026 2027 readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100; 2028 2029 writeRowHitRate 2030 .name(name() + ".writeRowHitRate") 2031 .desc("Row buffer hit rate for writes") 2032 .precision(2); 2033 2034 writeRowHitRate = (writeRowHits / (writeBursts - mergedWrBursts)) * 100; 2035 2036 readPktSize 2037 .init(ceilLog2(burstSize) + 1) 2038 .name(name() + ".readPktSize") 2039 .desc("Read request sizes (log2)"); 2040 2041 writePktSize 2042 .init(ceilLog2(burstSize) + 1) 2043 .name(name() + ".writePktSize") 2044 .desc("Write request sizes (log2)"); 2045 2046 rdQLenPdf 2047 .init(readBufferSize) 2048 .name(name() + ".rdQLenPdf") 2049 .desc("What read queue length does an incoming req see"); 2050 2051 wrQLenPdf 2052 .init(writeBufferSize) 2053 .name(name() + ".wrQLenPdf") 2054 .desc("What write queue length does an incoming req see"); 2055 2056 bytesPerActivate 2057 .init(maxAccessesPerRow) 2058 .name(name() + ".bytesPerActivate") 2059 .desc("Bytes accessed per row activation") 2060 .flags(nozero); 2061 2062 rdPerTurnAround 2063 .init(readBufferSize) 2064 .name(name() + ".rdPerTurnAround") 2065 .desc("Reads before turning the bus around for writes") 2066 .flags(nozero); 2067 2068 wrPerTurnAround 2069 .init(writeBufferSize) 2070 .name(name() + ".wrPerTurnAround") 2071 .desc("Writes before turning the bus around for reads") 2072 .flags(nozero); 2073 2074 bytesReadDRAM 2075 .name(name() + ".bytesReadDRAM") 2076 .desc("Total number of bytes read from DRAM"); 2077 2078 bytesReadWrQ 2079 .name(name() + ".bytesReadWrQ") 2080 .desc("Total number of bytes read from write queue"); 2081 2082 bytesWritten 2083 .name(name() + ".bytesWritten") 2084 .desc("Total number of bytes written to DRAM"); 2085 2086 bytesReadSys 2087 .name(name() + ".bytesReadSys") 2088 .desc("Total read bytes from the system interface side"); 2089 2090 bytesWrittenSys 2091 .name(name() + ".bytesWrittenSys") 2092 .desc("Total written bytes from the system interface side"); 2093 2094 avgRdBW 2095 .name(name() + ".avgRdBW") 2096 .desc("Average DRAM read bandwidth in MiByte/s") 2097 .precision(2); 2098 2099 avgRdBW = (bytesReadDRAM / 1000000) / simSeconds; 2100 2101 avgWrBW 2102 .name(name() + ".avgWrBW") 2103 .desc("Average achieved write bandwidth in MiByte/s") 2104 .precision(2); 2105 2106 avgWrBW = (bytesWritten / 1000000) / simSeconds; 2107 2108 avgRdBWSys 2109 .name(name() + ".avgRdBWSys") 2110 .desc("Average system read bandwidth in MiByte/s") 2111 .precision(2); 2112 2113 avgRdBWSys = (bytesReadSys / 1000000) / simSeconds; 2114 2115 avgWrBWSys 2116 .name(name() + ".avgWrBWSys") 2117 .desc("Average system write bandwidth in MiByte/s") 2118 .precision(2); 2119 2120 avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds; 2121 2122 peakBW 2123 .name(name() + ".peakBW") 2124 .desc("Theoretical peak bandwidth in MiByte/s") 2125 .precision(2); 2126 2127 peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000; 2128 2129 busUtil 2130 .name(name() + ".busUtil") 2131 .desc("Data bus utilization in percentage") 2132 .precision(2); 2133 busUtil = (avgRdBW + avgWrBW) / peakBW * 100; 2134 2135 totGap 2136 .name(name() + ".totGap") 2137 .desc("Total gap between requests"); 2138 2139 avgGap 2140 .name(name() + ".avgGap") 2141 .desc("Average gap between requests") 2142 .precision(2); 2143 2144 avgGap = totGap / (readReqs + writeReqs); 2145 2146 // Stats for DRAM Power calculation based on Micron datasheet 2147 busUtilRead 2148 .name(name() + ".busUtilRead") 2149 .desc("Data bus utilization in percentage for reads") 2150 .precision(2); 2151 2152 busUtilRead = avgRdBW / peakBW * 100; 2153 2154 busUtilWrite 2155 .name(name() + ".busUtilWrite") 2156 .desc("Data bus utilization in percentage for writes") 2157 .precision(2); 2158 2159 busUtilWrite = avgWrBW / peakBW * 100; 2160 2161 pageHitRate 2162 .name(name() + ".pageHitRate") 2163 .desc("Row buffer hit rate, read and write combined") 2164 .precision(2); 2165 2166 pageHitRate = (writeRowHits + readRowHits) / 2167 (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100; 2168} 2169 2170void 2171DRAMCtrl::recvFunctional(PacketPtr pkt) 2172{ 2173 // rely on the abstract memory 2174 functionalAccess(pkt); 2175} 2176 2177BaseSlavePort& 2178DRAMCtrl::getSlavePort(const string &if_name, PortID idx) 2179{ 2180 if (if_name != "port") { 2181 return MemObject::getSlavePort(if_name, idx); 2182 } else { 2183 return port; 2184 } 2185} 2186 2187unsigned int 2188DRAMCtrl::drain(DrainManager *dm) 2189{ 2190 unsigned int count = port.drain(dm); 2191 2192 // if there is anything in any of our internal queues, keep track 2193 // of that as well 2194 if (!(writeQueue.empty() && readQueue.empty() && 2195 respQueue.empty())) { 2196 DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d," 2197 " resp: %d\n", writeQueue.size(), readQueue.size(), 2198 respQueue.size()); 2199 ++count; 2200 drainManager = dm; 2201 2202 // the only part that is not drained automatically over time 2203 // is the write queue, thus kick things into action if needed 2204 if (!writeQueue.empty() && !nextReqEvent.scheduled()) { 2205 schedule(nextReqEvent, curTick()); 2206 } 2207 } 2208 2209 if (count) 2210 setDrainState(Drainable::Draining); 2211 else 2212 setDrainState(Drainable::Drained); 2213 return count; 2214} 2215 2216void 2217DRAMCtrl::drainResume() 2218{ 2219 if (!isTimingMode && system()->isTimingMode()) { 2220 // if we switched to timing mode, kick things into action, 2221 // and behave as if we restored from a checkpoint 2222 startup(); 2223 } else if (isTimingMode && !system()->isTimingMode()) { 2224 // if we switch from timing mode, stop the refresh events to 2225 // not cause issues with KVM 2226 for (auto r : ranks) { 2227 r->suspend(); 2228 } 2229 } 2230 2231 // update the mode 2232 isTimingMode = system()->isTimingMode(); 2233} 2234 2235DRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _memory) 2236 : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this), 2237 memory(_memory) 2238{ } 2239 2240AddrRangeList 2241DRAMCtrl::MemoryPort::getAddrRanges() const 2242{ 2243 AddrRangeList ranges; 2244 ranges.push_back(memory.getAddrRange()); 2245 return ranges; 2246} 2247 2248void 2249DRAMCtrl::MemoryPort::recvFunctional(PacketPtr pkt) 2250{ 2251 pkt->pushLabel(memory.name()); 2252 2253 if (!queue.checkFunctional(pkt)) { 2254 // Default implementation of SimpleTimingPort::recvFunctional() 2255 // calls recvAtomic() and throws away the latency; we can save a 2256 // little here by just not calculating the latency. 2257 memory.recvFunctional(pkt); 2258 } 2259 2260 pkt->popLabel(); 2261} 2262 2263Tick 2264DRAMCtrl::MemoryPort::recvAtomic(PacketPtr pkt) 2265{ 2266 return memory.recvAtomic(pkt); 2267} 2268 2269bool 2270DRAMCtrl::MemoryPort::recvTimingReq(PacketPtr pkt) 2271{ 2272 // pass it to the memory controller 2273 return memory.recvTimingReq(pkt); 2274} 2275 2276DRAMCtrl* 2277DRAMCtrlParams::create() 2278{ 2279 return new DRAMCtrl(this); 2280} 2281