54#include "sim/system.hh" 55 56using namespace std; 57using namespace Data; 58 59DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) : 60 AbstractMemory(p), 61 port(name() + ".port", *this), isTimingMode(false), 62 retryRdReq(false), retryWrReq(false), 63 busState(READ), 64 busStateNext(READ), 65 nextReqEvent(this), respondEvent(this), 66 deviceSize(p->device_size), 67 deviceBusWidth(p->device_bus_width), burstLength(p->burst_length), 68 deviceRowBufferSize(p->device_rowbuffer_size), 69 devicesPerRank(p->devices_per_rank), 70 burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8), 71 rowBufferSize(devicesPerRank * deviceRowBufferSize), 72 columnsPerRowBuffer(rowBufferSize / burstSize), 73 columnsPerStripe(range.interleaved() ? range.granularity() / burstSize : 1), 74 ranksPerChannel(p->ranks_per_channel), 75 bankGroupsPerRank(p->bank_groups_per_rank), 76 bankGroupArch(p->bank_groups_per_rank > 0), 77 banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0), 78 readBufferSize(p->read_buffer_size), 79 writeBufferSize(p->write_buffer_size), 80 writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0), 81 writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0), 82 minWritesPerSwitch(p->min_writes_per_switch), 83 writesThisTime(0), readsThisTime(0), 84 tCK(p->tCK), tWTR(p->tWTR), tRTW(p->tRTW), tCS(p->tCS), tBURST(p->tBURST), 85 tCCD_L(p->tCCD_L), tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS), 86 tWR(p->tWR), tRTP(p->tRTP), tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD), 87 tRRD_L(p->tRRD_L), tXAW(p->tXAW), tXP(p->tXP), tXS(p->tXS), 88 activationLimit(p->activation_limit), 89 memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping), 90 pageMgmt(p->page_policy), 91 maxAccessesPerRow(p->max_accesses_per_row), 92 frontendLatency(p->static_frontend_latency), 93 backendLatency(p->static_backend_latency), 94 busBusyUntil(0), prevArrival(0), 95 nextReqTime(0), activeRank(0), timeStampOffset(0) 96{ 97 // sanity check the ranks since we rely on bit slicing for the 98 // address decoding 99 fatal_if(!isPowerOf2(ranksPerChannel), "DRAM rank count of %d is not " 100 "allowed, must be a power of two\n", ranksPerChannel); 101 102 fatal_if(!isPowerOf2(burstSize), "DRAM burst size %d is not allowed, " 103 "must be a power of two\n", burstSize); 104 105 for (int i = 0; i < ranksPerChannel; i++) { 106 Rank* rank = new Rank(*this, p); 107 ranks.push_back(rank); 108 109 rank->actTicks.resize(activationLimit, 0); 110 rank->banks.resize(banksPerRank); 111 rank->rank = i; 112 113 for (int b = 0; b < banksPerRank; b++) { 114 rank->banks[b].bank = b; 115 // GDDR addressing of banks to BG is linear. 116 // Here we assume that all DRAM generations address bank groups as 117 // follows: 118 if (bankGroupArch) { 119 // Simply assign lower bits to bank group in order to 120 // rotate across bank groups as banks are incremented 121 // e.g. 

    for (int i = 0; i < ranksPerChannel; i++) {
        Rank* rank = new Rank(*this, p);
        ranks.push_back(rank);

        rank->actTicks.resize(activationLimit, 0);
        rank->banks.resize(banksPerRank);
        rank->rank = i;

        for (int b = 0; b < banksPerRank; b++) {
            rank->banks[b].bank = b;
            // GDDR addressing of banks to BG is linear.
            // Here we assume that all DRAM generations address bank
            // groups as follows:
            if (bankGroupArch) {
                // Simply assign lower bits to bank group in order to
                // rotate across bank groups as banks are incremented
                // e.g. with 4 banks per bank group and 16 banks total:
                //    banks 0,4,8,12 are in bank group 0
                //    banks 1,5,9,13 are in bank group 1
                //    banks 2,6,10,14 are in bank group 2
                //    banks 3,7,11,15 are in bank group 3
                rank->banks[b].bankgr = b % bankGroupsPerRank;
            } else {
                // No bank groups; simply assign to bank number
                rank->banks[b].bankgr = b;
            }
        }
    }

    // perform a basic check of the write thresholds
    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p->write_low_thresh_perc,
              p->write_high_thresh_perc);

    // determine the rows per bank by looking at the total capacity
    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());

    // determine the actual DRAM capacity from the DRAM config in Mbytes
    uint64_t deviceCapacity = deviceSize / (1024 * 1024) * devicesPerRank *
        ranksPerChannel;

    // if the actual DRAM size does not match the memory capacity in the
    // system, warn!
    if (deviceCapacity != capacity / (1024 * 1024))
        warn("DRAM device capacity (%d Mbytes) does not match the "
             "address range assigned (%d Mbytes)\n", deviceCapacity,
             capacity / (1024 * 1024));

    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
            AbstractMemory::size());

    DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n",
            rowBufferSize, columnsPerRowBuffer);

    rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel);

    // some basic sanity checks
    if (tREFI <= tRP || tREFI <= tRFC) {
        fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n",
              tREFI, tRP, tRFC);
    }

    // basic bank group architecture checks
    if (bankGroupArch) {
        // must have at least one bank per bank group
        if (bankGroupsPerRank > banksPerRank) {
            fatal("banks per rank (%d) must be equal to or larger than "
                  "bank groups per rank (%d)\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // must have same number of banks in each bank group
        if ((banksPerRank % bankGroupsPerRank) != 0) {
            fatal("Banks per rank (%d) must be evenly divisible by bank "
                  "groups per rank (%d) for equal banks per bank group\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // tCCD_L should be greater than the minimal, back-to-back burst
        // delay
        if (tCCD_L <= tBURST) {
            fatal("tCCD_L (%d) should be larger than tBURST (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tCCD_L, tBURST, bankGroupsPerRank);
        }
        // tRRD_L is greater than the minimal, same bank group ACT-to-ACT
        // delay; some datasheets might specify it equal to tRRD
        if (tRRD_L < tRRD) {
            fatal("tRRD_L (%d) should be equal to or larger than tRRD (%d) "
                  "when bank groups per rank (%d) is greater than 1\n",
                  tRRD_L, tRRD, bankGroupsPerRank);
        }
    }
}

void
DRAMCtrl::init()
{
    AbstractMemory::init();

    if (!port.isConnected()) {
        fatal("DRAMCtrl %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }

    // perform a few sanity checks on the interleaving; do this here to
    // ensure that the system pointer is initialised
    if (range.interleaved()) {
        if (channels != range.stripes())
            fatal("%s has %d interleaved address stripes but %d channel(s)\n",
                  name(), range.stripes(), channels);

        if (addrMapping == Enums::RoRaBaChCo) {
            if (rowBufferSize != range.granularity()) {
                fatal("Channel interleaving of %s doesn't match RoRaBaChCo "
                      "address map\n", name());
            }
        } else if (addrMapping == Enums::RoRaBaCoCh ||
                   addrMapping == Enums::RoCoRaBaCh) {
            // for the interleavings with channel bits in the bottom,
            // if the system uses a channel striping granularity that
            // is larger than the DRAM burst size, then map the
            // sequential accesses within a stripe to a number of
            // columns in the DRAM, effectively placing some of the
            // lower-order column bits as the least-significant bits
            // of the address (above the ones denoting the burst size)
            assert(columnsPerStripe >= 1);

            // channel striping has to be done at a granularity that
            // is equal to or larger than a cache line
            if (system()->cacheLineSize() > range.granularity()) {
                fatal("Channel interleaving of %s must be at least as large "
                      "as the cache line size\n", name());
            }

            // ...and equal to or smaller than the row-buffer size
            if (rowBufferSize < range.granularity()) {
                fatal("Channel interleaving of %s must be at most as large "
                      "as the row-buffer size\n", name());
            }
            // this is essentially the check above, so just to be sure
            assert(columnsPerStripe <= columnsPerRowBuffer);
        }
    }
}

void
DRAMCtrl::startup()
{
    // remember the memory system mode of operation
    isTimingMode = system()->isTimingMode();

    if (isTimingMode) {
        // timestamp offset should be in clock cycles for DRAMPower
        timeStampOffset = divCeil(curTick(), tCK);

        // update the start tick for the precharge accounting to the
        // current tick
        for (auto r : ranks) {
            r->startup(curTick() + tREFI - tRP);
        }

        // shift the bus busy time sufficiently far ahead that we never
        // have to worry about negative values when computing the time for
        // the next request, this will add an insignificant bubble at the
        // start of simulation
        busBusyUntil = curTick() + tRP + tRCD + tCL;
    }
}

Tick
DRAMCtrl::recvAtomic(PacketPtr pkt)
{
    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    // do the actual memory access and turn the packet into a response
    access(pkt);

    Tick latency = 0;
    if (pkt->hasData()) {
        // this value is not supposed to be accurate, just enough to
        // keep things going, mimic a closed page
        latency = tRP + tRCD + tCL;
    }
    return latency;
}

bool
DRAMCtrl::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n",
            readBufferSize, readQueue.size() + respQueue.size(),
            neededEntries);

    return
        (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize;
}

bool
DRAMCtrl::writeQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, writeQueue.size(), neededEntries);
    return (writeQueue.size() + neededEntries) > writeBufferSize;
}
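
// Worked example of the decoding below (illustrative parameters only):
// under RoRaBaChCo with a 64-byte burst, 128 columns per row buffer,
// 1 channel, 8 banks and 2 ranks, byte address 0x7A000 is burst 7808;
// dividing out the columns gives 61, so bank = 61 % 8 = 5,
// rank = (61 / 8) % 2 = 1, and row = 61 / 16 = 3.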
DRAMCtrl::DRAMPacket*
DRAMCtrl::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size,
                     bool isRead)
{
    // decode the address based on the address mapping scheme, with
    // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and
    // channel, respectively
    uint8_t rank;
    uint8_t bank;
    // use a 64-bit unsigned during the computations as the row is
    // always the top bits, and check before creating the DRAMPacket
    uint64_t row;

    // truncate the address to a DRAM burst, which makes it unique to
    // a specific column, row, bank, rank and channel
    Addr addr = dramPktAddr / burstSize;

    // we have removed the lowest order address bits that denote the
    // position within the column
    if (addrMapping == Enums::RoRaBaChCo) {
        // the lowest order bits denote the column to ensure that
        // sequential cache lines occupy the same row
        addr = addr / columnsPerRowBuffer;

        // take out the channel part of the address
        addr = addr / channels;

        // after the channel bits, get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleaves
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits, no need to remove them from addr
        row = addr % rowsPerBank;
    } else if (addrMapping == Enums::RoRaBaCoCh) {
        // take out the lower-order column bits
        addr = addr / columnsPerStripe;

        // take out the channel part of the address
        addr = addr / channels;

        // next, the higher-order column bits
        addr = addr / (columnsPerRowBuffer / columnsPerStripe);

        // after the column bits, we get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleaves
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits, no need to remove them from addr
        row = addr % rowsPerBank;
    } else if (addrMapping == Enums::RoCoRaBaCh) {
        // optimise for closed page mode and utilise maximum
        // parallelism of the DRAM (at the cost of power)

        // take out the lower-order column bits
        addr = addr / columnsPerStripe;

        // take out the channel part of the address; note that this has
        // to match with how accesses are interleaved between the
        // controllers in the address mapping
        addr = addr / channels;

        // start with the bank bits, as this provides the maximum
        // opportunity for parallelism between requests
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // next get the rank bits
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // next, the higher-order column bits
        addr = addr / (columnsPerRowBuffer / columnsPerStripe);

        // lastly, get the row bits, no need to remove them from addr
        row = addr % rowsPerBank;
    } else
        panic("Unknown address mapping policy chosen!");

    assert(rank < ranksPerChannel);
    assert(bank < banksPerRank);
    assert(row < rowsPerBank);
    assert(row < Bank::NO_ROW);

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            dramPktAddr, rank, bank, row);

    // create the corresponding DRAM packet with the entry time and
    // ready time set to the current tick, the latter will be updated
    // later
    uint16_t bank_id = banksPerRank * rank + bank;
    return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
                          size, ranks[rank]->banks[bank], *ranks[rank]);
}
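
// Example of the burst split performed below (assuming a 64-byte burst
// size): a 128-byte read starting at address 0x70 becomes three DRAM
// packets of 16, 64 and 48 bytes covering [0x70,0x80), [0x80,0xC0) and
// [0xC0,0xF0); only the first packet keeps the unaligned start address.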
void
DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pktCount != 0);

    // if the request size is larger than the burst size, the pkt is
    // split into multiple DRAM packets
    // Note that if the pkt starting address is not aligned to the burst
    // size, the address of the first DRAM packet is kept unaligned.
    // Subsequent DRAM packets are aligned to burst size boundaries. This
    // is to ensure we accurately check read packets against packets in
    // the write queue.
    Addr addr = pkt->getAddr();
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
        readPktSize[ceilLog2(size)]++;
        readBursts++;

        // First check write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        Addr burst_addr = burstAlign(addr);
        // if the burst address is not present then there is no need
        // to look any further
        if (isInWriteQueue.find(burst_addr) != isInWriteQueue.end()) {
            for (const auto& p : writeQueue) {
                // check if the read is subsumed in the write queue
                // packet we are looking at
                if (p->addr <= addr && (addr + size) <= (p->addr + p->size)) {
                    foundInWrQ = true;
                    servicedByWrQ++;
                    pktsServicedByWrQ++;
                    DPRINTF(DRAM, "Read to addr %lld with size %d serviced by "
                            "write queue\n", addr, size);
                    bytesReadWrQ += burstSize;
                    break;
                }
            }
        }

        // If not found in the write q, make a DRAM packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pktCount > 1 && burst_helper == NULL) {
                DPRINTF(DRAM, "Read to addr %lld translates to %d "
                        "dram requests\n", pkt->getAddr(), pktCount);
                burst_helper = new BurstHelper(pktCount);
            }

            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true);
            dram_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            rdQLenPdf[readQueue.size() + respQueue.size()]++;

            DPRINTF(DRAM, "Adding to read queue\n");

            readQueue.push_back(dram_pkt);

            // increment read entries of the rank
            ++dram_pkt->rankRef.readEntries;

            // Update stats
            avgRdQLen = readQueue.size() + respQueue.size();
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // If all packets are serviced by the write queue, we send the
    // response back
    if (pktsServicedByWrQ == pktCount) {
        accessAndRespond(pkt, frontendLatency);
        return;
    }

    // Update how many split packets are serviced by the write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}
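
// Note on the merging below: write bursts are tracked by burst-aligned
// address in isInWriteQueue, so two bursts that fall within the same
// burst-aligned window result in a single write-queue entry; the second
// one is only counted in mergedWrBursts.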
void
DRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than the burst size, the pkt is
    // split into multiple DRAM packets
    Addr addr = pkt->getAddr();
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
        writePktSize[ceilLog2(size)]++;
        writeBursts++;

        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not
        bool merged = isInWriteQueue.find(burstAlign(addr)) !=
            isInWriteQueue.end();

        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false);

            assert(writeQueue.size() < writeBufferSize);
            wrQLenPdf[writeQueue.size()]++;

            DPRINTF(DRAM, "Adding to write queue\n");

            writeQueue.push_back(dram_pkt);
            isInWriteQueue.insert(burstAlign(addr));
            assert(writeQueue.size() == isInWriteQueue.size());

            // Update stats
            avgWrQLen = writeQueue.size();

            // increment write entries of the rank
            ++dram_pkt->rankRef.writeEntries;
        } else {
            DPRINTF(DRAM, "Merging write burst with existing queue entry\n");

            // keep track of the fact that this burst effectively
            // disappeared as it was merged with an existing one
            mergedWrBursts++;
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency);

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
DRAMCtrl::printQs() const {
    DPRINTF(DRAM, "===READ QUEUE===\n\n");
    for (auto i = readQueue.begin() ; i != readQueue.end() ; ++i) {
        DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
    for (auto i = respQueue.begin() ; i != respQueue.end() ; ++i) {
        DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
    for (auto i = writeQueue.begin() ; i != writeQueue.end() ; ++i) {
        DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
    }
}

bool
DRAMCtrl::recvTimingReq(PacketPtr pkt)
{
    // This is where we enter from the outside world
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "Should only see read and writes at memory controller\n");

    // Calc avg gap between requests
    if (prevArrival != 0) {
        totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();


    // Find out how many dram packets a pkt translates to
    // If the burst size is equal to or larger than the pkt size, then a
    // pkt translates to only one dram packet. Otherwise, a pkt translates
    // to multiple dram packets
    unsigned size = pkt->getSize();
    unsigned offset = pkt->getAddr() & (burstSize - 1);
    unsigned int dram_pkt_count = divCeil(offset + size, burstSize);

    // check local buffers and do not accept if full
    if (pkt->isRead()) {
        assert(size != 0);
        if (readQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            numRdRetry++;
            return false;
        } else {
            addToReadQueue(pkt, dram_pkt_count);
            readReqs++;
            bytesReadSys += size;
        }
    } else {
        assert(pkt->isWrite());
        assert(size != 0);
        if (writeQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt, dram_pkt_count);
            writeReqs++;
            bytesWrittenSys += size;
        }
    }

    return true;
}

void
DRAMCtrl::processRespondEvent()
{
    DPRINTF(DRAM,
            "processRespondEvent(): Some req has reached its readyTime\n");

    DRAMPacket* dram_pkt = respQueue.front();

    // if a read has reached its ready-time, decrement the number of reads
    // At this point the packet has been handled and there is a possibility
    // to switch to low-power mode if no other packet is available
    --dram_pkt->rankRef.readEntries;
    DPRINTF(DRAM, "number of read entries for rank %d is %d\n",
            dram_pkt->rank, dram_pkt->rankRef.readEntries);

    // counter should at least indicate one outstanding request
    // for this read
    assert(dram_pkt->rankRef.outstandingEvents > 0);
    // read response received, decrement count
    --dram_pkt->rankRef.outstandingEvents;

    // at this moment should be either ACT or IDLE depending on
    // if PRE has occurred to close all banks
    assert((dram_pkt->rankRef.pwrState == PWR_ACT) ||
           (dram_pkt->rankRef.pwrState == PWR_IDLE));

    // track if this is the last packet before idling
    // and that there are no outstanding commands to this rank
    if (dram_pkt->rankRef.lowPowerEntryReady()) {
        // verify that there are no events scheduled
        assert(!dram_pkt->rankRef.activateEvent.scheduled());
        assert(!dram_pkt->rankRef.prechargeEvent.scheduled());
        assert(dram_pkt->rankRef.refreshState == REF_IDLE);

        // if coming from active state, schedule power event to
        // active power-down else go to precharge power-down
        DPRINTF(DRAMState, "Rank %d sleep at tick %d; current power state is "
                "%d\n", dram_pkt->rank, curTick(), dram_pkt->rankRef.pwrState);

        // default to ACT power-down unless already in IDLE state
        // could be in IDLE if PRE issued before data returned
        PowerState next_pwr_state = PWR_ACT_PDN;
        if (dram_pkt->rankRef.pwrState == PWR_IDLE) {
            next_pwr_state = PWR_PRE_PDN;
        }

        dram_pkt->rankRef.powerDownSleep(next_pwr_state, curTick());
    }

    if (dram_pkt->burstHelper) {
        // it is a split packet
        dram_pkt->burstHelper->burstsServiced++;
        if (dram_pkt->burstHelper->burstsServiced ==
            dram_pkt->burstHelper->burstCount) {
            // we have now serviced all children packets of a system packet
            // so we can now respond to the requester
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
            delete dram_pkt->burstHelper;
            dram_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
    }

    delete respQueue.front();
    respQueue.pop_front();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (drainState() == DrainState::Draining &&
            writeQueue.empty() && readQueue.empty() && allRanksDrained()) {

            DPRINTF(Drain, "DRAM controller done draining\n");
            signalDrainDone();
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetryReq();
    }
}

bool
DRAMCtrl::chooseNext(std::deque<DRAMPacket*>& queue, Tick extra_col_delay)
{
    // This method does the arbitration between requests. The chosen
    // packet is simply moved to the head of the queue. The other
    // methods know that this is the place to look. For example, with
    // FCFS, this method does nothing
    assert(!queue.empty());

    // bool to indicate if a packet to an available rank is found
    bool found_packet = false;
    if (queue.size() == 1) {
        DRAMPacket* dram_pkt = queue.front();
        // an available rank corresponds to one that is in the refresh
        // idle state
        if (ranks[dram_pkt->rank]->isAvailable()) {
            found_packet = true;
            DPRINTF(DRAM, "Single request, going to a free rank\n");
        } else {
            DPRINTF(DRAM, "Single request, going to a busy rank\n");
        }
        return found_packet;
    }

    if (memSchedPolicy == Enums::fcfs) {
        // check if there is a packet going to a free rank
        for (auto i = queue.begin(); i != queue.end() ; ++i) {
            DRAMPacket* dram_pkt = *i;
            if (ranks[dram_pkt->rank]->isAvailable()) {
                queue.erase(i);
                queue.push_front(dram_pkt);
                found_packet = true;
                break;
            }
        }
    } else if (memSchedPolicy == Enums::frfcfs) {
        found_packet = reorderQueue(queue, extra_col_delay);
    } else
        panic("No scheduling policy chosen\n");
    return found_packet;
}
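
// Rough summary of the FR-FCFS selection implemented below (derived
// from the code, in decreasing priority): 1) row hits whose burst can
// issue seamlessly, 2) closed rows whose activate can be hidden behind
// the current burst, 3) row hits to already-open rows, and 4) otherwise
// the packet with the earliest possible activate.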
bool
DRAMCtrl::reorderQueue(std::deque<DRAMPacket*>& queue, Tick extra_col_delay)
{
    // Only determine this if needed
    uint64_t earliest_banks = 0;
    bool hidden_bank_prep = false;

    // search for seamless row hits first, if no seamless row hit is
    // found then determine if there are other packets that can be issued
    // without incurring additional bus delay due to bank timing
    // Will select closed rows first to enable more open row possibilities
    // in future selections
    bool found_hidden_bank = false;

    // remember if we found a row hit, not seamless, but bank prepped
    // and ready
    bool found_prepped_pkt = false;

    // if we have no row hit, prepped or not, and no seamless packet,
    // just go for the earliest possible
    bool found_earliest_pkt = false;

    auto selected_pkt_it = queue.end();

    // time we need to issue a column command to be seamless
    const Tick min_col_at = std::max(busBusyUntil - tCL + extra_col_delay,
                                     curTick());

    for (auto i = queue.begin(); i != queue.end() ; ++i) {
        DRAMPacket* dram_pkt = *i;
        const Bank& bank = dram_pkt->bankRef;

        // check if rank is available, if not, jump to the next packet
        if (dram_pkt->rankRef.isAvailable()) {
            // check if it is a row hit
            if (bank.openRow == dram_pkt->row) {
                // no additional rank-to-rank or same bank-group
                // delays, or we switched read/write and might as well
                // go for the row hit
                if (bank.colAllowedAt <= min_col_at) {
                    // FCFS within the hits, giving priority to
                    // commands that can issue seamlessly, without
                    // additional delay, such as same rank accesses
                    // and/or different bank-group accesses
                    DPRINTF(DRAM, "Seamless row buffer hit\n");
                    selected_pkt_it = i;
                    // no need to look through the remaining queue entries
                    break;
                } else if (!found_hidden_bank && !found_prepped_pkt) {
                    // if we did not find a packet to a closed row that can
                    // issue the bank commands without incurring delay, and
                    // did not yet find a packet to a prepped row, remember
                    // the current one
                    selected_pkt_it = i;
                    found_prepped_pkt = true;
                    DPRINTF(DRAM, "Prepped row buffer hit\n");
                }
            } else if (!found_earliest_pkt) {
                // if we have not initialised the bank status, do it
                // now, and only once per scheduling decision
                if (earliest_banks == 0) {
                    // determine entries with earliest bank delay
                    pair<uint64_t, bool> bankStatus =
                        minBankPrep(queue, min_col_at);
                    earliest_banks = bankStatus.first;
                    hidden_bank_prep = bankStatus.second;
                }

                // bank is amongst first available banks
                // minBankPrep will give priority to packets that can
                // issue seamlessly
                if (bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) {
                    found_earliest_pkt = true;
                    found_hidden_bank = hidden_bank_prep;

                    // give priority to packets that can issue
                    // bank commands 'behind the scenes'
                    // any additional delay if any will be due to
                    // col-to-col command requirements
                    if (hidden_bank_prep || !found_prepped_pkt)
                        selected_pkt_it = i;
                }
            }
        }
    }

    if (selected_pkt_it != queue.end()) {
        DRAMPacket* selected_pkt = *selected_pkt_it;
        queue.erase(selected_pkt_it);
        queue.push_front(selected_pkt);
        return true;
    }

    return false;
}

void
DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
    DPRINTF(DRAM, "Responding to Address %lld.. ", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    access(pkt);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());
        // response_time consumes the static latency and is charged also
        // with headerDelay that takes into account the delay provided by
        // the xbar and also the payloadDelay that takes into account the
        // number of data beats.
        Tick response_time = curTick() + static_latency + pkt->headerDelay +
            pkt->payloadDelay;
        // Here we reset the timing of the packet before sending it out.
        pkt->headerDelay = pkt->payloadDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, response_time, true);
    } else {
        // @todo the packet is going to be deleted, and the DRAMPacket
        // still has a pointer to it
        pendingDelete.reset(pkt);
    }

    DPRINTF(DRAM, "Done\n");

    return;
}
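
// In addition to opening the row, activateBank() below propagates all
// ACT-related constraints. For instance, with an activation limit of 4,
// the rolling-window check means a 5th ACT can issue no earlier than
// tXAW (i.e. tFAW) after the first of the previous four activates.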
void
DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref,
                       Tick act_tick, uint32_t row)
{
    assert(rank_ref.actTicks.size() == activationLimit);

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    // update the open row
    assert(bank_ref.openRow == Bank::NO_ROW);
    bank_ref.openRow = row;

    // start counting anew, this covers both the case when we
    // auto-precharged, and when this access is forced to
    // precharge
    bank_ref.bytesAccessed = 0;
    bank_ref.rowAccesses = 0;

    ++rank_ref.numBanksActive;
    assert(rank_ref.numBanksActive <= banksPerRank);

    DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got %d active\n",
            bank_ref.bank, rank_ref.rank, act_tick,
            ranks[rank_ref.rank]->numBanksActive);

    rank_ref.cmdList.push_back(Command(MemCommand::ACT, bank_ref.bank,
                               act_tick));

    DPRINTF(DRAMPower, "%llu,ACT,%d,%d\n", divCeil(act_tick, tCK) -
            timeStampOffset, bank_ref.bank, rank_ref.rank);

    // The next access has to respect tRAS for this bank
    bank_ref.preAllowedAt = act_tick + tRAS;

    // Respect the row-to-column command delay
    bank_ref.colAllowedAt = std::max(act_tick + tRCD, bank_ref.colAllowedAt);

    // start by enforcing tRRD
    for (int i = 0; i < banksPerRank; i++) {
        // next activate to any bank in this rank must not happen
        // before tRRD
        if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) {
            // bank group architecture requires longer delays between
            // ACT commands within the same bank group. Use tRRD_L
            // in this case
            rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD_L,
                                             rank_ref.banks[i].actAllowedAt);
        } else {
            // use shorter tRRD value when either
            // 1) bank group architecture is not supported
            // 2) bank is in a different bank group
            rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD,
                                             rank_ref.banks[i].actAllowedAt);
        }
    }

    // next, we deal with tXAW, if the activation limit is disabled
    // then we directly schedule an activate power event
    if (!rank_ref.actTicks.empty()) {
        // sanity check
        if (rank_ref.actTicks.back() &&
            (act_tick - rank_ref.actTicks.back()) < tXAW) {
            panic("Got %d activates in window %d (%llu - %llu) which "
                  "is smaller than %llu\n", activationLimit, act_tick -
                  rank_ref.actTicks.back(), act_tick,
                  rank_ref.actTicks.back(), tXAW);
        }

        // shift the times used for the bookkeeping, the last element
        // (highest index) is the oldest one and hence the lowest value
        rank_ref.actTicks.pop_back();

        // record a new activation (in the future)
        rank_ref.actTicks.push_front(act_tick);

        // cannot activate more than X times in time window tXAW, push the
        // next one (the X + 1'st activate) to be tXAW away from the
        // oldest in our window of X
        if (rank_ref.actTicks.back() &&
            (act_tick - rank_ref.actTicks.back()) < tXAW) {
            DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "
                    "no earlier than %llu\n", activationLimit,
                    rank_ref.actTicks.back() + tXAW);
            for (int j = 0; j < banksPerRank; j++)
                // next activate must not happen before end of window
                rank_ref.banks[j].actAllowedAt =
                    std::max(rank_ref.actTicks.back() + tXAW,
                             rank_ref.banks[j].actAllowedAt);
        }
    }

    // at the point when this activate takes place, make sure we
    // transition to the active power state
    if (!rank_ref.activateEvent.scheduled())
        schedule(rank_ref.activateEvent, act_tick);
    else if (rank_ref.activateEvent.when() > act_tick)
        // move it sooner in time
        reschedule(rank_ref.activateEvent, act_tick);
}

void
DRAMCtrl::prechargeBank(Rank& rank_ref, Bank& bank, Tick pre_at, bool trace)
{
    // make sure the bank has an open row
    assert(bank.openRow != Bank::NO_ROW);

    // sample the bytes per activate here since we are closing
    // the page
    bytesPerActivate.sample(bank.bytesAccessed);

    bank.openRow = Bank::NO_ROW;

    // no precharge allowed before this one
    bank.preAllowedAt = pre_at;

    Tick pre_done_at = pre_at + tRP;

    bank.actAllowedAt = std::max(bank.actAllowedAt, pre_done_at);

    assert(rank_ref.numBanksActive != 0);
    --rank_ref.numBanksActive;

    DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got "
            "%d active\n", bank.bank, rank_ref.rank, pre_at,
            rank_ref.numBanksActive);

    if (trace) {

        rank_ref.cmdList.push_back(Command(MemCommand::PRE, bank.bank,
                                   pre_at));
        DPRINTF(DRAMPower, "%llu,PRE,%d,%d\n", divCeil(pre_at, tCK) -
                timeStampOffset, bank.bank, rank_ref.rank);
    }
    // if we look at the current number of active banks we might be
    // tempted to think the DRAM is now idle, however this can be
    // undone by an activate that is scheduled to happen before we
    // would have reached the idle state, so schedule an event and
    // rather check once we actually make it to the point in time when
    // the (last) precharge takes place
    if (!rank_ref.prechargeEvent.scheduled()) {
        schedule(rank_ref.prechargeEvent, pre_done_at);
        // New event, increment count
        ++rank_ref.outstandingEvents;
    } else if (rank_ref.prechargeEvent.when() < pre_done_at) {
        reschedule(rank_ref.prechargeEvent, pre_done_at);
    }
}
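
// Sketch of the latency cases handled in doDRAMAccess() below: a row
// hit pays tCL + tBURST from the point the column command can issue; a
// closed bank additionally pays tRCD for the activate; and a row
// conflict pays tRP on top of that to first precharge the open row.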
void
DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt)
{
    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    // get the rank
    Rank& rank = dram_pkt->rankRef;

    // are we in or transitioning to a low-power state and have not scheduled
    // a power-up event?
    // if so, wake up from power down to issue RD/WR burst
    if (rank.inLowPowerState) {
        assert(rank.pwrState != PWR_SREF);
        rank.scheduleWakeUpEvent(tXP);
    }

    // get the bank
    Bank& bank = dram_pkt->bankRef;

    // for the state we need to track if it is a row hit or not
    bool row_hit = true;

    // respect any constraints on the command (e.g. tRCD or tCCD)
    Tick cmd_at = std::max(bank.colAllowedAt, curTick());

    // Determine the access latency and update the bank state
    if (bank.openRow == dram_pkt->row) {
        // nothing to do
    } else {
        row_hit = false;

        // If there is a page open, precharge it.
        if (bank.openRow != Bank::NO_ROW) {
            prechargeBank(rank, bank, std::max(bank.preAllowedAt, curTick()));
        }

        // next we need to account for the delay in activating the
        // page
        Tick act_tick = std::max(bank.actAllowedAt, curTick());

        // Record the activation and deal with all the global timing
        // constraints caused by a new activation (tRRD and tXAW)
        activateBank(rank, bank, act_tick, dram_pkt->row);

        // issue the command as early as possible
        cmd_at = bank.colAllowedAt;
    }

    // we need to wait until the bus is available before we can issue
    // the command
    cmd_at = std::max(cmd_at, busBusyUntil - tCL);

    // update the packet ready time
    dram_pkt->readyTime = cmd_at + tCL + tBURST;

    // only one burst can use the bus at any one point in time
    assert(dram_pkt->readyTime - busBusyUntil >= tBURST);

    // update the time for the next read/write burst for each
    // bank (add a max with tCCD/tCCD_L here)
    Tick cmd_dly;
    for (int j = 0; j < ranksPerChannel; j++) {
        for (int i = 0; i < banksPerRank; i++) {
            // next burst to same bank group in this rank must not happen
            // before tCCD_L. Different bank group timing requirement is
            // tBURST; Add tCS for different ranks
            if (dram_pkt->rank == j) {
                if (bankGroupArch &&
                    (bank.bankgr == ranks[j]->banks[i].bankgr)) {
                    // bank group architecture requires longer delays between
                    // RD/WR burst commands to the same bank group.
                    // Use tCCD_L in this case
                    cmd_dly = tCCD_L;
                } else {
                    // use tBURST (equivalent to tCCD_S), the shorter
                    // cas-to-cas delay value, when either:
                    // 1) bank group architecture is not supported
                    // 2) bank is in a different bank group
                    cmd_dly = tBURST;
                }
            } else {
                // different rank is by default in a different bank group
                // use tBURST (equivalent to tCCD_S), which is the shorter
                // cas-to-cas delay in this case
                // Add tCS to account for rank-to-rank bus delay requirements
                cmd_dly = tBURST + tCS;
            }
            ranks[j]->banks[i].colAllowedAt = std::max(cmd_at + cmd_dly,
                                              ranks[j]->banks[i].colAllowedAt);
        }
    }

    // Save rank of current access
    activeRank = dram_pkt->rank;

    // If this is a write, we also need to respect the write recovery
    // time before a precharge, in the case of a read, respect the
    // read to precharge constraint
    bank.preAllowedAt = std::max(bank.preAllowedAt,
                                 dram_pkt->isRead ? cmd_at + tRTP :
                                 dram_pkt->readyTime + tWR);

    // increment the bytes accessed and the accesses per row
    bank.bytesAccessed += burstSize;
    ++bank.rowAccesses;

    // if we reached the max, then issue with an auto-precharge
    bool auto_precharge = pageMgmt == Enums::close ||
        bank.rowAccesses == maxAccessesPerRow;

    // if we did not hit the limit, we might still want to
    // auto-precharge
    if (!auto_precharge &&
        (pageMgmt == Enums::open_adaptive ||
         pageMgmt == Enums::close_adaptive)) {
        // a twist on the open and close page policies:
        // 1) open_adaptive page policy does not blindly keep the
        // page open, but closes it if there are no row hits and there
        // are bank conflicts in the queue
        // 2) close_adaptive page policy does not blindly close the
        // page, but closes it only if there are no row hits in the queue.
        // In this case, only force an auto precharge when there
        // are no same page hits in the queue
        bool got_more_hits = false;
        bool got_bank_conflict = false;

        // either look at the read queue or write queue
        const deque<DRAMPacket*>& queue = dram_pkt->isRead ? readQueue :
            writeQueue;
        auto p = queue.begin();
        // make sure we are not considering the packet that we are
        // currently dealing with (which is the head of the queue)
        ++p;

        // keep on looking until we find a hit or reach the end of the queue
        // 1) if a hit is found, then both open and close adaptive
        // policies keep the page open
        // 2) if no hit is found, got_bank_conflict is set to true if a
        // bank conflict request is waiting in the queue
        while (!got_more_hits && p != queue.end()) {
            bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
                (dram_pkt->bank == (*p)->bank);
            bool same_row = dram_pkt->row == (*p)->row;
            got_more_hits |= same_rank_bank && same_row;
            got_bank_conflict |= same_rank_bank && !same_row;
            ++p;
        }

        // auto pre-charge when either
        // 1) open_adaptive policy, we have not got any more hits, and
        // have a bank conflict
        // 2) close_adaptive policy and we have not got any more hits
        auto_precharge = !got_more_hits &&
            (got_bank_conflict || pageMgmt == Enums::close_adaptive);
    }
"RD" : "WR"; 1232 1233 // MemCommand required for DRAMPower library 1234 MemCommand::cmds command = (mem_cmd == "RD") ? MemCommand::RD : 1235 MemCommand::WR; 1236 1237 // Update bus state 1238 busBusyUntil = dram_pkt->readyTime; 1239 1240 DPRINTF(DRAM, "Access to %lld, ready at %lld bus busy until %lld.\n", 1241 dram_pkt->addr, dram_pkt->readyTime, busBusyUntil); 1242 1243 dram_pkt->rankRef.cmdList.push_back(Command(command, dram_pkt->bank, 1244 cmd_at)); 1245 1246 DPRINTF(DRAMPower, "%llu,%s,%d,%d\n", divCeil(cmd_at, tCK) - 1247 timeStampOffset, mem_cmd, dram_pkt->bank, dram_pkt->rank); 1248 1249 // if this access should use auto-precharge, then we are 1250 // closing the row after the read/write burst 1251 if (auto_precharge) { 1252 // if auto-precharge push a PRE command at the correct tick to the 1253 // list used by DRAMPower library to calculate power 1254 prechargeBank(rank, bank, std::max(curTick(), bank.preAllowedAt)); 1255 1256 DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId); 1257 } 1258 1259 // Update the minimum timing between the requests, this is a 1260 // conservative estimate of when we have to schedule the next 1261 // request to not introduce any unecessary bubbles. In most cases 1262 // we will wake up sooner than we have to. 1263 nextReqTime = busBusyUntil - (tRP + tRCD + tCL); 1264 1265 // Update the stats and schedule the next request 1266 if (dram_pkt->isRead) { 1267 ++readsThisTime; 1268 if (row_hit) 1269 readRowHits++; 1270 bytesReadDRAM += burstSize; 1271 perBankRdBursts[dram_pkt->bankId]++; 1272 1273 // Update latency stats 1274 totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime; 1275 totBusLat += tBURST; 1276 totQLat += cmd_at - dram_pkt->entryTime; 1277 } else { 1278 ++writesThisTime; 1279 if (row_hit) 1280 writeRowHits++; 1281 bytesWritten += burstSize; 1282 perBankWrBursts[dram_pkt->bankId]++; 1283 } 1284} 1285 1286void 1287DRAMCtrl::processNextReqEvent() 1288{ 1289 int busyRanks = 0; 1290 for (auto r : ranks) { 1291 if (!r->isAvailable()) { 1292 if (r->pwrState != PWR_SREF) { 1293 // rank is busy refreshing 1294 DPRINTF(DRAMState, "Rank %d is not available\n", r->rank); 1295 busyRanks++; 1296 1297 // let the rank know that if it was waiting to drain, it 1298 // is now done and ready to proceed 1299 r->checkDrainDone(); 1300 } 1301 1302 // check if we were in self-refresh and haven't started 1303 // to transition out 1304 if ((r->pwrState == PWR_SREF) && r->inLowPowerState) { 1305 DPRINTF(DRAMState, "Rank %d is in self-refresh\n", r->rank); 1306 // if we have commands queued to this rank and we don't have 1307 // a minimum number of active commands enqueued, 1308 // exit self-refresh 1309 if (r->forceSelfRefreshExit()) { 1310 DPRINTF(DRAMState, "rank %d was in self refresh and" 1311 " should wake up\n", r->rank); 1312 //wake up from self-refresh 1313 r->scheduleWakeUpEvent(tXS); 1314 // things are brought back into action once a refresh is 1315 // performed after self-refresh 1316 // continue with selection for other ranks 1317 } 1318 } 1319 } 1320 } 1321 1322 if (busyRanks == ranksPerChannel) { 1323 // if all ranks are refreshing wait for them to finish 1324 // and stall this state machine without taking any further 1325 // action, and do not schedule a new nextReqEvent 1326 return; 1327 } 1328 1329 // pre-emptively set to false. 
void
DRAMCtrl::processNextReqEvent()
{
    int busyRanks = 0;
    for (auto r : ranks) {
        if (!r->isAvailable()) {
            if (r->pwrState != PWR_SREF) {
                // rank is busy refreshing
                DPRINTF(DRAMState, "Rank %d is not available\n", r->rank);
                busyRanks++;

                // let the rank know that if it was waiting to drain, it
                // is now done and ready to proceed
                r->checkDrainDone();
            }

            // check if we were in self-refresh and haven't started
            // to transition out
            if ((r->pwrState == PWR_SREF) && r->inLowPowerState) {
                DPRINTF(DRAMState, "Rank %d is in self-refresh\n", r->rank);
                // if we have commands queued to this rank and we don't have
                // a minimum number of active commands enqueued,
                // exit self-refresh
                if (r->forceSelfRefreshExit()) {
                    DPRINTF(DRAMState, "rank %d was in self refresh and"
                            " should wake up\n", r->rank);
                    // wake up from self-refresh
                    r->scheduleWakeUpEvent(tXS);
                    // things are brought back into action once a refresh is
                    // performed after self-refresh
                    // continue with selection for other ranks
                }
            }
        }
    }

    if (busyRanks == ranksPerChannel) {
        // if all ranks are refreshing wait for them to finish
        // and stall this state machine without taking any further
        // action, and do not schedule a new nextReqEvent
        return;
    }

    // pre-emptively set to false. Overwrite if transitioning to
    // a new state
    bool switched_cmd_type = false;
    if (busState != busStateNext) {
        if (busState == READ) {
            DPRINTF(DRAM, "Switching to writes after %d reads with %d reads "
                    "waiting\n", readsThisTime, readQueue.size());

            // sample and reset the read-related stats as we are now
            // transitioning to writes, and all reads are done
            rdPerTurnAround.sample(readsThisTime);
            readsThisTime = 0;

            // now proceed to do the actual writes
            switched_cmd_type = true;
        } else {
            DPRINTF(DRAM, "Switching to reads after %d writes with %d writes "
                    "waiting\n", writesThisTime, writeQueue.size());

            wrPerTurnAround.sample(writesThisTime);
            writesThisTime = 0;

            switched_cmd_type = true;
        }
        // update busState to match next state until next transition
        busState = busStateNext;
    }

    // when we get here it is either a read or a write
    if (busState == READ) {

        // track if we should switch or not
        bool switch_to_writes = false;

        if (readQueue.empty()) {
            // In the case there is no read request to go next,
            // trigger writes if we have passed the low threshold (or
            // if we are draining)
            if (!writeQueue.empty() &&
                (drainState() == DrainState::Draining ||
                 writeQueue.size() > writeLowThreshold)) {

                switch_to_writes = true;
            } else {
                // check if we are drained
                // not done draining until in PWR_IDLE state
                // ensuring all banks are closed and
                // have exited low power states
                if (drainState() == DrainState::Draining &&
                    respQueue.empty() && allRanksDrained()) {

                    DPRINTF(Drain, "DRAM controller done draining\n");
                    signalDrainDone();
                }

                // nothing to do, not even any point in scheduling an
                // event for the next request
                return;
            }
        } else {
            // bool to check if there is a read to a free rank
            bool found_read = false;

            // Figure out which read request goes next, and move it to the
            // front of the read queue
            // If we are changing command type, incorporate the minimum
            // bus turnaround delay which will be tCS (different rank) case
            found_read = chooseNext(readQueue,
                                    switched_cmd_type ? tCS : 0);

            // if no read to an available rank is found then return
            // at this point. There could be writes to the available ranks
            // which are above the required threshold. However, to
            // avoid adding more complexity to the code, return and wait
            // for a refresh event to kick things into action again.
            if (!found_read)
                return;

            DRAMPacket* dram_pkt = readQueue.front();
            assert(dram_pkt->rankRef.isAvailable());

            // here we get a bit creative and shift the bus busy time not
            // just the tWTR, but also a CAS latency to capture the fact
            // that we are allowed to prepare a new bank, but not issue a
            // read command until after tWTR, in essence we capture a
            // bubble on the data bus that is tWTR + tCL
            if (switched_cmd_type && dram_pkt->rank == activeRank) {
                busBusyUntil += tWTR + tCL;
            }

            doDRAMAccess(dram_pkt);

            // At this point we're done dealing with the request
            readQueue.pop_front();

            // Every respQueue entry will generate an event, so
            // increment the count
            ++dram_pkt->rankRef.outstandingEvents;

            // sanity check
            assert(dram_pkt->size <= burstSize);
            assert(dram_pkt->readyTime >= curTick());

            // Insert into response queue. It will be sent back to the
            // requestor at its readyTime
            if (respQueue.empty()) {
                assert(!respondEvent.scheduled());
                schedule(respondEvent, dram_pkt->readyTime);
            } else {
                assert(respQueue.back()->readyTime <= dram_pkt->readyTime);
                assert(respondEvent.scheduled());
            }

            respQueue.push_back(dram_pkt);

            // we have so many writes that we have to transition
            if (writeQueue.size() > writeHighThreshold) {
                switch_to_writes = true;
            }
        }

        // switching to writes, either because the read queue is empty
        // and the writes have passed the low threshold (or we are
        // draining), or because the writes hit the high threshold
        if (switch_to_writes) {
            // transition to writing
            busStateNext = WRITE;
        }
    } else {
        // bool to check if write to free rank is found
        bool found_write = false;

        // If we are changing command type, incorporate the minimum
        // bus turnaround delay
        found_write = chooseNext(writeQueue,
                                 switched_cmd_type ? std::min(tRTW, tCS) : 0);

        // if no writes to an available rank are found then return.
        // There could be reads to the available ranks. However, to avoid
        // adding more complexity to the code, return at this point and wait
        // for a refresh event to kick things into action again.
        if (!found_write)
            return;

        DRAMPacket* dram_pkt = writeQueue.front();
        assert(dram_pkt->rankRef.isAvailable());
        // sanity check
        assert(dram_pkt->size <= burstSize);

        // add a bubble to the data bus, as defined by the
        // tRTW when access is to the same rank as previous burst
        // Different rank timing is handled with tCS, which is
        // applied to colAllowedAt
        if (switched_cmd_type && dram_pkt->rank == activeRank) {
            busBusyUntil += tRTW;
        }

        doDRAMAccess(dram_pkt);

        writeQueue.pop_front();

        // removed write from queue, decrement count
        --dram_pkt->rankRef.writeEntries;

        // Schedule write done event to decrement event count
        // after the readyTime has been reached
        // Only schedule latest write event to minimize events
        // required; only need to ensure that the final event scheduled
        // covers the time that writes are outstanding and the bus is
        // active, to hold off power-down entry events
        if (!dram_pkt->rankRef.writeDoneEvent.scheduled()) {
            schedule(dram_pkt->rankRef.writeDoneEvent, dram_pkt->readyTime);
            // New event, increment count
            ++dram_pkt->rankRef.outstandingEvents;

        } else if (dram_pkt->rankRef.writeDoneEvent.when() <
                   dram_pkt->readyTime) {
            reschedule(dram_pkt->rankRef.writeDoneEvent, dram_pkt->readyTime);
        }

        isInWriteQueue.erase(burstAlign(dram_pkt->addr));
        delete dram_pkt;

        // If we emptied the write queue, or got sufficiently below the
        // threshold (using the minWritesPerSwitch as the hysteresis) and
        // are not draining, or we have reads waiting and have done enough
        // writes, then switch to reads.
        if (writeQueue.empty() ||
            (writeQueue.size() + minWritesPerSwitch < writeLowThreshold &&
             drainState() != DrainState::Draining) ||
            (!readQueue.empty() && writesThisTime >= minWritesPerSwitch)) {
            // turn the bus back around for reads again
            busStateNext = READ;

            // note that we switch back to reads also in the idle
            // case, which eventually will check for any draining and
            // also pause any further scheduling if there is really
            // nothing to do
        }
    }
    // It is possible that a refresh to another rank kicks things back into
    // action before reaching this point.
    if (!nextReqEvent.scheduled())
        schedule(nextReqEvent, std::max(nextReqTime, curTick()));

    // If there is space available and we have writes waiting then let
    // them retry. This is done here to ensure that the retry does not
    // cause a nextReqEvent to be scheduled before we do so as part of
    // the next request processing
    if (retryWrReq && writeQueue.size() < writeBufferSize) {
        retryWrReq = false;
        port.sendRetryReq();
    }
}
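
// minBankPrep() below returns a pair: a bit mask over all banks in the
// channel, in which bit (rank * banksPerRank + bank) is set if that
// bank is amongst the earliest that can accept an activate, and a flag
// indicating whether that activate can be hidden behind the current
// burst.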
pair<uint64_t, bool>
DRAMCtrl::minBankPrep(const deque<DRAMPacket*>& queue,
                      Tick min_col_at) const
{
    uint64_t bank_mask = 0;
    Tick min_act_at = MaxTick;

    // latest Tick for which ACT can occur without incurring additional
    // delay on the data bus
    const Tick hidden_act_max = std::max(min_col_at - tRCD, curTick());

    // Flag condition when burst can issue back-to-back with previous burst
    bool found_seamless_bank = false;

    // Flag condition when bank can be opened without incurring additional
    // delay on the data bus
    bool hidden_bank_prep = false;

    // determine if we have queued transactions targeting the
    // bank in question
    vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
    for (const auto& p : queue) {
        if (p->rankRef.isAvailable())
            got_waiting[p->bankId] = true;
    }

    // Find command with optimal bank timing
    // Will prioritize commands that can issue seamlessly.
    for (int i = 0; i < ranksPerChannel; i++) {
        for (int j = 0; j < banksPerRank; j++) {
            uint16_t bank_id = i * banksPerRank + j;

            // if we have waiting requests for the bank, and it is
            // amongst the first available, update the mask
            if (got_waiting[bank_id]) {
                // make sure this rank is not currently refreshing.
                assert(ranks[i]->isAvailable());
                // simplistic approximation of when the bank can issue
                // an activate, ignoring any rank-to-rank switching
                // cost in this calculation
                Tick act_at = ranks[i]->banks[j].openRow == Bank::NO_ROW ?
                    std::max(ranks[i]->banks[j].actAllowedAt, curTick()) :
                    std::max(ranks[i]->banks[j].preAllowedAt, curTick()) + tRP;

                // When is the earliest the R/W burst can issue?
                Tick col_at = std::max(ranks[i]->banks[j].colAllowedAt,
                                       act_at + tRCD);

                // bank can issue burst back-to-back (seamlessly) with
                // previous burst
                bool new_seamless_bank = col_at <= min_col_at;

                // if we found a new seamless bank or we have no
                // seamless banks, and got a bank with an earlier
                // activate time, it should be added to the bit mask
                if (new_seamless_bank ||
                    (!found_seamless_bank && act_at <= min_act_at)) {
                    // if we did not have a seamless bank before, and
                    // we do now, reset the bank mask, also reset it
                    // if we have not yet found a seamless bank and
                    // the activate time is smaller than what we have
                    // seen so far
                    if (!found_seamless_bank &&
                        (new_seamless_bank || act_at < min_act_at)) {
                        bank_mask = 0;
                    }

                    found_seamless_bank |= new_seamless_bank;

                    // ACT can occur 'behind the scenes'
                    hidden_bank_prep = act_at <= hidden_act_max;

                    // set the bit corresponding to the available bank
                    replaceBits(bank_mask, bank_id, bank_id, 1);
                    min_act_at = act_at;
                }
            }
        }
    }

    return make_pair(bank_mask, hidden_bank_prep);
}

DRAMCtrl::Rank::Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p)
    : EventManager(&_memory), memory(_memory),
      pwrStateTrans(PWR_IDLE), pwrStatePostRefresh(PWR_IDLE),
      pwrStateTick(0), refreshDueAt(0), pwrState(PWR_IDLE),
      refreshState(REF_IDLE), inLowPowerState(false), rank(0),
      readEntries(0), writeEntries(0), outstandingEvents(0),
      wakeUpAllowedAt(0), power(_p, false), numBanksActive(0),
      writeDoneEvent(*this), activateEvent(*this), prechargeEvent(*this),
      refreshEvent(*this), powerEvent(*this), wakeUpEvent(*this)
{ }

void
DRAMCtrl::Rank::startup(Tick ref_tick)
{
    assert(ref_tick > curTick());

    pwrStateTick = curTick();

    // kick off the refresh, and give ourselves enough time to
    // precharge
    schedule(refreshEvent, ref_tick);
}

void
DRAMCtrl::Rank::suspend()
{
    deschedule(refreshEvent);

    // Update the stats
    updatePowerStats();

    // don't automatically transition back to LP state after next REF
    pwrStatePostRefresh = PWR_IDLE;
}

bool
DRAMCtrl::Rank::lowPowerEntryReady() const
{
    bool no_queued_cmds = ((memory.busStateNext == READ) && (readEntries == 0))
                          || ((memory.busStateNext == WRITE) &&
                              (writeEntries == 0));

    if (refreshState == REF_RUN) {
        // have not decremented outstandingEvents for refresh command
        // still check if there are no commands queued to force PD
        // entry after refresh completes
        return no_queued_cmds;
    } else {
        // ensure no commands in Q and no commands scheduled
        return (no_queued_cmds && (outstandingEvents == 0));
    }
}

void
DRAMCtrl::Rank::checkDrainDone()
{
    // if this rank was waiting to drain it is now able to proceed to
    // precharge
    if (refreshState == REF_DRAIN) {
        DPRINTF(DRAM, "Refresh drain done, now precharging\n");

        refreshState = REF_PD_EXIT;

        // hand control back to the refresh event loop
        schedule(refreshEvent, curTick());
    }
}
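
// Note on the tick-to-cycle conversion used below: commands are handed
// to DRAMPower in controller clock cycles, computed as
// divCeil(timeStamp, tCK) - timeStampOffset. For example (illustrative
// numbers), with tCK = 1250 ticks and timeStampOffset = 100, a command
// at tick 200000 is reported as cycle 160 - 100 = 60.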
DRAMCtrl::Rank::Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p)
    : EventManager(&_memory), memory(_memory),
      pwrStateTrans(PWR_IDLE), pwrStatePostRefresh(PWR_IDLE),
      pwrStateTick(0), refreshDueAt(0), pwrState(PWR_IDLE),
      refreshState(REF_IDLE), inLowPowerState(false), rank(0),
      readEntries(0), writeEntries(0), outstandingEvents(0),
      wakeUpAllowedAt(0), power(_p, false), numBanksActive(0),
      writeDoneEvent(*this), activateEvent(*this), prechargeEvent(*this),
      refreshEvent(*this), powerEvent(*this), wakeUpEvent(*this)
{ }

void
DRAMCtrl::Rank::startup(Tick ref_tick)
{
    assert(ref_tick > curTick());

    pwrStateTick = curTick();

    // kick off the refresh, and give ourselves enough time to
    // precharge
    schedule(refreshEvent, ref_tick);
}

void
DRAMCtrl::Rank::suspend()
{
    deschedule(refreshEvent);

    // Update the stats
    updatePowerStats();

    // don't automatically transition back to LP state after next REF
    pwrStatePostRefresh = PWR_IDLE;
}

bool
DRAMCtrl::Rank::lowPowerEntryReady() const
{
    bool no_queued_cmds = ((memory.busStateNext == READ) && (readEntries == 0))
                          || ((memory.busStateNext == WRITE) &&
                              (writeEntries == 0));

    if (refreshState == REF_RUN) {
        // have not decremented outstandingEvents for refresh command
        // still check if there are no commands queued to force PD
        // entry after refresh completes
        return no_queued_cmds;
    } else {
        // ensure no commands in Q and no commands scheduled
        return (no_queued_cmds && (outstandingEvents == 0));
    }
}

void
DRAMCtrl::Rank::checkDrainDone()
{
    // if this rank was waiting to drain it is now able to proceed to
    // precharge
    if (refreshState == REF_DRAIN) {
        DPRINTF(DRAM, "Refresh drain done, now precharging\n");

        refreshState = REF_PD_EXIT;

        // hand control back to the refresh event loop
        schedule(refreshEvent, curTick());
    }
}

void
DRAMCtrl::Rank::flushCmdList()
{
    // at the moment, sort the list of commands and update the counters
    // for the DRAMPower library when doing a refresh
    sort(cmdList.begin(), cmdList.end(), DRAMCtrl::sortTime);

    auto next_iter = cmdList.begin();
    // push the commands to DRAMPower
    for ( ; next_iter != cmdList.end() ; ++next_iter) {
        Command cmd = *next_iter;
        if (cmd.timeStamp <= curTick()) {
            // Move all commands at or before curTick to DRAMPower
            power.powerlib.doCommand(cmd.type, cmd.bank,
                                     divCeil(cmd.timeStamp, memory.tCK) -
                                     memory.timeStampOffset);
        } else {
            // done - found all commands at or before curTick()
            // next_iter references the 1st command after curTick
            break;
        }
    }
    // reset cmdList to only contain commands after curTick
    // if there are no commands after curTick, updated cmdList will be empty
    // in this case, next_iter is cmdList.end()
    cmdList.assign(next_iter, cmdList.end());
}
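// Worked example of the tick-to-cycle conversion above (values are
// illustrative, not from any particular config): with tCK = 1250 ticks
// (1.25 ns at the default 1 ps tick, i.e. DDR3-1600) and
// timeStampOffset = 80, a command stamped at tick 101250 is handed to
// DRAMPower at clock cycle divCeil(101250, 1250) - 80 = 81 - 80 = 1,
// making all DRAMPower traces relative to when timing mode started.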
void
DRAMCtrl::Rank::processActivateEvent()
{
    // we should transition to the active state as soon as any bank is active
    if (pwrState != PWR_ACT)
        // note that at this point numBanksActive could be back at
        // zero again due to a precharge scheduled in the future
        schedulePowerEvent(PWR_ACT, curTick());
}

void
DRAMCtrl::Rank::processPrechargeEvent()
{
    // counter should at least indicate one outstanding request
    // for this precharge
    assert(outstandingEvents > 0);
    // precharge complete, decrement count
    --outstandingEvents;

    // if we reached zero, then special conditions apply as we track
    // if all banks are precharged for the power models
    if (numBanksActive == 0) {
        // no reads to this rank in the Q and no pending
        // RD/WR or refresh commands
        if (lowPowerEntryReady()) {
            // should still be in ACT state since bank still open
            assert(pwrState == PWR_ACT);

            // All banks closed - switch to precharge power down state.
            DPRINTF(DRAMState, "Rank %d sleep at tick %d\n",
                    rank, curTick());
            powerDownSleep(PWR_PRE_PDN, curTick());
        } else {
            // we should transition to the idle state when the last bank
            // is precharged
            schedulePowerEvent(PWR_IDLE, curTick());
        }
    }
}

void
DRAMCtrl::Rank::processWriteDoneEvent()
{
    // counter should at least indicate one outstanding request
    // for this write
    assert(outstandingEvents > 0);
    // Write transfer on bus has completed
    // decrement per rank counter
    --outstandingEvents;
}
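// Note on the outstandingEvents counter used above: it is incremented
// whenever an event is scheduled against this rank (activate, read or
// write burst completion, precharge, refresh) and decremented when the
// corresponding event fires. lowPowerEntryReady() treats a non-zero
// count as work in flight and therefore refuses power-down entry,
// except during REF_RUN, where the refresh itself still holds one count.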
void
DRAMCtrl::Rank::processRefreshEvent()
{
    // when first preparing the refresh, remember when it was due
    if ((refreshState == REF_IDLE) || (refreshState == REF_SREF_EXIT)) {
        // remember when the refresh is due
        refreshDueAt = curTick();

        // proceed to drain
        refreshState = REF_DRAIN;

        // make nonzero while refresh is pending to ensure
        // power down and self-refresh are not entered
        ++outstandingEvents;

        DPRINTF(DRAM, "Refresh due\n");
    }

    // let any scheduled read or write to the same rank go ahead, after
    // which it will hand control back to this event loop
    if (refreshState == REF_DRAIN) {
        // if a request is at the moment being handled and this request is
        // accessing the current rank then wait for it to finish
        if ((rank == memory.activeRank)
            && (memory.nextReqEvent.scheduled())) {
            // hand control over to the request loop until it is
            // evaluated next
            DPRINTF(DRAM, "Refresh awaiting draining\n");

            return;
        } else {
            refreshState = REF_PD_EXIT;
        }
    }

    // at this point, ensure that rank is not in a power-down state
    if (refreshState == REF_PD_EXIT) {
        // if rank was sleeping and we haven't started the exit process,
        // wake up for refresh
        if (inLowPowerState) {
            DPRINTF(DRAM, "Wake Up for refresh\n");
            // save state and return after refresh completes
            scheduleWakeUpEvent(memory.tXP);
            return;
        } else {
            refreshState = REF_PRE;
        }
    }

    // at this point, ensure that all banks are precharged
    if (refreshState == REF_PRE) {
        // precharge any active bank
        if (numBanksActive != 0) {
            // at the moment, we use a precharge all even if there is
            // only a single bank open
            DPRINTF(DRAM, "Precharging all\n");

            // first determine when we can precharge
            Tick pre_at = curTick();

            for (auto &b : banks) {
                // respect both causality and any existing bank
                // constraints, some banks could already have a
                // (auto) precharge scheduled
                pre_at = std::max(b.preAllowedAt, pre_at);
            }

            // make sure all banks per rank are precharged, and for those that
            // already are, update their availability
            Tick act_allowed_at = pre_at + memory.tRP;

            for (auto &b : banks) {
                if (b.openRow != Bank::NO_ROW) {
                    memory.prechargeBank(*this, b, pre_at, false);
                } else {
                    b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at);
                    b.preAllowedAt = std::max(b.preAllowedAt, pre_at);
                }
            }

            // precharge all banks in rank
            cmdList.push_back(Command(MemCommand::PREA, 0, pre_at));

            DPRINTF(DRAMPower, "%llu,PREA,0,%d\n",
                    divCeil(pre_at, memory.tCK) -
                    memory.timeStampOffset, rank);
        } else if ((pwrState == PWR_IDLE) && (outstandingEvents == 1)) {
            // Banks are closed, have transitioned to IDLE state, and
            // no outstanding ACT,RD/WR,Auto-PRE sequence scheduled
            DPRINTF(DRAM, "All banks already precharged, starting refresh\n");

            // go ahead and kick the power state machine into gear since
            // we are already idle
            schedulePowerEvent(PWR_REF, curTick());
        } else {
            // banks are closed, but we have not yet transitioned pwrState
            // to IDLE, or there is an outstanding ACT,RD/WR,Auto-PRE
            // sequence scheduled; there should be an outstanding precharge
            // event in this case
            assert(prechargeEvent.scheduled());
            // will start refresh when pwrState transitions to IDLE
        }

        assert(numBanksActive == 0);

        // wait for all banks to be precharged, at which point the
        // power state machine will transition to the idle state, and
        // automatically move to a refresh, at that point it will also
        // call this method to get the refresh event loop going again
        return;
    }

    // last but not least we perform the actual refresh
    if (refreshState == REF_START) {
        // should never get here with any banks active
        assert(numBanksActive == 0);
        assert(pwrState == PWR_REF);

        Tick ref_done_at = curTick() + memory.tRFC;

        for (auto &b : banks) {
            b.actAllowedAt = ref_done_at;
        }

        // at the moment this affects all ranks
        cmdList.push_back(Command(MemCommand::REF, 0, curTick()));

        // Update the stats
        updatePowerStats();

        DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), memory.tCK) -
                memory.timeStampOffset, rank);

        // Update for next refresh
        refreshDueAt += memory.tREFI;

        // make sure we did not wait so long that we cannot make up
        // for it
        if (refreshDueAt < ref_done_at) {
            fatal("Refresh was delayed so long we cannot catch up\n");
        }

        // Run the refresh and schedule event to transition power states
        // when refresh completes
        refreshState = REF_RUN;
        schedule(refreshEvent, ref_done_at);
        return;
    }

    if (refreshState == REF_RUN) {
        // should never get here with any banks active
        assert(numBanksActive == 0);
        assert(pwrState == PWR_REF);

        assert(!powerEvent.scheduled());

        if ((memory.drainState() == DrainState::Draining) ||
            (memory.drainState() == DrainState::Drained)) {
            // if draining, do not re-enter low-power mode.
            // simply go to IDLE and wait
            schedulePowerEvent(PWR_IDLE, curTick());
        } else {
            // At the moment, we sleep when the refresh ends and wait to be
            // woken up again if previously in a low-power state.
            if (pwrStatePostRefresh != PWR_IDLE) {
                // power state should be power refresh
                assert(pwrState == PWR_REF);
                DPRINTF(DRAMState, "Rank %d sleeping after refresh and was in "
                        "power state %d before refreshing\n", rank,
                        pwrStatePostRefresh);
                powerDownSleep(pwrState, curTick());

                // Force PRE power-down if there are no outstanding commands
                // in Q after refresh.
            } else if (lowPowerEntryReady()) {
                DPRINTF(DRAMState, "Rank %d sleeping after refresh but was NOT"
                        " in a low power state before refreshing\n", rank);
                powerDownSleep(PWR_PRE_PDN, curTick());

            } else {
                // move to the idle power state once the refresh is done, this
                // will also move the refresh state machine to the refresh
                // idle state
                schedulePowerEvent(PWR_IDLE, curTick());
            }
        }

        // if transitioning to self refresh do not schedule a new refresh;
        // when waking from self refresh, a refresh is scheduled again.
        if (pwrStateTrans != PWR_SREF) {
            // compensate for the delay in actually performing the refresh
            // when scheduling the next one
            schedule(refreshEvent, refreshDueAt - memory.tRP);

            DPRINTF(DRAMState, "Refresh done at %llu and next refresh"
                    " at %llu\n", curTick(), refreshDueAt);
        }
    }
}
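// Summary of the refresh state machine implemented above, as a reading
// aid derived from the code:
//
//   REF_IDLE / REF_SREF_EXIT -> REF_DRAIN   refresh due; wait for any
//                                           in-flight access to this rank
//   REF_DRAIN   -> REF_PD_EXIT              wake the rank if powered down
//   REF_PD_EXIT -> REF_PRE                  precharge-all any open banks
//   REF_PRE     -> REF_START                via the power state machine,
//                                           once pwrState reaches PWR_REF
//   REF_START   -> REF_RUN                  REF issued; done after tRFC
//   REF_RUN     -> REF_IDLE (or low power)  next refresh is scheduled tRP
//                                           early, at refreshDueAt - tRP,
//                                           to leave time to precharge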
void
DRAMCtrl::Rank::schedulePowerEvent(PowerState pwr_state, Tick tick)
{
    // respect causality
    assert(tick >= curTick());

    if (!powerEvent.scheduled()) {
        DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n",
                tick, pwr_state);

        // insert the new transition
        pwrStateTrans = pwr_state;

        schedule(powerEvent, tick);
    } else {
        panic("Scheduled power event at %llu to state %d, "
              "with scheduled event at %llu to %d\n", tick, pwr_state,
              powerEvent.when(), pwrStateTrans);
    }
}

void
DRAMCtrl::Rank::powerDownSleep(PowerState pwr_state, Tick tick)
{
    // if the requested state is active power-down, schedule a transition
    // to the active low-power state. In reality tCKE is needed to enter
    // active power-down. This is neglected here and could be added in
    // the future.
    if (pwr_state == PWR_ACT_PDN) {
        schedulePowerEvent(pwr_state, tick);
        // push command to DRAMPower
        cmdList.push_back(Command(MemCommand::PDN_F_ACT, 0, tick));
        DPRINTF(DRAMPower, "%llu,PDN_F_ACT,0,%d\n", divCeil(tick,
                memory.tCK) - memory.timeStampOffset, rank);
    } else if (pwr_state == PWR_PRE_PDN) {
        // if the requested state is precharge power-down, schedule a
        // transition to the precharge low-power state. In reality tCKE is
        // needed to enter precharge power-down. This is neglected here.
        schedulePowerEvent(pwr_state, tick);
        // push command to DRAMPower
        cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));
        DPRINTF(DRAMPower, "%llu,PDN_F_PRE,0,%d\n", divCeil(tick,
                memory.tCK) - memory.timeStampOffset, rank);
    } else if (pwr_state == PWR_REF) {
        // if a refresh just occurred
        // transition to PRE_PDN now that all banks are closed
        // do not transition to SREF if commands are in Q; stay in PRE_PDN
        if (pwrStatePostRefresh == PWR_ACT_PDN || !lowPowerEntryReady()) {
            // precharge power-down requires tCKE to enter. For simplicity
            // this is not considered.
            schedulePowerEvent(PWR_PRE_PDN, tick);
            // push command to DRAMPower
            cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));
            DPRINTF(DRAMPower, "%llu,PDN_F_PRE,0,%d\n", divCeil(tick,
                    memory.tCK) - memory.timeStampOffset, rank);
        } else {
            // last low-power state was precharge power-down
            assert(pwrStatePostRefresh == PWR_PRE_PDN);
            // self refresh requires time tCKESR to enter. For simplicity,
            // this is not considered.
            schedulePowerEvent(PWR_SREF, tick);
            // push command to DRAMPower
            cmdList.push_back(Command(MemCommand::SREN, 0, tick));
            DPRINTF(DRAMPower, "%llu,SREN,0,%d\n", divCeil(tick,
                    memory.tCK) - memory.timeStampOffset, rank);
        }
    }
    // Ensure that we don't power-down and back up in same tick
    // Once we commit to PD entry, do it and wait for at least 1 tCK
    // This could be replaced with tCKE if/when that is added to the model
    wakeUpAllowedAt = tick + memory.tCK;

    // Transitioning to a low power state, set flag
    inLowPowerState = true;
}
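// For reference, the power states above map onto DRAMPower commands as
// follows (entry command on power-down, exit command issued by the
// wake-up path further below):
//
//   PWR_ACT_PDN : PDN_F_ACT / PUP_ACT   (fast-exit active power-down)
//   PWR_PRE_PDN : PDN_F_PRE / PUP_PRE   (fast-exit precharge power-down)
//   PWR_SREF    : SREN      / SREX      (self-refresh entry/exit)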
void
DRAMCtrl::Rank::scheduleWakeUpEvent(Tick exit_delay)
{
    Tick wake_up_tick = std::max(curTick(), wakeUpAllowedAt);

    DPRINTF(DRAMState, "Scheduling wake-up for rank %d at tick %d\n",
            rank, wake_up_tick);

    // if waking for refresh, hold previous state
    // else reset state back to IDLE
    if (refreshState == REF_PD_EXIT) {
        pwrStatePostRefresh = pwrState;
    } else {
        // don't automatically transition back to LP state after next REF
        pwrStatePostRefresh = PWR_IDLE;
    }

    // schedule the wake-up as an event to ensure entry has completed
    // before we try to wake up
    schedule(wakeUpEvent, wake_up_tick);

    for (auto &b : banks) {
        // respect both causality and any existing bank
        // constraints, some banks could already have a
        // (auto) precharge scheduled
        b.colAllowedAt = std::max(wake_up_tick + exit_delay, b.colAllowedAt);
        b.preAllowedAt = std::max(wake_up_tick + exit_delay, b.preAllowedAt);
        b.actAllowedAt = std::max(wake_up_tick + exit_delay, b.actAllowedAt);
    }
    // Transitioning out of low power state, clear flag
    inLowPowerState = false;

    // push to DRAMPower
    // use pwrStateTrans for cases where we have a power event scheduled
    // to enter low power that has not yet been processed
    if (pwrStateTrans == PWR_ACT_PDN) {
        cmdList.push_back(Command(MemCommand::PUP_ACT, 0, wake_up_tick));
        DPRINTF(DRAMPower, "%llu,PUP_ACT,0,%d\n", divCeil(wake_up_tick,
                memory.tCK) - memory.timeStampOffset, rank);

    } else if (pwrStateTrans == PWR_PRE_PDN) {
        cmdList.push_back(Command(MemCommand::PUP_PRE, 0, wake_up_tick));
        DPRINTF(DRAMPower, "%llu,PUP_PRE,0,%d\n", divCeil(wake_up_tick,
                memory.tCK) - memory.timeStampOffset, rank);
    } else if (pwrStateTrans == PWR_SREF) {
        cmdList.push_back(Command(MemCommand::SREX, 0, wake_up_tick));
        DPRINTF(DRAMPower, "%llu,SREX,0,%d\n", divCeil(wake_up_tick,
                memory.tCK) - memory.timeStampOffset, rank);
    }
}
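// The exit_delay passed in above depends on the state being left:
// callers use tXP when waking from a power-down state (see the refresh
// path) and tXS when forcing a self-refresh exit during drain, so the
// per-bank colAllowedAt/preAllowedAt/actAllowedAt times are pushed out
// by the appropriate exit latency.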
void
DRAMCtrl::Rank::processWakeUpEvent()
{
    // Should be in a power-down or self-refresh state
    assert((pwrState == PWR_ACT_PDN) || (pwrState == PWR_PRE_PDN) ||
           (pwrState == PWR_SREF));

    // Check current state to determine transition state
    if (pwrState == PWR_ACT_PDN) {
        // banks still open, transition to PWR_ACT
        schedulePowerEvent(PWR_ACT, curTick());
    } else {
        // transitioning from a precharge power-down or self-refresh state
        // banks are closed - transition to PWR_IDLE
        schedulePowerEvent(PWR_IDLE, curTick());
    }
}

void
DRAMCtrl::Rank::processPowerEvent()
{
    assert(curTick() >= pwrStateTick);
    // remember where we were, and for how long
    Tick duration = curTick() - pwrStateTick;
    PowerState prev_state = pwrState;

    // update the accounting
    pwrStateTime[prev_state] += duration;

    // track the total idle time
    if ((prev_state == PWR_PRE_PDN) || (prev_state == PWR_ACT_PDN) ||
        (prev_state == PWR_SREF)) {
        totalIdleTime += duration;
    }

    pwrState = pwrStateTrans;
    pwrStateTick = curTick();

    // if rank was refreshing, make sure to start scheduling requests again
    if (prev_state == PWR_REF) {
        // bus IDLED prior to REF
        // counter should be one for refresh command only
        assert(outstandingEvents == 1);
        // REF complete, decrement count
        --outstandingEvents;

        DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration);
        // if sleeping after refresh
        if (pwrState != PWR_IDLE) {
            assert((pwrState == PWR_PRE_PDN) || (pwrState == PWR_SREF));
            DPRINTF(DRAMState, "Switching to power down state after refreshing"
                    " rank %d at tick %llu\n", rank, curTick());
        }
        if (pwrState != PWR_SREF) {
            // rank is not available in SREF
            // don't transition to IDLE in this case
            refreshState = REF_IDLE;
        }
        // a request event could be already scheduled by the state
        // machine of the other rank
        if (!memory.nextReqEvent.scheduled()) {
            DPRINTF(DRAM, "Scheduling next request after refreshing rank %d\n",
                    rank);
            schedule(memory.nextReqEvent, curTick());
        }
    } else if (pwrState == PWR_ACT) {
        if (refreshState == REF_PD_EXIT) {
            // kick the refresh event loop into action again
            assert(prev_state == PWR_ACT_PDN);

            // go back to REF event and close banks
            refreshState = REF_PRE;
            schedule(refreshEvent, curTick());
        }
    } else if (pwrState == PWR_IDLE) {
        DPRINTF(DRAMState, "All banks precharged\n");
        if (prev_state == PWR_SREF) {
            // set refresh state to REF_SREF_EXIT, ensuring isAvailable
            // continues to return false during tXS after SREF exit
            // Schedule a refresh which kicks things back into action
            // when it finishes
            refreshState = REF_SREF_EXIT;
            schedule(refreshEvent, curTick() + memory.tXS);
        } else {
            // if we have a pending refresh, and are now moving to
            // the idle state, directly transition to a refresh
            if ((refreshState == REF_PRE) || (refreshState == REF_PD_EXIT)) {
                // ensure refresh is restarted only after final PRE command.
                // do not restart refresh if controller is in an intermediate
                // state, after PRE_PDN exit, when banks are IDLE but an
                // ACT is scheduled.
                if (!activateEvent.scheduled()) {
                    // there should be nothing waiting at this point
                    assert(!powerEvent.scheduled());
                    // update the state in zero time and proceed below
                    pwrState = PWR_REF;
                } else {
                    // must have PRE scheduled to transition back to IDLE
                    // and re-kick off refresh
                    assert(prechargeEvent.scheduled());
                }
            }
        }
    }

    // we transition to the refresh state, let the refresh state
    // machine know of this state update and let it deal with the
    // scheduling of the next power state transition as well as the
    // following refresh
    if (pwrState == PWR_REF) {
        assert(refreshState == REF_PRE || refreshState == REF_PD_EXIT);
        DPRINTF(DRAMState, "Refreshing\n");

        // kick the refresh event loop into action again, and that
        // in turn will schedule a transition to the idle power
        // state once the refresh is done
        if (refreshState == REF_PD_EXIT) {
            // Wait for PD exit timing to complete before issuing REF
            schedule(refreshEvent, curTick() + memory.tXP);
        } else {
            schedule(refreshEvent, curTick());
        }
        // Banks transitioned to IDLE, start REF
        refreshState = REF_START;
    }
}

void
DRAMCtrl::Rank::updatePowerStats()
{
    // All commands up to refresh have completed
    // flush cmdList to DRAMPower
    flushCmdList();

    // update the counters for DRAMPower, passing false to
    // indicate that this is not the last command in the
    // list. DRAMPower requires this information for the
    // correct calculation of the background energy at the end
    // of the simulation. Ideally we would want to call this
    // function with true once at the end of the
    // simulation. However, the discarded energy is extremely
    // small and does not affect the final results.
    power.powerlib.updateCounters(false);

    // call the energy function
    power.powerlib.calcEnergy();

    // Get the energy and power from DRAMPower
    Data::MemoryPowerModel::Energy energy =
        power.powerlib.getEnergy();
    Data::MemoryPowerModel::Power rank_power =
        power.powerlib.getPower();

    actEnergy = energy.act_energy * memory.devicesPerRank;
    preEnergy = energy.pre_energy * memory.devicesPerRank;
    readEnergy = energy.read_energy * memory.devicesPerRank;
    writeEnergy = energy.write_energy * memory.devicesPerRank;
    refreshEnergy = energy.ref_energy * memory.devicesPerRank;
    actBackEnergy = energy.act_stdby_energy * memory.devicesPerRank;
    preBackEnergy = energy.pre_stdby_energy * memory.devicesPerRank;
    actPowerDownEnergy = energy.f_act_pd_energy * memory.devicesPerRank;
    prePowerDownEnergy = energy.f_pre_pd_energy * memory.devicesPerRank;
    selfRefreshEnergy = energy.sref_energy * memory.devicesPerRank;
    totalEnergy = energy.total_energy * memory.devicesPerRank;
    averagePower = rank_power.average_power * memory.devicesPerRank;
}
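// Note on the scaling above: DRAMPower reports energy for a single DRAM
// device, so every term is multiplied by devicesPerRank to cover the
// whole rank. As an illustrative example, a 64-bit data bus built from
// x8 devices has devicesPerRank = 8, so a reported 100 pJ per activate
// becomes 800 pJ for the rank.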
void
DRAMCtrl::Rank::computeStats()
{
    DPRINTF(DRAM, "Computing final stats\n");

    // Force DRAM power to update counters based on time spent in
    // current state up to curTick()
    cmdList.push_back(Command(MemCommand::NOP, 0, curTick()));

    // Update the stats
    updatePowerStats();

    // final update of power state times
    pwrStateTime[pwrState] += (curTick() - pwrStateTick);
    pwrStateTick = curTick();
}

void
DRAMCtrl::Rank::regStats()
{
    using namespace Stats;

    pwrStateTime
        .init(6)
        .name(name() + ".memoryStateTime")
        .desc("Time in different power states");
    pwrStateTime.subname(0, "IDLE");
    pwrStateTime.subname(1, "REF");
    pwrStateTime.subname(2, "SREF");
    pwrStateTime.subname(3, "PRE_PDN");
    pwrStateTime.subname(4, "ACT");
    pwrStateTime.subname(5, "ACT_PDN");

    actEnergy
        .name(name() + ".actEnergy")
        .desc("Energy for activate commands per rank (pJ)");

    preEnergy
        .name(name() + ".preEnergy")
        .desc("Energy for precharge commands per rank (pJ)");

    readEnergy
        .name(name() + ".readEnergy")
        .desc("Energy for read commands per rank (pJ)");

    writeEnergy
        .name(name() + ".writeEnergy")
        .desc("Energy for write commands per rank (pJ)");

    refreshEnergy
        .name(name() + ".refreshEnergy")
        .desc("Energy for refresh commands per rank (pJ)");

    actBackEnergy
        .name(name() + ".actBackEnergy")
        .desc("Energy for active background per rank (pJ)");

    preBackEnergy
        .name(name() + ".preBackEnergy")
        .desc("Energy for precharge background per rank (pJ)");

    actPowerDownEnergy
        .name(name() + ".actPowerDownEnergy")
        .desc("Energy for active power-down per rank (pJ)");

    prePowerDownEnergy
        .name(name() + ".prePowerDownEnergy")
        .desc("Energy for precharge power-down per rank (pJ)");

    selfRefreshEnergy
        .name(name() + ".selfRefreshEnergy")
        .desc("Energy for self refresh per rank (pJ)");

    totalEnergy
        .name(name() + ".totalEnergy")
        .desc("Total energy per rank (pJ)");

    averagePower
        .name(name() + ".averagePower")
        .desc("Core power per rank (mW)");

    totalIdleTime
        .name(name() + ".totalIdleTime")
        .desc("Total idle time per DRAM rank");

    registerDumpCallback(new RankDumpCallback(this));
}

void
DRAMCtrl::regStats()
{
    using namespace Stats;

    AbstractMemory::regStats();

    for (auto r : ranks) {
        r->regStats();
    }

    readReqs
        .name(name() + ".readReqs")
        .desc("Number of read requests accepted");

    writeReqs
        .name(name() + ".writeReqs")
        .desc("Number of write requests accepted");

    readBursts
        .name(name() + ".readBursts")
        .desc("Number of DRAM read bursts, "
              "including those serviced by the write queue");

    writeBursts
        .name(name() + ".writeBursts")
        .desc("Number of DRAM write bursts, "
              "including those merged in the write queue");

    servicedByWrQ
        .name(name() + ".servicedByWrQ")
        .desc("Number of DRAM read bursts serviced by the write queue");

    mergedWrBursts
        .name(name() + ".mergedWrBursts")
        .desc("Number of DRAM write bursts merged with an existing one");

    neitherReadNorWrite
        .name(name() + ".neitherReadNorWriteReqs")
        .desc("Number of requests that are neither read nor write");

    perBankRdBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankRdBursts")
        .desc("Per bank read bursts");

    perBankWrBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankWrBursts")
        .desc("Per bank write bursts");

    avgRdQLen
        .name(name() + ".avgRdQLen")
        .desc("Average read queue length when enqueuing")
        .precision(2);

    avgWrQLen
        .name(name() + ".avgWrQLen")
        .desc("Average write queue length when enqueuing")
        .precision(2);

    totQLat
        .name(name() + ".totQLat")
        .desc("Total ticks spent queuing");

    totBusLat
        .name(name() + ".totBusLat")
        .desc("Total ticks spent in databus transfers");

    totMemAccLat
        .name(name() + ".totMemAccLat")
        .desc("Total ticks spent from burst creation until serviced "
              "by the DRAM");

    avgQLat
        .name(name() + ".avgQLat")
        .desc("Average queueing delay per DRAM burst")
        .precision(2);

    avgQLat = totQLat / (readBursts - servicedByWrQ);

    avgBusLat
        .name(name() + ".avgBusLat")
        .desc("Average bus latency per DRAM burst")
        .precision(2);

    avgBusLat = totBusLat / (readBursts - servicedByWrQ);

    avgMemAccLat
        .name(name() + ".avgMemAccLat")
        .desc("Average memory access latency per DRAM burst")
        .precision(2);

    avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ);
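    // The denominator (readBursts - servicedByWrQ) in the three formulas
    // above deliberately excludes read bursts that were serviced out of
    // the write queue: those never reach the DRAM and thus accumulate
    // neither queueing, bus, nor access latency.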
    numRdRetry
        .name(name() + ".numRdRetry")
        .desc("Number of times read queue was full causing retry");

    numWrRetry
        .name(name() + ".numWrRetry")
        .desc("Number of times write queue was full causing retry");

    readRowHits
        .name(name() + ".readRowHits")
        .desc("Number of row buffer hits during reads");

    writeRowHits
        .name(name() + ".writeRowHits")
        .desc("Number of row buffer hits during writes");

    readRowHitRate
        .name(name() + ".readRowHitRate")
        .desc("Row buffer hit rate for reads")
        .precision(2);

    readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100;

    writeRowHitRate
        .name(name() + ".writeRowHitRate")
        .desc("Row buffer hit rate for writes")
        .precision(2);

    writeRowHitRate = (writeRowHits / (writeBursts - mergedWrBursts)) * 100;

    readPktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".readPktSize")
        .desc("Read request sizes (log2)");

    writePktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".writePktSize")
        .desc("Write request sizes (log2)");

    rdQLenPdf
        .init(readBufferSize)
        .name(name() + ".rdQLenPdf")
        .desc("What read queue length does an incoming req see");

    wrQLenPdf
        .init(writeBufferSize)
        .name(name() + ".wrQLenPdf")
        .desc("What write queue length does an incoming req see");

    bytesPerActivate
        .init(maxAccessesPerRow)
        .name(name() + ".bytesPerActivate")
        .desc("Bytes accessed per row activation")
        .flags(nozero);

    rdPerTurnAround
        .init(readBufferSize)
        .name(name() + ".rdPerTurnAround")
        .desc("Reads before turning the bus around for writes")
        .flags(nozero);

    wrPerTurnAround
        .init(writeBufferSize)
        .name(name() + ".wrPerTurnAround")
        .desc("Writes before turning the bus around for reads")
        .flags(nozero);

    bytesReadDRAM
        .name(name() + ".bytesReadDRAM")
        .desc("Total number of bytes read from DRAM");

    bytesReadWrQ
        .name(name() + ".bytesReadWrQ")
        .desc("Total number of bytes read from write queue");

    bytesWritten
        .name(name() + ".bytesWritten")
        .desc("Total number of bytes written to DRAM");

    bytesReadSys
        .name(name() + ".bytesReadSys")
        .desc("Total read bytes from the system interface side");

    bytesWrittenSys
        .name(name() + ".bytesWrittenSys")
        .desc("Total written bytes from the system interface side");

    avgRdBW
        .name(name() + ".avgRdBW")
        .desc("Average DRAM read bandwidth in MByte/s")
        .precision(2);

    avgRdBW = (bytesReadDRAM / 1000000) / simSeconds;

    avgWrBW
        .name(name() + ".avgWrBW")
        .desc("Average achieved write bandwidth in MByte/s")
        .precision(2);

    avgWrBW = (bytesWritten / 1000000) / simSeconds;

    avgRdBWSys
        .name(name() + ".avgRdBWSys")
        .desc("Average system read bandwidth in MByte/s")
        .precision(2);

    avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;

    avgWrBWSys
        .name(name() + ".avgWrBWSys")
        .desc("Average system write bandwidth in MByte/s")
        .precision(2);

    avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;

    peakBW
        .name(name() + ".peakBW")
        .desc("Theoretical peak bandwidth in MByte/s")
        .precision(2);

    peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000;
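    // Worked example for the peak bandwidth formula above (illustrative
    // DDR3-1600 x64 numbers): SimClock::Frequency is the number of ticks
    // per second (1e12 with picosecond ticks), so with tBURST = 5000
    // ticks (5 ns) and burstSize = 64 bytes this yields
    // (1e12 / 5000) * 64 / 1e6 = 12800 MByte/s, i.e. 12.8 GB/s.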
    busUtil
        .name(name() + ".busUtil")
        .desc("Data bus utilization in percentage")
        .precision(2);
    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;

    totGap
        .name(name() + ".totGap")
        .desc("Total gap between requests");

    avgGap
        .name(name() + ".avgGap")
        .desc("Average gap between requests")
        .precision(2);

    avgGap = totGap / (readReqs + writeReqs);

    // Stats for DRAM Power calculation based on Micron datasheet
    busUtilRead
        .name(name() + ".busUtilRead")
        .desc("Data bus utilization in percentage for reads")
        .precision(2);

    busUtilRead = avgRdBW / peakBW * 100;

    busUtilWrite
        .name(name() + ".busUtilWrite")
        .desc("Data bus utilization in percentage for writes")
        .precision(2);

    busUtilWrite = avgWrBW / peakBW * 100;

    pageHitRate
        .name(name() + ".pageHitRate")
        .desc("Row buffer hit rate, read and write combined")
        .precision(2);

    pageHitRate = (writeRowHits + readRowHits) /
        (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100;
}

void
DRAMCtrl::recvFunctional(PacketPtr pkt)
{
    // rely on the abstract memory
    functionalAccess(pkt);
}

BaseSlavePort&
DRAMCtrl::getSlavePort(const string &if_name, PortID idx)
{
    if (if_name != "port") {
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return port;
    }
}

DrainState
DRAMCtrl::drain()
{
    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (!(writeQueue.empty() && readQueue.empty() && respQueue.empty() &&
          allRanksDrained())) {

        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
                " resp: %d\n", writeQueue.size(), readQueue.size(),
                respQueue.size());

        // the only queue that is not drained automatically over time
        // is the write queue, thus kick things into action if needed
        if (!writeQueue.empty() && !nextReqEvent.scheduled()) {
            schedule(nextReqEvent, curTick());
        }

        // also need to kick off events to exit self-refresh
        for (auto r : ranks) {
            // force self-refresh exit, which in turn will issue auto-refresh
            if (r->pwrState == PWR_SREF) {
                DPRINTF(DRAM, "Rank%d: Forcing self-refresh wakeup in drain\n",
                        r->rank);
                r->scheduleWakeUpEvent(tXS);
            }
        }

        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}

bool
DRAMCtrl::allRanksDrained() const
{
    // true until proven false
    bool all_ranks_drained = true;
    for (auto r : ranks) {
        // then verify that the power state is IDLE
        // ensuring all banks are closed and rank is not in a low power state
        all_ranks_drained = r->inPwrIdleState() && all_ranks_drained;
    }
    return all_ranks_drained;
}

void
DRAMCtrl::drainResume()
{
    if (!isTimingMode && system()->isTimingMode()) {
        // if we switched to timing mode, kick things into action,
        // and behave as if we restored from a checkpoint
        startup();
    } else if (isTimingMode && !system()->isTimingMode()) {
        // if we switch from timing mode, stop the refresh events to
        // not cause issues with KVM
        for (auto r : ranks) {
            r->suspend();
        }
    }

    // update the mode
    isTimingMode = system()->isTimingMode();
}

DRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _memory)
    : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this),
      memory(_memory)
{ }

AddrRangeList
DRAMCtrl::MemoryPort::getAddrRanges() const
{
    AddrRangeList ranges;
    ranges.push_back(memory.getAddrRange());
    return ranges;
}

void
DRAMCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(memory.name());

    if (!queue.checkFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory.recvFunctional(pkt);
    }

    pkt->popLabel();
}

Tick
DRAMCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory.recvAtomic(pkt);
}

bool
DRAMCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller
    return memory.recvTimingReq(pkt);
}

DRAMCtrl*
DRAMCtrlParams::create()
{
    return new DRAMCtrl(this);
}
| 55#include "sim/system.hh" 56 57using namespace std; 58using namespace Data; 59 60DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) : 61 AbstractMemory(p), 62 port(name() + ".port", *this), isTimingMode(false), 63 retryRdReq(false), retryWrReq(false), 64 busState(READ), 65 busStateNext(READ), 66 nextReqEvent(this), respondEvent(this), 67 deviceSize(p->device_size), 68 deviceBusWidth(p->device_bus_width), burstLength(p->burst_length), 69 deviceRowBufferSize(p->device_rowbuffer_size), 70 devicesPerRank(p->devices_per_rank), 71 burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8), 72 rowBufferSize(devicesPerRank * deviceRowBufferSize), 73 columnsPerRowBuffer(rowBufferSize / burstSize), 74 columnsPerStripe(range.interleaved() ? range.granularity() / burstSize : 1), 75 ranksPerChannel(p->ranks_per_channel), 76 bankGroupsPerRank(p->bank_groups_per_rank), 77 bankGroupArch(p->bank_groups_per_rank > 0), 78 banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0), 79 readBufferSize(p->read_buffer_size), 80 writeBufferSize(p->write_buffer_size), 81 writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0), 82 writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0), 83 minWritesPerSwitch(p->min_writes_per_switch), 84 writesThisTime(0), readsThisTime(0), 85 tCK(p->tCK), tWTR(p->tWTR), tRTW(p->tRTW), tCS(p->tCS), tBURST(p->tBURST), 86 tCCD_L(p->tCCD_L), tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS), 87 tWR(p->tWR), tRTP(p->tRTP), tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD), 88 tRRD_L(p->tRRD_L), tXAW(p->tXAW), tXP(p->tXP), tXS(p->tXS), 89 activationLimit(p->activation_limit), 90 memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping), 91 pageMgmt(p->page_policy), 92 maxAccessesPerRow(p->max_accesses_per_row), 93 frontendLatency(p->static_frontend_latency), 94 backendLatency(p->static_backend_latency), 95 busBusyUntil(0), prevArrival(0), 96 nextReqTime(0), activeRank(0), timeStampOffset(0) 97{ 98 // sanity check the ranks since we rely on bit slicing for the 99 // address decoding 100 fatal_if(!isPowerOf2(ranksPerChannel), "DRAM rank count of %d is not " 101 "allowed, must be a power of two\n", ranksPerChannel); 102 103 fatal_if(!isPowerOf2(burstSize), "DRAM burst size %d is not allowed, " 104 "must be a power of two\n", burstSize); 105 106 for (int i = 0; i < ranksPerChannel; i++) { 107 Rank* rank = new Rank(*this, p); 108 ranks.push_back(rank); 109 110 rank->actTicks.resize(activationLimit, 0); 111 rank->banks.resize(banksPerRank); 112 rank->rank = i; 113 114 for (int b = 0; b < banksPerRank; b++) { 115 rank->banks[b].bank = b; 116 // GDDR addressing of banks to BG is linear. 117 // Here we assume that all DRAM generations address bank groups as 118 // follows: 119 if (bankGroupArch) { 120 // Simply assign lower bits to bank group in order to 121 // rotate across bank groups as banks are incremented 122 // e.g. 
with 4 banks per bank group and 16 banks total: 123 // banks 0,4,8,12 are in bank group 0 124 // banks 1,5,9,13 are in bank group 1 125 // banks 2,6,10,14 are in bank group 2 126 // banks 3,7,11,15 are in bank group 3 127 rank->banks[b].bankgr = b % bankGroupsPerRank; 128 } else { 129 // No bank groups; simply assign to bank number 130 rank->banks[b].bankgr = b; 131 } 132 } 133 } 134 135 // perform a basic check of the write thresholds 136 if (p->write_low_thresh_perc >= p->write_high_thresh_perc) 137 fatal("Write buffer low threshold %d must be smaller than the " 138 "high threshold %d\n", p->write_low_thresh_perc, 139 p->write_high_thresh_perc); 140 141 // determine the rows per bank by looking at the total capacity 142 uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size()); 143 144 // determine the dram actual capacity from the DRAM config in Mbytes 145 uint64_t deviceCapacity = deviceSize / (1024 * 1024) * devicesPerRank * 146 ranksPerChannel; 147 148 // if actual DRAM size does not match memory capacity in system warn! 149 if (deviceCapacity != capacity / (1024 * 1024)) 150 warn("DRAM device capacity (%d Mbytes) does not match the " 151 "address range assigned (%d Mbytes)\n", deviceCapacity, 152 capacity / (1024 * 1024)); 153 154 DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity, 155 AbstractMemory::size()); 156 157 DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n", 158 rowBufferSize, columnsPerRowBuffer); 159 160 rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel); 161 162 // some basic sanity checks 163 if (tREFI <= tRP || tREFI <= tRFC) { 164 fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n", 165 tREFI, tRP, tRFC); 166 } 167 168 // basic bank group architecture checks -> 169 if (bankGroupArch) { 170 // must have at least one bank per bank group 171 if (bankGroupsPerRank > banksPerRank) { 172 fatal("banks per rank (%d) must be equal to or larger than " 173 "banks groups per rank (%d)\n", 174 banksPerRank, bankGroupsPerRank); 175 } 176 // must have same number of banks in each bank group 177 if ((banksPerRank % bankGroupsPerRank) != 0) { 178 fatal("Banks per rank (%d) must be evenly divisible by bank groups " 179 "per rank (%d) for equal banks per bank group\n", 180 banksPerRank, bankGroupsPerRank); 181 } 182 // tCCD_L should be greater than minimal, back-to-back burst delay 183 if (tCCD_L <= tBURST) { 184 fatal("tCCD_L (%d) should be larger than tBURST (%d) when " 185 "bank groups per rank (%d) is greater than 1\n", 186 tCCD_L, tBURST, bankGroupsPerRank); 187 } 188 // tRRD_L is greater than minimal, same bank group ACT-to-ACT delay 189 // some datasheets might specify it equal to tRRD 190 if (tRRD_L < tRRD) { 191 fatal("tRRD_L (%d) should be larger than tRRD (%d) when " 192 "bank groups per rank (%d) is greater than 1\n", 193 tRRD_L, tRRD, bankGroupsPerRank); 194 } 195 } 196 197} 198 199void 200DRAMCtrl::init() 201{ 202 AbstractMemory::init(); 203 204 if (!port.isConnected()) { 205 fatal("DRAMCtrl %s is unconnected!\n", name()); 206 } else { 207 port.sendRangeChange(); 208 } 209 210 // a bit of sanity checks on the interleaving, save it for here to 211 // ensure that the system pointer is initialised 212 if (range.interleaved()) { 213 if (channels != range.stripes()) 214 fatal("%s has %d interleaved address stripes but %d channel(s)\n", 215 name(), range.stripes(), channels); 216 217 if (addrMapping == Enums::RoRaBaChCo) { 218 if (rowBufferSize != range.granularity()) { 219 fatal("Channel interleaving of 
%s doesn't match RoRaBaChCo " 220 "address map\n", name()); 221 } 222 } else if (addrMapping == Enums::RoRaBaCoCh || 223 addrMapping == Enums::RoCoRaBaCh) { 224 // for the interleavings with channel bits in the bottom, 225 // if the system uses a channel striping granularity that 226 // is larger than the DRAM burst size, then map the 227 // sequential accesses within a stripe to a number of 228 // columns in the DRAM, effectively placing some of the 229 // lower-order column bits as the least-significant bits 230 // of the address (above the ones denoting the burst size) 231 assert(columnsPerStripe >= 1); 232 233 // channel striping has to be done at a granularity that 234 // is equal or larger to a cache line 235 if (system()->cacheLineSize() > range.granularity()) { 236 fatal("Channel interleaving of %s must be at least as large " 237 "as the cache line size\n", name()); 238 } 239 240 // ...and equal or smaller than the row-buffer size 241 if (rowBufferSize < range.granularity()) { 242 fatal("Channel interleaving of %s must be at most as large " 243 "as the row-buffer size\n", name()); 244 } 245 // this is essentially the check above, so just to be sure 246 assert(columnsPerStripe <= columnsPerRowBuffer); 247 } 248 } 249} 250 251void 252DRAMCtrl::startup() 253{ 254 // remember the memory system mode of operation 255 isTimingMode = system()->isTimingMode(); 256 257 if (isTimingMode) { 258 // timestamp offset should be in clock cycles for DRAMPower 259 timeStampOffset = divCeil(curTick(), tCK); 260 261 // update the start tick for the precharge accounting to the 262 // current tick 263 for (auto r : ranks) { 264 r->startup(curTick() + tREFI - tRP); 265 } 266 267 // shift the bus busy time sufficiently far ahead that we never 268 // have to worry about negative values when computing the time for 269 // the next request, this will add an insignificant bubble at the 270 // start of simulation 271 busBusyUntil = curTick() + tRP + tRCD + tCL; 272 } 273} 274 275Tick 276DRAMCtrl::recvAtomic(PacketPtr pkt) 277{ 278 DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr()); 279 280 panic_if(pkt->cacheResponding(), "Should not see packets where cache " 281 "is responding"); 282 283 // do the actual memory access and turn the packet into a response 284 access(pkt); 285 286 Tick latency = 0; 287 if (pkt->hasData()) { 288 // this value is not supposed to be accurate, just enough to 289 // keep things going, mimic a closed page 290 latency = tRP + tRCD + tCL; 291 } 292 return latency; 293} 294 295bool 296DRAMCtrl::readQueueFull(unsigned int neededEntries) const 297{ 298 DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n", 299 readBufferSize, readQueue.size() + respQueue.size(), 300 neededEntries); 301 302 return 303 (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize; 304} 305 306bool 307DRAMCtrl::writeQueueFull(unsigned int neededEntries) const 308{ 309 DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n", 310 writeBufferSize, writeQueue.size(), neededEntries); 311 return (writeQueue.size() + neededEntries) > writeBufferSize; 312} 313 314DRAMCtrl::DRAMPacket* 315DRAMCtrl::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size, 316 bool isRead) 317{ 318 // decode the address based on the address mapping scheme, with 319 // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and 320 // channel, respectively 321 uint8_t rank; 322 uint8_t bank; 323 // use a 64-bit unsigned during the computations as the row is 324 // 
always the top bits, and check before creating the DRAMPacket 325 uint64_t row; 326 327 // truncate the address to a DRAM burst, which makes it unique to 328 // a specific column, row, bank, rank and channel 329 Addr addr = dramPktAddr / burstSize; 330 331 // we have removed the lowest order address bits that denote the 332 // position within the column 333 if (addrMapping == Enums::RoRaBaChCo) { 334 // the lowest order bits denote the column to ensure that 335 // sequential cache lines occupy the same row 336 addr = addr / columnsPerRowBuffer; 337 338 // take out the channel part of the address 339 addr = addr / channels; 340 341 // after the channel bits, get the bank bits to interleave 342 // over the banks 343 bank = addr % banksPerRank; 344 addr = addr / banksPerRank; 345 346 // after the bank, we get the rank bits which thus interleaves 347 // over the ranks 348 rank = addr % ranksPerChannel; 349 addr = addr / ranksPerChannel; 350 351 // lastly, get the row bits, no need to remove them from addr 352 row = addr % rowsPerBank; 353 } else if (addrMapping == Enums::RoRaBaCoCh) { 354 // take out the lower-order column bits 355 addr = addr / columnsPerStripe; 356 357 // take out the channel part of the address 358 addr = addr / channels; 359 360 // next, the higher-order column bites 361 addr = addr / (columnsPerRowBuffer / columnsPerStripe); 362 363 // after the column bits, we get the bank bits to interleave 364 // over the banks 365 bank = addr % banksPerRank; 366 addr = addr / banksPerRank; 367 368 // after the bank, we get the rank bits which thus interleaves 369 // over the ranks 370 rank = addr % ranksPerChannel; 371 addr = addr / ranksPerChannel; 372 373 // lastly, get the row bits, no need to remove them from addr 374 row = addr % rowsPerBank; 375 } else if (addrMapping == Enums::RoCoRaBaCh) { 376 // optimise for closed page mode and utilise maximum 377 // parallelism of the DRAM (at the cost of power) 378 379 // take out the lower-order column bits 380 addr = addr / columnsPerStripe; 381 382 // take out the channel part of the address, not that this has 383 // to match with how accesses are interleaved between the 384 // controllers in the address mapping 385 addr = addr / channels; 386 387 // start with the bank bits, as this provides the maximum 388 // opportunity for parallelism between requests 389 bank = addr % banksPerRank; 390 addr = addr / banksPerRank; 391 392 // next get the rank bits 393 rank = addr % ranksPerChannel; 394 addr = addr / ranksPerChannel; 395 396 // next, the higher-order column bites 397 addr = addr / (columnsPerRowBuffer / columnsPerStripe); 398 399 // lastly, get the row bits, no need to remove them from addr 400 row = addr % rowsPerBank; 401 } else 402 panic("Unknown address mapping policy chosen!"); 403 404 assert(rank < ranksPerChannel); 405 assert(bank < banksPerRank); 406 assert(row < rowsPerBank); 407 assert(row < Bank::NO_ROW); 408 409 DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n", 410 dramPktAddr, rank, bank, row); 411 412 // create the corresponding DRAM packet with the entry time and 413 // ready time set to the current tick, the latter will be updated 414 // later 415 uint16_t bank_id = banksPerRank * rank + bank; 416 return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr, 417 size, ranks[rank]->banks[bank], *ranks[rank]); 418} 419 420void 421DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount) 422{ 423 // only add to the read queue here. 
whenever the request is 424 // eventually done, set the readyTime, and call schedule() 425 assert(!pkt->isWrite()); 426 427 assert(pktCount != 0); 428 429 // if the request size is larger than burst size, the pkt is split into 430 // multiple DRAM packets 431 // Note if the pkt starting address is not aligened to burst size, the 432 // address of first DRAM packet is kept unaliged. Subsequent DRAM packets 433 // are aligned to burst size boundaries. This is to ensure we accurately 434 // check read packets against packets in write queue. 435 Addr addr = pkt->getAddr(); 436 unsigned pktsServicedByWrQ = 0; 437 BurstHelper* burst_helper = NULL; 438 for (int cnt = 0; cnt < pktCount; ++cnt) { 439 unsigned size = std::min((addr | (burstSize - 1)) + 1, 440 pkt->getAddr() + pkt->getSize()) - addr; 441 readPktSize[ceilLog2(size)]++; 442 readBursts++; 443 444 // First check write buffer to see if the data is already at 445 // the controller 446 bool foundInWrQ = false; 447 Addr burst_addr = burstAlign(addr); 448 // if the burst address is not present then there is no need 449 // looking any further 450 if (isInWriteQueue.find(burst_addr) != isInWriteQueue.end()) { 451 for (const auto& p : writeQueue) { 452 // check if the read is subsumed in the write queue 453 // packet we are looking at 454 if (p->addr <= addr && (addr + size) <= (p->addr + p->size)) { 455 foundInWrQ = true; 456 servicedByWrQ++; 457 pktsServicedByWrQ++; 458 DPRINTF(DRAM, "Read to addr %lld with size %d serviced by " 459 "write queue\n", addr, size); 460 bytesReadWrQ += burstSize; 461 break; 462 } 463 } 464 } 465 466 // If not found in the write q, make a DRAM packet and 467 // push it onto the read queue 468 if (!foundInWrQ) { 469 470 // Make the burst helper for split packets 471 if (pktCount > 1 && burst_helper == NULL) { 472 DPRINTF(DRAM, "Read to addr %lld translates to %d " 473 "dram requests\n", pkt->getAddr(), pktCount); 474 burst_helper = new BurstHelper(pktCount); 475 } 476 477 DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true); 478 dram_pkt->burstHelper = burst_helper; 479 480 assert(!readQueueFull(1)); 481 rdQLenPdf[readQueue.size() + respQueue.size()]++; 482 483 DPRINTF(DRAM, "Adding to read queue\n"); 484 485 readQueue.push_back(dram_pkt); 486 487 // increment read entries of the rank 488 ++dram_pkt->rankRef.readEntries; 489 490 // Update stats 491 avgRdQLen = readQueue.size() + respQueue.size(); 492 } 493 494 // Starting address of next dram pkt (aligend to burstSize boundary) 495 addr = (addr | (burstSize - 1)) + 1; 496 } 497 498 // If all packets are serviced by write queue, we send the repsonse back 499 if (pktsServicedByWrQ == pktCount) { 500 accessAndRespond(pkt, frontendLatency); 501 return; 502 } 503 504 // Update how many split packets are serviced by write queue 505 if (burst_helper != NULL) 506 burst_helper->burstsServiced = pktsServicedByWrQ; 507 508 // If we are not already scheduled to get a request out of the 509 // queue, do so now 510 if (!nextReqEvent.scheduled()) { 511 DPRINTF(DRAM, "Request scheduled immediately\n"); 512 schedule(nextReqEvent, curTick()); 513 } 514} 515 516void 517DRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pktCount) 518{ 519 // only add to the write queue here. 
whenever the request is 520 // eventually done, set the readyTime, and call schedule() 521 assert(pkt->isWrite()); 522 523 // if the request size is larger than burst size, the pkt is split into 524 // multiple DRAM packets 525 Addr addr = pkt->getAddr(); 526 for (int cnt = 0; cnt < pktCount; ++cnt) { 527 unsigned size = std::min((addr | (burstSize - 1)) + 1, 528 pkt->getAddr() + pkt->getSize()) - addr; 529 writePktSize[ceilLog2(size)]++; 530 writeBursts++; 531 532 // see if we can merge with an existing item in the write 533 // queue and keep track of whether we have merged or not 534 bool merged = isInWriteQueue.find(burstAlign(addr)) != 535 isInWriteQueue.end(); 536 537 // if the item was not merged we need to create a new write 538 // and enqueue it 539 if (!merged) { 540 DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false); 541 542 assert(writeQueue.size() < writeBufferSize); 543 wrQLenPdf[writeQueue.size()]++; 544 545 DPRINTF(DRAM, "Adding to write queue\n"); 546 547 writeQueue.push_back(dram_pkt); 548 isInWriteQueue.insert(burstAlign(addr)); 549 assert(writeQueue.size() == isInWriteQueue.size()); 550 551 // Update stats 552 avgWrQLen = writeQueue.size(); 553 554 // increment write entries of the rank 555 ++dram_pkt->rankRef.writeEntries; 556 } else { 557 DPRINTF(DRAM, "Merging write burst with existing queue entry\n"); 558 559 // keep track of the fact that this burst effectively 560 // disappeared as it was merged with an existing one 561 mergedWrBursts++; 562 } 563 564 // Starting address of next dram pkt (aligend to burstSize boundary) 565 addr = (addr | (burstSize - 1)) + 1; 566 } 567 568 // we do not wait for the writes to be send to the actual memory, 569 // but instead take responsibility for the consistency here and 570 // snoop the write queue for any upcoming reads 571 // @todo, if a pkt size is larger than burst size, we might need a 572 // different front end latency 573 accessAndRespond(pkt, frontendLatency); 574 575 // If we are not already scheduled to get a request out of the 576 // queue, do so now 577 if (!nextReqEvent.scheduled()) { 578 DPRINTF(DRAM, "Request scheduled immediately\n"); 579 schedule(nextReqEvent, curTick()); 580 } 581} 582 583void 584DRAMCtrl::printQs() const { 585 DPRINTF(DRAM, "===READ QUEUE===\n\n"); 586 for (auto i = readQueue.begin() ; i != readQueue.end() ; ++i) { 587 DPRINTF(DRAM, "Read %lu\n", (*i)->addr); 588 } 589 DPRINTF(DRAM, "\n===RESP QUEUE===\n\n"); 590 for (auto i = respQueue.begin() ; i != respQueue.end() ; ++i) { 591 DPRINTF(DRAM, "Response %lu\n", (*i)->addr); 592 } 593 DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n"); 594 for (auto i = writeQueue.begin() ; i != writeQueue.end() ; ++i) { 595 DPRINTF(DRAM, "Write %lu\n", (*i)->addr); 596 } 597} 598 599bool 600DRAMCtrl::recvTimingReq(PacketPtr pkt) 601{ 602 // This is where we enter from the outside world 603 DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n", 604 pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 605 606 panic_if(pkt->cacheResponding(), "Should not see packets where cache " 607 "is responding"); 608 609 panic_if(!(pkt->isRead() || pkt->isWrite()), 610 "Should only see read and writes at memory controller\n"); 611 612 // Calc avg gap between requests 613 if (prevArrival != 0) { 614 totGap += curTick() - prevArrival; 615 } 616 prevArrival = curTick(); 617 618 619 // Find out how many dram packets a pkt translates to 620 // If the burst size is equal or larger than the pkt size, then a pkt 621 // translates to only one dram packet. 
Otherwise, a pkt translates to 622 // multiple dram packets 623 unsigned size = pkt->getSize(); 624 unsigned offset = pkt->getAddr() & (burstSize - 1); 625 unsigned int dram_pkt_count = divCeil(offset + size, burstSize); 626 627 // check local buffers and do not accept if full 628 if (pkt->isRead()) { 629 assert(size != 0); 630 if (readQueueFull(dram_pkt_count)) { 631 DPRINTF(DRAM, "Read queue full, not accepting\n"); 632 // remember that we have to retry this port 633 retryRdReq = true; 634 numRdRetry++; 635 return false; 636 } else { 637 addToReadQueue(pkt, dram_pkt_count); 638 readReqs++; 639 bytesReadSys += size; 640 } 641 } else { 642 assert(pkt->isWrite()); 643 assert(size != 0); 644 if (writeQueueFull(dram_pkt_count)) { 645 DPRINTF(DRAM, "Write queue full, not accepting\n"); 646 // remember that we have to retry this port 647 retryWrReq = true; 648 numWrRetry++; 649 return false; 650 } else { 651 addToWriteQueue(pkt, dram_pkt_count); 652 writeReqs++; 653 bytesWrittenSys += size; 654 } 655 } 656 657 return true; 658} 659 660void 661DRAMCtrl::processRespondEvent() 662{ 663 DPRINTF(DRAM, 664 "processRespondEvent(): Some req has reached its readyTime\n"); 665 666 DRAMPacket* dram_pkt = respQueue.front(); 667 668 // if a read has reached its ready-time, decrement the number of reads 669 // At this point the packet has been handled and there is a possibility 670 // to switch to low-power mode if no other packet is available 671 --dram_pkt->rankRef.readEntries; 672 DPRINTF(DRAM, "number of read entries for rank %d is %d\n", 673 dram_pkt->rank, dram_pkt->rankRef.readEntries); 674 675 // counter should at least indicate one outstanding request 676 // for this read 677 assert(dram_pkt->rankRef.outstandingEvents > 0); 678 // read response received, decrement count 679 --dram_pkt->rankRef.outstandingEvents; 680 681 // at this moment should be either ACT or IDLE depending on 682 // if PRE has occurred to close all banks 683 assert((dram_pkt->rankRef.pwrState == PWR_ACT) || 684 (dram_pkt->rankRef.pwrState == PWR_IDLE)); 685 686 // track if this is the last packet before idling 687 // and that there are no outstanding commands to this rank 688 if (dram_pkt->rankRef.lowPowerEntryReady()) { 689 // verify that there are no events scheduled 690 assert(!dram_pkt->rankRef.activateEvent.scheduled()); 691 assert(!dram_pkt->rankRef.prechargeEvent.scheduled()); 692 assert(dram_pkt->rankRef.refreshState == REF_IDLE); 693 694 // if coming from active state, schedule power event to 695 // active power-down else go to precharge power-down 696 DPRINTF(DRAMState, "Rank %d sleep at tick %d; current power state is " 697 "%d\n", dram_pkt->rank, curTick(), dram_pkt->rankRef.pwrState); 698 699 // default to ACT power-down unless already in IDLE state 700 // could be in IDLE if PRE issued before data returned 701 PowerState next_pwr_state = PWR_ACT_PDN; 702 if (dram_pkt->rankRef.pwrState == PWR_IDLE) { 703 next_pwr_state = PWR_PRE_PDN; 704 } 705 706 dram_pkt->rankRef.powerDownSleep(next_pwr_state, curTick()); 707 } 708 709 if (dram_pkt->burstHelper) { 710 // it is a split packet 711 dram_pkt->burstHelper->burstsServiced++; 712 if (dram_pkt->burstHelper->burstsServiced == 713 dram_pkt->burstHelper->burstCount) { 714 // we have now serviced all children packets of a system packet 715 // so we can now respond to the requester 716 // @todo we probably want to have a different front end and back 717 // end latency for split packets 718 accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency); 719 delete 
    delete respQueue.front();
    respQueue.pop_front();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (drainState() == DrainState::Draining &&
            writeQueue.empty() && readQueue.empty() && allRanksDrained()) {

            DPRINTF(Drain, "DRAM controller done draining\n");
            signalDrainDone();
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetryReq();
    }
}

bool
DRAMCtrl::chooseNext(std::deque<DRAMPacket*>& queue, Tick extra_col_delay)
{
    // This method does the arbitration between requests. The chosen
    // packet is simply moved to the head of the queue. The other
    // methods know that this is the place to look. For example, with
    // FCFS, this method does nothing
    assert(!queue.empty());

    // bool to indicate if a packet to an available rank is found
    bool found_packet = false;
    if (queue.size() == 1) {
        DRAMPacket* dram_pkt = queue.front();
        // available rank corresponds to state refresh idle
        if (ranks[dram_pkt->rank]->isAvailable()) {
            found_packet = true;
            DPRINTF(DRAM, "Single request, going to a free rank\n");
        } else {
            DPRINTF(DRAM, "Single request, going to a busy rank\n");
        }
        return found_packet;
    }

    if (memSchedPolicy == Enums::fcfs) {
        // check if there is a packet going to a free rank
        for (auto i = queue.begin(); i != queue.end() ; ++i) {
            DRAMPacket* dram_pkt = *i;
            if (ranks[dram_pkt->rank]->isAvailable()) {
                queue.erase(i);
                queue.push_front(dram_pkt);
                found_packet = true;
                break;
            }
        }
    } else if (memSchedPolicy == Enums::frfcfs) {
        found_packet = reorderQueue(queue, extra_col_delay);
    } else
        panic("No scheduling policy chosen\n");
    return found_packet;
}

bool
DRAMCtrl::reorderQueue(std::deque<DRAMPacket*>& queue, Tick extra_col_delay)
{
    // Only determine this if needed
    uint64_t earliest_banks = 0;
    bool hidden_bank_prep = false;

    // search for seamless row hits first, if no seamless row hit is
    // found then determine if there are other packets that can be issued
    // without incurring additional bus delay due to bank timing
    // Will select closed rows first to enable more open row possibilities
    // in future selections
    bool found_hidden_bank = false;

    // remember if we found a row hit, not seamless, but bank prepped
    // and ready
    bool found_prepped_pkt = false;

    // if we have no row hit, prepped or not, and no seamless packet,
    // just go for the earliest possible
    bool found_earliest_pkt = false;

    auto selected_pkt_it = queue.end();

    // time we need to issue a column command to be seamless
    const Tick min_col_at = std::max(busBusyUntil - tCL + extra_col_delay,
                                     curTick());

    for (auto i = queue.begin(); i != queue.end() ; ++i) {
        DRAMPacket* dram_pkt = *i;
        const Bank& bank = dram_pkt->bankRef;

        // check if rank is available, if not, jump to the next packet
        if (dram_pkt->rankRef.isAvailable()) {
            // check if it is a row hit
            if (bank.openRow == dram_pkt->row) {
                // no additional rank-to-rank or same bank-group
                // delays, or we switched read/write and might as well
                // go for the row hit
                if (bank.colAllowedAt <= min_col_at) {
                    // FCFS within the hits, giving priority to
                    // commands that can issue seamlessly, without
                    // additional delay, such as same rank accesses
                    // and/or different bank-group accesses
                    DPRINTF(DRAM, "Seamless row buffer hit\n");
                    selected_pkt_it = i;
                    // no need to look through the remaining queue entries
                    break;
                } else if (!found_hidden_bank && !found_prepped_pkt) {
                    // if we did not find a packet to a closed row that can
                    // issue the bank commands without incurring delay, and
                    // did not yet find a packet to a prepped row, remember
                    // the current one
                    selected_pkt_it = i;
                    found_prepped_pkt = true;
                    DPRINTF(DRAM, "Prepped row buffer hit\n");
                }
            } else if (!found_earliest_pkt) {
                // if we have not initialised the bank status, do it
                // now, and only once per scheduling decision
                if (earliest_banks == 0) {
                    // determine entries with earliest bank delay
                    pair<uint64_t, bool> bankStatus =
                        minBankPrep(queue, min_col_at);
                    earliest_banks = bankStatus.first;
                    hidden_bank_prep = bankStatus.second;
                }

                // bank is amongst first available banks
                // minBankPrep will give priority to packets that can
                // issue seamlessly
                if (bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) {
                    found_earliest_pkt = true;
                    found_hidden_bank = hidden_bank_prep;

                    // give priority to packets that can issue
                    // bank commands 'behind the scenes'
                    // any additional delay if any will be due to
                    // col-to-col command requirements
                    if (hidden_bank_prep || !found_prepped_pkt)
                        selected_pkt_it = i;
                }
            }
        }
    }

    if (selected_pkt_it != queue.end()) {
        DRAMPacket* selected_pkt = *selected_pkt_it;
        queue.erase(selected_pkt_it);
        queue.push_front(selected_pkt);
        return true;
    }

    return false;
}

void
DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
    DPRINTF(DRAM, "Responding to Address %lld.. ", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    access(pkt);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());
        // response_time consumes the static latency and is charged also
        // with headerDelay that takes into account the delay provided by
        // the xbar and also the payloadDelay that takes into account the
        // number of data beats.
        Tick response_time = curTick() + static_latency + pkt->headerDelay +
            pkt->payloadDelay;
        // Here we reset the timing of the packet before sending it out.
        pkt->headerDelay = pkt->payloadDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, response_time, true);
    } else {
        // @todo the packet is going to be deleted, and the DRAMPacket
        // is still having a pointer to it
        pendingDelete.reset(pkt);
    }

    DPRINTF(DRAM, "Done\n");

    return;
}
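// As a concrete illustration of the response timing above (all tick values
// are assumed, not taken from any configuration):
//
//   #include <cstdint>
//   using Tick = std::uint64_t;
//
//   Tick responseTimeExample(Tick now)
//   {
//       const Tick static_latency = 45; // assumed front- plus back-end latency
//       Tick header_delay = 10;         // assumed xbar header delay
//       Tick payload_delay = 20;        // assumed delay for the data beats
//       Tick response_time = now + static_latency + header_delay +
//           payload_delay;
//       // the delays are consumed here, mirroring how accessAndRespond
//       // zeroes headerDelay and payloadDelay before sending the response
//       header_delay = payload_delay = 0;
//       return response_time;
//   }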
void
DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref,
                       Tick act_tick, uint32_t row)
{
    assert(rank_ref.actTicks.size() == activationLimit);

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    // update the open row
    assert(bank_ref.openRow == Bank::NO_ROW);
    bank_ref.openRow = row;

    // start counting anew, this covers both the case when we
    // auto-precharged, and when this access is forced to
    // precharge
    bank_ref.bytesAccessed = 0;
    bank_ref.rowAccesses = 0;

    ++rank_ref.numBanksActive;
    assert(rank_ref.numBanksActive <= banksPerRank);

    DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got %d active\n",
            bank_ref.bank, rank_ref.rank, act_tick,
            ranks[rank_ref.rank]->numBanksActive);

    rank_ref.cmdList.push_back(Command(MemCommand::ACT, bank_ref.bank,
                               act_tick));

    DPRINTF(DRAMPower, "%llu,ACT,%d,%d\n", divCeil(act_tick, tCK) -
            timeStampOffset, bank_ref.bank, rank_ref.rank);

    // The next access has to respect tRAS for this bank
    bank_ref.preAllowedAt = act_tick + tRAS;

    // Respect the row-to-column command delay
    bank_ref.colAllowedAt = std::max(act_tick + tRCD, bank_ref.colAllowedAt);

    // start by enforcing tRRD
    for (int i = 0; i < banksPerRank; i++) {
        // next activate to any bank in this rank must not happen
        // before tRRD
        if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) {
            // bank group architecture requires longer delays between
            // ACT commands within the same bank group. Use tRRD_L
            // in this case
            rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD_L,
                                             rank_ref.banks[i].actAllowedAt);
        } else {
            // use shorter tRRD value when either
            // 1) bank group architecture is not supported
            // 2) bank is in a different bank group
            rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD,
                                             rank_ref.banks[i].actAllowedAt);
        }
    }

    // next, we deal with tXAW, if the activation limit is disabled
    // then we directly schedule an activate power event
    if (!rank_ref.actTicks.empty()) {
        // sanity check
        if (rank_ref.actTicks.back() &&
            (act_tick - rank_ref.actTicks.back()) < tXAW) {
            panic("Got %d activates in window %d (%llu - %llu) which "
                  "is smaller than %llu\n", activationLimit, act_tick -
                  rank_ref.actTicks.back(), act_tick,
                  rank_ref.actTicks.back(), tXAW);
        }

        // shift the times used for the book keeping, the last element
        // (highest index) is the oldest one and hence the lowest value
        rank_ref.actTicks.pop_back();

        // record a new activation (in the future)
        rank_ref.actTicks.push_front(act_tick);

        // cannot activate more than X times in time window tXAW, push the
        // next one (the X + 1'st activate) to be tXAW away from the
        // oldest in our window of X
        if (rank_ref.actTicks.back() &&
            (act_tick - rank_ref.actTicks.back()) < tXAW) {
            DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "
                    "no earlier than %llu\n", activationLimit,
                    rank_ref.actTicks.back() + tXAW);
            for (int j = 0; j < banksPerRank; j++)
                // next activate must not happen before end of window
                rank_ref.banks[j].actAllowedAt =
                    std::max(rank_ref.actTicks.back() + tXAW,
                             rank_ref.banks[j].actAllowedAt);
        }
    }

    // at the point when this activate takes place, make sure we
    // transition to the active power state
    if (!rank_ref.activateEvent.scheduled())
        schedule(rank_ref.activateEvent, act_tick);
    else if (rank_ref.activateEvent.when() > act_tick)
        // move it sooner in time
        reschedule(rank_ref.activateEvent, act_tick);
}

void
DRAMCtrl::prechargeBank(Rank& rank_ref, Bank& bank, Tick pre_at, bool trace)
{
    // make sure the bank has an open row
    assert(bank.openRow != Bank::NO_ROW);

    // sample the bytes per activate here since we are closing
    // the page
    bytesPerActivate.sample(bank.bytesAccessed);

    bank.openRow = Bank::NO_ROW;

    // no precharge allowed before this one
    bank.preAllowedAt = pre_at;

    Tick pre_done_at = pre_at + tRP;

    bank.actAllowedAt = std::max(bank.actAllowedAt, pre_done_at);

    assert(rank_ref.numBanksActive != 0);
    --rank_ref.numBanksActive;

    DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got "
            "%d active\n", bank.bank, rank_ref.rank, pre_at,
            rank_ref.numBanksActive);

    if (trace) {
        rank_ref.cmdList.push_back(Command(MemCommand::PRE, bank.bank,
                                   pre_at));
        DPRINTF(DRAMPower, "%llu,PRE,%d,%d\n", divCeil(pre_at, tCK) -
                timeStampOffset, bank.bank, rank_ref.rank);
    }
    // if we look at the current number of active banks we might be
    // tempted to think the DRAM is now idle, however this can be
    // undone by an activate that is scheduled to happen before we
    // would have reached the idle state, so schedule an event and
    // rather check once we actually make it to the point in time when
    // the (last) precharge takes place
    if (!rank_ref.prechargeEvent.scheduled()) {
        schedule(rank_ref.prechargeEvent, pre_done_at);
        // New event, increment count
        ++rank_ref.outstandingEvents;
    } else if (rank_ref.prechargeEvent.when() < pre_done_at) {
        reschedule(rank_ref.prechargeEvent, pre_done_at);
    }
}

void
DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt)
{
    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    // get the rank
    Rank& rank = dram_pkt->rankRef;

    // are we in or transitioning to a low-power state and have not scheduled
    // a power-up event?
    // if so, wake up from power down to issue RD/WR burst
    if (rank.inLowPowerState) {
        assert(rank.pwrState != PWR_SREF);
        rank.scheduleWakeUpEvent(tXP);
    }

    // get the bank
    Bank& bank = dram_pkt->bankRef;

    // for the state we need to track if it is a row hit or not
    bool row_hit = true;

    // respect any constraints on the command (e.g. tRCD or tCCD)
    Tick cmd_at = std::max(bank.colAllowedAt, curTick());

    // Determine the access latency and update the bank state
    if (bank.openRow == dram_pkt->row) {
        // nothing to do
    } else {
        row_hit = false;

        // If there is a page open, precharge it.
        if (bank.openRow != Bank::NO_ROW) {
            prechargeBank(rank, bank, std::max(bank.preAllowedAt, curTick()));
        }

        // next we need to account for the delay in activating the
        // page
        Tick act_tick = std::max(bank.actAllowedAt, curTick());

        // Record the activation and deal with all the global timing
        // constraints caused by a new activation (tRRD and tXAW)
        activateBank(rank, bank, act_tick, dram_pkt->row);

        // issue the command as early as possible
        cmd_at = bank.colAllowedAt;
    }

    // we need to wait until the bus is available before we can issue
    // the command
    cmd_at = std::max(cmd_at, busBusyUntil - tCL);

    // update the packet ready time
    dram_pkt->readyTime = cmd_at + tCL + tBURST;

    // only one burst can use the bus at any one point in time
    assert(dram_pkt->readyTime - busBusyUntil >= tBURST);

    // update the time for the next read/write burst for each
    // bank (add a max with tCCD/tCCD_L here)
    Tick cmd_dly;
    for (int j = 0; j < ranksPerChannel; j++) {
        for (int i = 0; i < banksPerRank; i++) {
            // next burst to same bank group in this rank must not happen
            // before tCCD_L. Different bank group timing requirement is
            // tBURST; Add tCS for different ranks
            if (dram_pkt->rank == j) {
                if (bankGroupArch &&
                    (bank.bankgr == ranks[j]->banks[i].bankgr)) {
                    // bank group architecture requires longer delays between
                    // RD/WR burst commands to the same bank group.
                    // Use tCCD_L in this case
                    cmd_dly = tCCD_L;
                } else {
                    // use tBURST (equivalent to tCCD_S), the shorter
                    // cas-to-cas delay value, when either:
                    // 1) bank group architecture is not supported
                    // 2) bank is in a different bank group
                    cmd_dly = tBURST;
                }
            } else {
                // different rank is by default in a different bank group
                // use tBURST (equivalent to tCCD_S), which is the shorter
                // cas-to-cas delay in this case
                // Add tCS to account for rank-to-rank bus delay requirements
                cmd_dly = tBURST + tCS;
            }
            ranks[j]->banks[i].colAllowedAt = std::max(cmd_at + cmd_dly,
                                             ranks[j]->banks[i].colAllowedAt);
        }
    }
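    // The delay selection in the loop above can be summarised as a small
    // stand-alone function (timing values are assumed, not from a datasheet):
    //
    //   #include <cstdint>
    //   using Tick = std::uint64_t;
    //
    //   Tick casToCasDelay(bool same_rank, bool same_bank_group,
    //                      bool bank_group_arch)
    //   {
    //       const Tick tBURST_ex = 4, tCCD_L_ex = 6, tCS_ex = 2; // assumed
    //       if (!same_rank)
    //           return tBURST_ex + tCS_ex; // rank-to-rank adds tCS
    //       if (bank_group_arch && same_bank_group)
    //           return tCCD_L_ex;          // long delay within a bank group
    //       return tBURST_ex;              // tCCD_S is folded into tBURST
    //   }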
    // Save rank of current access
    activeRank = dram_pkt->rank;

    // If this is a write, we also need to respect the write recovery
    // time before a precharge, in the case of a read, respect the
    // read to precharge constraint
    bank.preAllowedAt = std::max(bank.preAllowedAt,
                                 dram_pkt->isRead ? cmd_at + tRTP :
                                 dram_pkt->readyTime + tWR);

    // increment the bytes accessed and the accesses per row
    bank.bytesAccessed += burstSize;
    ++bank.rowAccesses;

    // if we reached the max, then issue with an auto-precharge
    bool auto_precharge = pageMgmt == Enums::close ||
        bank.rowAccesses == maxAccessesPerRow;

    // if we did not hit the limit, we might still want to
    // auto-precharge
    if (!auto_precharge &&
        (pageMgmt == Enums::open_adaptive ||
         pageMgmt == Enums::close_adaptive)) {
        // a twist on the open and close page policies:
        // 1) open_adaptive page policy does not blindly keep the
        // page open, but closes it if there are no row hits and there
        // are bank conflicts in the queue
        // 2) close_adaptive page policy does not blindly close the
        // page, but closes it only if there are no row hits in the queue.
        // In this case, only force an auto precharge when there
        // are no same page hits in the queue
        bool got_more_hits = false;
        bool got_bank_conflict = false;

        // either look at the read queue or write queue
        const deque<DRAMPacket*>& queue = dram_pkt->isRead ? readQueue :
            writeQueue;
        auto p = queue.begin();
        // make sure we are not considering the packet that we are
        // currently dealing with (which is the head of the queue)
        ++p;

        // keep on looking until we find a hit or reach the end of the queue
        // 1) if a hit is found, then both open and close adaptive
        // policies keep the page open
        // 2) if no hit is found, got_bank_conflict is set to true if a bank
        // conflict request is waiting in the queue
        while (!got_more_hits && p != queue.end()) {
            bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
                (dram_pkt->bank == (*p)->bank);
            bool same_row = dram_pkt->row == (*p)->row;
            got_more_hits |= same_rank_bank && same_row;
            got_bank_conflict |= same_rank_bank && !same_row;
            ++p;
        }

        // auto pre-charge when either
        // 1) open_adaptive policy, we have not got any more hits, and
        // have a bank conflict
        // 2) close_adaptive policy and we have not got any more hits
        auto_precharge = !got_more_hits &&
            (got_bank_conflict || pageMgmt == Enums::close_adaptive);
    }

    // DRAMPower trace command to be written
    std::string mem_cmd = dram_pkt->isRead ? "RD" : "WR";

    // MemCommand required for DRAMPower library
    MemCommand::cmds command = (mem_cmd == "RD") ? MemCommand::RD :
        MemCommand::WR;

    // Update bus state
    busBusyUntil = dram_pkt->readyTime;

    DPRINTF(DRAM, "Access to %lld, ready at %lld bus busy until %lld.\n",
            dram_pkt->addr, dram_pkt->readyTime, busBusyUntil);

    dram_pkt->rankRef.cmdList.push_back(Command(command, dram_pkt->bank,
                                        cmd_at));

    DPRINTF(DRAMPower, "%llu,%s,%d,%d\n", divCeil(cmd_at, tCK) -
            timeStampOffset, mem_cmd, dram_pkt->bank, dram_pkt->rank);

    // if this access should use auto-precharge, then we are
    // closing the row after the read/write burst
    if (auto_precharge) {
        // if auto-precharge push a PRE command at the correct tick to the
        // list used by the DRAMPower library to calculate power
        prechargeBank(rank, bank, std::max(curTick(), bank.preAllowedAt));

        DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
    }

    // Update the minimum timing between the requests, this is a
    // conservative estimate of when we have to schedule the next
    // request to not introduce any unnecessary bubbles. In most cases
    // we will wake up sooner than we have to.
    nextReqTime = busBusyUntil - (tRP + tRCD + tCL);

    // Update the stats and schedule the next request
    if (dram_pkt->isRead) {
        ++readsThisTime;
        if (row_hit)
            readRowHits++;
        bytesReadDRAM += burstSize;
        perBankRdBursts[dram_pkt->bankId]++;

        // Update latency stats
        totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
        totBusLat += tBURST;
        totQLat += cmd_at - dram_pkt->entryTime;
    } else {
        ++writesThisTime;
        if (row_hit)
            writeRowHits++;
        bytesWritten += burstSize;
        perBankWrBursts[dram_pkt->bankId]++;
    }
}

void
DRAMCtrl::processNextReqEvent()
{
    int busyRanks = 0;
    for (auto r : ranks) {
        if (!r->isAvailable()) {
            if (r->pwrState != PWR_SREF) {
                // rank is busy refreshing
                DPRINTF(DRAMState, "Rank %d is not available\n", r->rank);
                busyRanks++;

                // let the rank know that if it was waiting to drain, it
                // is now done and ready to proceed
                r->checkDrainDone();
            }

            // check if we were in self-refresh and haven't started
            // to transition out
            if ((r->pwrState == PWR_SREF) && r->inLowPowerState) {
                DPRINTF(DRAMState, "Rank %d is in self-refresh\n", r->rank);
                // if we have commands queued to this rank and we don't have
                // a minimum number of active commands enqueued,
                // exit self-refresh
                if (r->forceSelfRefreshExit()) {
                    DPRINTF(DRAMState, "rank %d was in self refresh and"
                            " should wake up\n", r->rank);
                    // wake up from self-refresh
                    r->scheduleWakeUpEvent(tXS);
                    // things are brought back into action once a refresh is
                    // performed after self-refresh
                    // continue with selection for other ranks
                }
            }
        }
    }

    if (busyRanks == ranksPerChannel) {
        // if all ranks are refreshing wait for them to finish
        // and stall this state machine without taking any further
        // action, and do not schedule a new nextReqEvent
        return;
    }

    // pre-emptively set to false. Overwrite if transitioning to
    // a new state
    bool switched_cmd_type = false;
    if (busState != busStateNext) {
        if (busState == READ) {
            DPRINTF(DRAM, "Switching to writes after %d reads with %d reads "
                    "waiting\n", readsThisTime, readQueue.size());

            // sample and reset the read-related stats as we are now
            // transitioning to writes, and all reads are done
            rdPerTurnAround.sample(readsThisTime);
            readsThisTime = 0;

            // now proceed to do the actual writes
            switched_cmd_type = true;
        } else {
            DPRINTF(DRAM, "Switching to reads after %d writes with %d writes "
                    "waiting\n", writesThisTime, writeQueue.size());

            wrPerTurnAround.sample(writesThisTime);
            writesThisTime = 0;

            switched_cmd_type = true;
        }
        // update busState to match next state until next transition
        busState = busStateNext;
    }

    // when we get here it is either a read or a write
    if (busState == READ) {

        // track if we should switch or not
        bool switch_to_writes = false;

        if (readQueue.empty()) {
            // In the case there is no read request to go next,
            // trigger writes if we have passed the low threshold (or
            // if we are draining)
            if (!writeQueue.empty() &&
                (drainState() == DrainState::Draining ||
                 writeQueue.size() > writeLowThreshold)) {

                switch_to_writes = true;
            } else {
                // check if we are drained
                // not done draining until in PWR_IDLE state
                // ensuring all banks are closed and
                // have exited low power states
                if (drainState() == DrainState::Draining &&
                    respQueue.empty() && allRanksDrained()) {

                    DPRINTF(Drain, "DRAM controller done draining\n");
                    signalDrainDone();
                }

                // nothing to do, not even any point in scheduling an
                // event for the next request
                return;
            }
        } else {
            // bool to check if there is a read to a free rank
            bool found_read = false;

            // Figure out which read request goes next, and move it to the
            // front of the read queue
            // If we are changing command type, incorporate the minimum
            // bus turnaround delay, which will be tCS in the different
            // rank case
            found_read = chooseNext(readQueue,
                                    switched_cmd_type ? tCS : 0);

            // if no read to an available rank is found then return
            // at this point. There could be writes to the available ranks
            // which are above the required threshold. However, to
            // avoid adding more complexity to the code, return and wait
            // for a refresh event to kick things into action again.
            if (!found_read)
                return;

            DRAMPacket* dram_pkt = readQueue.front();
            assert(dram_pkt->rankRef.isAvailable());

            // here we get a bit creative and shift the bus busy time not
            // just the tWTR, but also a CAS latency to capture the fact
            // that we are allowed to prepare a new bank, but not issue a
            // read command until after tWTR, in essence we capture a
            // bubble on the data bus that is tWTR + tCL
            if (switched_cmd_type && dram_pkt->rank == activeRank) {
                busBusyUntil += tWTR + tCL;
            }

            doDRAMAccess(dram_pkt);

            // At this point we're done dealing with the request
            readQueue.pop_front();

            // Every entry in the respQueue generates an event; increment
            // the count
            ++dram_pkt->rankRef.outstandingEvents;

            // sanity check
            assert(dram_pkt->size <= burstSize);
            assert(dram_pkt->readyTime >= curTick());

            // Insert into response queue. It will be sent back to the
            // requestor at its readyTime
            if (respQueue.empty()) {
                assert(!respondEvent.scheduled());
                schedule(respondEvent, dram_pkt->readyTime);
            } else {
                assert(respQueue.back()->readyTime <= dram_pkt->readyTime);
                assert(respondEvent.scheduled());
            }

            respQueue.push_back(dram_pkt);

            // we have so many writes that we have to transition
            if (writeQueue.size() > writeHighThreshold) {
                switch_to_writes = true;
            }
        }

        // switching to writes, either because the read queue is empty
        // and the writes have passed the low threshold (or we are
        // draining), or because the writes hit the high threshold
        if (switch_to_writes) {
            // transition to writing
            busStateNext = WRITE;
        }
    } else {
        // bool to check if a write to a free rank is found
        bool found_write = false;

        // If we are changing command type, incorporate the minimum
        // bus turnaround delay
        found_write = chooseNext(writeQueue,
                                 switched_cmd_type ? std::min(tRTW, tCS) : 0);

        // if no writes to an available rank are found then return.
        // There could be reads to the available ranks. However, to avoid
        // adding more complexity to the code, return at this point and wait
        // for a refresh event to kick things into action again.
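        // For intuition, the two same-rank turnaround bubbles applied in
        // this function can be sketched stand-alone (timings assumed):
        //
        //   #include <cstdint>
        //   using Tick = std::uint64_t;
        //
        //   Tick turnaroundBubble(bool write_to_read)
        //   {
        //       const Tick tWTR_ex = 6, tCL_ex = 11, tRTW_ex = 8; // assumed
        //       // write->read pays tWTR plus a CAS latency on the data bus;
        //       // read->write pays tRTW; cross-rank switches are instead
        //       // covered by tCS via colAllowedAt
        //       return write_to_read ? tWTR_ex + tCL_ex : tRTW_ex;
        //   }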
        if (!found_write)
            return;

        DRAMPacket* dram_pkt = writeQueue.front();
        assert(dram_pkt->rankRef.isAvailable());
        // sanity check
        assert(dram_pkt->size <= burstSize);

        // add a bubble to the data bus, as defined by the
        // tRTW when access is to the same rank as previous burst
        // Different rank timing is handled with tCS, which is
        // applied to colAllowedAt
        if (switched_cmd_type && dram_pkt->rank == activeRank) {
            busBusyUntil += tRTW;
        }

        doDRAMAccess(dram_pkt);

        writeQueue.pop_front();

        // removed write from queue, decrement count
        --dram_pkt->rankRef.writeEntries;

        // Schedule write done event to decrement event count
        // after the readyTime has been reached
        // Only schedule latest write event to minimize events
        // required; only need to ensure that final event scheduled covers
        // the time that writes are outstanding and bus is active
        // to hold off power-down entry events
        if (!dram_pkt->rankRef.writeDoneEvent.scheduled()) {
            schedule(dram_pkt->rankRef.writeDoneEvent, dram_pkt->readyTime);
            // New event, increment count
            ++dram_pkt->rankRef.outstandingEvents;
        } else if (dram_pkt->rankRef.writeDoneEvent.when() <
                   dram_pkt->readyTime) {
            reschedule(dram_pkt->rankRef.writeDoneEvent, dram_pkt->readyTime);
        }

        isInWriteQueue.erase(burstAlign(dram_pkt->addr));
        delete dram_pkt;

        // If we emptied the write queue, or got sufficiently below the
        // threshold (using the minWritesPerSwitch as the hysteresis) and
        // are not draining, or we have reads waiting and have done enough
        // writes, then switch to reads.
        if (writeQueue.empty() ||
            (writeQueue.size() + minWritesPerSwitch < writeLowThreshold &&
             drainState() != DrainState::Draining) ||
            (!readQueue.empty() && writesThisTime >= minWritesPerSwitch)) {
            // turn the bus back around for reads again
            busStateNext = READ;

            // note that we switch back to reads also in the idle
            // case, which eventually will check for any draining and
            // also pause any further scheduling if there is really
            // nothing to do
        }
    }
    // It is possible that a refresh to another rank kicks things back into
    // action before reaching this point.
    if (!nextReqEvent.scheduled())
        schedule(nextReqEvent, std::max(nextReqTime, curTick()));
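    // The switch-back-to-reads condition above amounts to a small predicate;
    // a stand-alone sketch with assumed thresholds (not the configured ones):
    //
    //   #include <cstddef>
    //
    //   bool switchBackToReads(std::size_t write_q, std::size_t read_q,
    //                          unsigned writes_this_time, bool draining)
    //   {
    //       const std::size_t write_low_threshold = 32; // assumed
    //       const unsigned min_writes_per_switch = 8;   // assumed hysteresis
    //       return write_q == 0 ||
    //           (write_q + min_writes_per_switch < write_low_threshold &&
    //            !draining) ||
    //           (read_q != 0 && writes_this_time >= min_writes_per_switch);
    //   }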
    // If there is space available and we have writes waiting then let
    // them retry. This is done here to ensure that the retry does not
    // cause a nextReqEvent to be scheduled before we do so as part of
    // the next request processing
    if (retryWrReq && writeQueue.size() < writeBufferSize) {
        retryWrReq = false;
        port.sendRetryReq();
    }
}

pair<uint64_t, bool>
DRAMCtrl::minBankPrep(const deque<DRAMPacket*>& queue,
                      Tick min_col_at) const
{
    uint64_t bank_mask = 0;
    Tick min_act_at = MaxTick;

    // latest Tick for which ACT can occur without incurring additional
    // delay on the data bus
    const Tick hidden_act_max = std::max(min_col_at - tRCD, curTick());

    // Flag condition when burst can issue back-to-back with previous burst
    bool found_seamless_bank = false;

    // Flag condition when bank can be opened without incurring additional
    // delay on the data bus
    bool hidden_bank_prep = false;

    // determine if we have queued transactions targeting the
    // bank in question
    vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
    for (const auto& p : queue) {
        if (p->rankRef.isAvailable())
            got_waiting[p->bankId] = true;
    }

    // Find command with optimal bank timing
    // Will prioritize commands that can issue seamlessly.
    for (int i = 0; i < ranksPerChannel; i++) {
        for (int j = 0; j < banksPerRank; j++) {
            uint16_t bank_id = i * banksPerRank + j;

            // if we have waiting requests for the bank, and it is
            // amongst the first available, update the mask
            if (got_waiting[bank_id]) {
                // make sure this rank is not currently refreshing.
                assert(ranks[i]->isAvailable());
                // simplistic approximation of when the bank can issue
                // an activate, ignoring any rank-to-rank switching
                // cost in this calculation
                Tick act_at = ranks[i]->banks[j].openRow == Bank::NO_ROW ?
                    std::max(ranks[i]->banks[j].actAllowedAt, curTick()) :
                    std::max(ranks[i]->banks[j].preAllowedAt, curTick()) + tRP;

                // When is the earliest the R/W burst can issue?
                Tick col_at = std::max(ranks[i]->banks[j].colAllowedAt,
                                       act_at + tRCD);

                // bank can issue burst back-to-back (seamlessly) with
                // previous burst
                bool new_seamless_bank = col_at <= min_col_at;
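                // A numeric illustration of the estimate above, with all
                // tick values assumed:
                //
                //   now = 1000, tRP = 15, tRCD = 14
                //   open row:   act_at = max(preAllowedAt=1005, now) + tRP
                //                      = 1020
                //   closed row: act_at = max(actAllowedAt=990, now) = 1000
                //   col_at = max(colAllowedAt, act_at + tRCD)
                //
                // so a closed bank can be ready a full tRP earlier, which
                // is why closed rows are favoured when nothing is seamless.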
                // if we found a new seamless bank or we have no
                // seamless banks, and got a bank with an earlier
                // activate time, it should be added to the bit mask
                if (new_seamless_bank ||
                    (!found_seamless_bank && act_at <= min_act_at)) {
                    // if we did not have a seamless bank before, and
                    // we do now, reset the bank mask, also reset it
                    // if we have not yet found a seamless bank and
                    // the activate time is smaller than what we have
                    // seen so far
                    if (!found_seamless_bank &&
                        (new_seamless_bank || act_at < min_act_at)) {
                        bank_mask = 0;
                    }

                    found_seamless_bank |= new_seamless_bank;

                    // ACT can occur 'behind the scenes'
                    hidden_bank_prep = act_at <= hidden_act_max;

                    // set the bit corresponding to the available bank
                    replaceBits(bank_mask, bank_id, bank_id, 1);
                    min_act_at = act_at;
                }
            }
        }
    }

    return make_pair(bank_mask, hidden_bank_prep);
}

DRAMCtrl::Rank::Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p)
    : EventManager(&_memory), memory(_memory),
      pwrStateTrans(PWR_IDLE), pwrStatePostRefresh(PWR_IDLE),
      pwrStateTick(0), refreshDueAt(0), pwrState(PWR_IDLE),
      refreshState(REF_IDLE), inLowPowerState(false), rank(0),
      readEntries(0), writeEntries(0), outstandingEvents(0),
      wakeUpAllowedAt(0), power(_p, false), numBanksActive(0),
      writeDoneEvent(*this), activateEvent(*this), prechargeEvent(*this),
      refreshEvent(*this), powerEvent(*this), wakeUpEvent(*this)
{ }

void
DRAMCtrl::Rank::startup(Tick ref_tick)
{
    assert(ref_tick > curTick());

    pwrStateTick = curTick();

    // kick off the refresh, and give ourselves enough time to
    // precharge
    schedule(refreshEvent, ref_tick);
}

void
DRAMCtrl::Rank::suspend()
{
    deschedule(refreshEvent);

    // Update the stats
    updatePowerStats();

    // don't automatically transition back to LP state after next REF
    pwrStatePostRefresh = PWR_IDLE;
}

bool
DRAMCtrl::Rank::lowPowerEntryReady() const
{
    bool no_queued_cmds = ((memory.busStateNext == READ) && (readEntries == 0))
        || ((memory.busStateNext == WRITE) &&
            (writeEntries == 0));

    if (refreshState == REF_RUN) {
        // have not decremented outstandingEvents for refresh command
        // still check if there are no commands queued to force PD
        // entry after refresh completes
        return no_queued_cmds;
    } else {
        // ensure no commands in Q and no commands scheduled
        return (no_queued_cmds && (outstandingEvents == 0));
    }
}

void
DRAMCtrl::Rank::checkDrainDone()
{
    // if this rank was waiting to drain it is now able to proceed to
    // precharge
    if (refreshState == REF_DRAIN) {
        DPRINTF(DRAM, "Refresh drain done, now precharging\n");

        refreshState = REF_PD_EXIT;

        // hand control back to the refresh event loop
        schedule(refreshEvent, curTick());
    }
}

void
DRAMCtrl::Rank::flushCmdList()
{
    // at the moment sort the list of commands and update the counters
    // for the DRAMPower library when doing a refresh
    sort(cmdList.begin(), cmdList.end(), DRAMCtrl::sortTime);
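    // The loop below effectively partitions cmdList at curTick(); the same
    // idea expressed stand-alone with the standard library (names local to
    // this example):
    //
    //   #include <algorithm>
    //   #include <cstdint>
    //   #include <vector>
    //   using Tick = std::uint64_t;
    //
    //   struct Cmd { Tick timeStamp; };
    //
    //   // consume commands with timeStamp <= now, keep the rest
    //   void flushExample(std::vector<Cmd>& cmds, Tick now)
    //   {
    //       std::sort(cmds.begin(), cmds.end(),
    //                 [](const Cmd& a, const Cmd& b)
    //                 { return a.timeStamp < b.timeStamp; });
    //       auto split = std::upper_bound(cmds.begin(), cmds.end(), now,
    //                                     [](Tick t, const Cmd& c)
    //                                     { return t < c.timeStamp; });
    //       // [begin, split) would be handed to the power model here
    //       cmds.erase(cmds.begin(), split);
    //   }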
    auto next_iter = cmdList.begin();
    // push commands to DRAMPower
    for ( ; next_iter != cmdList.end() ; ++next_iter) {
        Command cmd = *next_iter;
        if (cmd.timeStamp <= curTick()) {
            // Move all commands at or before curTick to DRAMPower
            power.powerlib.doCommand(cmd.type, cmd.bank,
                                     divCeil(cmd.timeStamp, memory.tCK) -
                                     memory.timeStampOffset);
        } else {
            // done - found all commands at or before curTick()
            // next_iter references the 1st command after curTick
            break;
        }
    }
    // reset cmdList to only contain commands after curTick
    // if there are no commands after curTick, the updated cmdList will be
    // empty; in this case, next_iter is cmdList.end()
    cmdList.assign(next_iter, cmdList.end());
}

void
DRAMCtrl::Rank::processActivateEvent()
{
    // we should transition to the active state as soon as any bank is active
    if (pwrState != PWR_ACT)
        // note that at this point numBanksActive could be back at
        // zero again due to a precharge scheduled in the future
        schedulePowerEvent(PWR_ACT, curTick());
}

void
DRAMCtrl::Rank::processPrechargeEvent()
{
    // counter should at least indicate one outstanding request
    // for this precharge
    assert(outstandingEvents > 0);
    // precharge complete, decrement count
    --outstandingEvents;

    // if we reached zero, then special conditions apply as we track
    // if all banks are precharged for the power models
    if (numBanksActive == 0) {
        // no reads to this rank in the Q and no pending
        // RD/WR or refresh commands
        if (lowPowerEntryReady()) {
            // should still be in ACT state since bank still open
            assert(pwrState == PWR_ACT);

            // All banks closed - switch to precharge power down state.
            DPRINTF(DRAMState, "Rank %d sleep at tick %d\n",
                    rank, curTick());
            powerDownSleep(PWR_PRE_PDN, curTick());
        } else {
            // we should transition to the idle state when the last bank
            // is precharged
            schedulePowerEvent(PWR_IDLE, curTick());
        }
    }
}

void
DRAMCtrl::Rank::processWriteDoneEvent()
{
    // counter should at least indicate one outstanding request
    // for this write
    assert(outstandingEvents > 0);
    // Write transfer on bus has completed
    // decrement per rank counter
    --outstandingEvents;
}

void
DRAMCtrl::Rank::processRefreshEvent()
{
    // when first preparing the refresh, remember when it was due
    if ((refreshState == REF_IDLE) || (refreshState == REF_SREF_EXIT)) {
        // remember when the refresh is due
        refreshDueAt = curTick();

        // proceed to drain
        refreshState = REF_DRAIN;

        // make nonzero while refresh is pending to ensure
        // power down and self-refresh are not entered
        ++outstandingEvents;

        DPRINTF(DRAM, "Refresh due\n");
    }

    // let any scheduled read or write to the same rank go ahead,
    // after which it will hand control back to this event loop
    if (refreshState == REF_DRAIN) {
        // if a request is at the moment being handled and this request is
        // accessing the current rank then wait for it to finish
        if ((rank == memory.activeRank)
            && (memory.nextReqEvent.scheduled())) {
            // hand control over to the request loop until it is
            // evaluated next
            DPRINTF(DRAM, "Refresh awaiting draining\n");

            return;
        } else {
            refreshState = REF_PD_EXIT;
        }
    }

    // at this point, ensure that rank is not in a power-down state
    if (refreshState == REF_PD_EXIT) {
        // if rank was sleeping and we haven't started the exit process,
        // wake up for refresh
        if (inLowPowerState) {
            DPRINTF(DRAM, "Wake Up for refresh\n");
            // save state and return after refresh completes
            scheduleWakeUpEvent(memory.tXP);
            return;
        } else {
            refreshState = REF_PRE;
        }
    }

    // at this point, ensure that all banks are precharged
    if (refreshState == REF_PRE) {
        // precharge any active bank
        if (numBanksActive != 0) {
            // at the moment, we use a precharge all even if there is
            // only a single bank open
            DPRINTF(DRAM, "Precharging all\n");

            // first determine when we can precharge
            Tick pre_at = curTick();

            for (auto &b : banks) {
                // respect both causality and any existing bank
                // constraints, some banks could already have a
                // (auto) precharge scheduled
                pre_at = std::max(b.preAllowedAt, pre_at);
            }

            // make sure all banks per rank are precharged, and for those that
            // already are, update their availability
            Tick act_allowed_at = pre_at + memory.tRP;

            for (auto &b : banks) {
                if (b.openRow != Bank::NO_ROW) {
                    memory.prechargeBank(*this, b, pre_at, false);
                } else {
                    b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at);
                    b.preAllowedAt = std::max(b.preAllowedAt, pre_at);
                }
            }

            // precharge all banks in rank
            cmdList.push_back(Command(MemCommand::PREA, 0, pre_at));

            DPRINTF(DRAMPower, "%llu,PREA,0,%d\n",
                    divCeil(pre_at, memory.tCK) -
                    memory.timeStampOffset, rank);
        } else if ((pwrState == PWR_IDLE) && (outstandingEvents == 1)) {
            // Banks are closed, have transitioned to IDLE state, and
            // no outstanding ACT,RD/WR,Auto-PRE sequence scheduled
            DPRINTF(DRAM, "All banks already precharged, starting refresh\n");

            // go ahead and kick the power state machine into gear since
            // we are already idle
            schedulePowerEvent(PWR_REF, curTick());
        } else {
            // banks are closed, but the rank has not yet transitioned
            // pwrState to IDLE, or has an outstanding ACT,RD/WR,Auto-PRE
            // sequence scheduled; there should be an outstanding precharge
            // event in this case
            assert(prechargeEvent.scheduled());
            // will start refresh when pwrState transitions to IDLE
        }

        assert(numBanksActive == 0);

        // wait for all banks to be precharged, at which point the
        // power state machine will transition to the idle state, and
        // automatically move to a refresh, at that point it will also
        // call this method to get the refresh event loop going again
        return;
    }

    // last but not least we perform the actual refresh
    if (refreshState == REF_START) {
        // should never get here with any banks active
        assert(numBanksActive == 0);
        assert(pwrState == PWR_REF);

        Tick ref_done_at = curTick() + memory.tRFC;

        for (auto &b : banks) {
            b.actAllowedAt = ref_done_at;
        }

        // at the moment this affects all ranks
        cmdList.push_back(Command(MemCommand::REF, 0, curTick()));

        // Update the stats
        updatePowerStats();

        DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), memory.tCK) -
                memory.timeStampOffset, rank);

        // Update for next refresh
        refreshDueAt += memory.tREFI;

        // make sure we did not wait so long that we cannot make up
        // for it
        if (refreshDueAt < ref_done_at) {
            fatal("Refresh was delayed so long we cannot catch up\n");
        }

        // Run the refresh and schedule event to transition power states
        // when refresh completes
        refreshState = REF_RUN;
        schedule(refreshEvent, ref_done_at);
        return;
    }
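    // For orientation, the nominal walk through the refresh states handled
    // by this event can be sketched as follows (a simplification: the real
    // flow can also re-enter via power events, wake-ups and self-refresh):
    //
    //   enum RefreshSketch { R_IDLE, R_DRAIN, R_PD_EXIT, R_PRE, R_START,
    //                        R_RUN };
    //
    //   RefreshSketch nextRefreshState(RefreshSketch s)
    //   {
    //       switch (s) {
    //         case R_IDLE:    return R_DRAIN;   // refresh due, drain rank
    //         case R_DRAIN:   return R_PD_EXIT; // ensure not powered down
    //         case R_PD_EXIT: return R_PRE;     // precharge all banks
    //         case R_PRE:     return R_START;   // issue REF once idle
    //         case R_START:   return R_RUN;     // wait for tRFC to elapse
    //         default:        return R_IDLE;    // schedule next at tREFI
    //       }
    //   }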
    if (refreshState == REF_RUN) {
        // should never get here with any banks active
        assert(numBanksActive == 0);
        assert(pwrState == PWR_REF);

        assert(!powerEvent.scheduled());

        if ((memory.drainState() == DrainState::Draining) ||
            (memory.drainState() == DrainState::Drained)) {
            // if draining, do not re-enter low-power mode.
            // simply go to IDLE and wait
            schedulePowerEvent(PWR_IDLE, curTick());
        } else {
            // At the moment, we sleep when the refresh ends and wait to be
            // woken up again if previously in a low-power state.
            if (pwrStatePostRefresh != PWR_IDLE) {
                // power state should be PWR_REF
                assert(pwrState == PWR_REF);
                DPRINTF(DRAMState, "Rank %d sleeping after refresh and was in "
                        "power state %d before refreshing\n", rank,
                        pwrStatePostRefresh);
                powerDownSleep(pwrState, curTick());

                // Force PRE power-down if there are no outstanding commands
                // in Q after refresh.
            } else if (lowPowerEntryReady()) {
                DPRINTF(DRAMState, "Rank %d sleeping after refresh but was NOT"
                        " in a low power state before refreshing\n", rank);
                powerDownSleep(PWR_PRE_PDN, curTick());
            } else {
                // move to the idle power state once the refresh is done, this
                // will also move the refresh state machine to the refresh
                // idle state
                schedulePowerEvent(PWR_IDLE, curTick());
            }
        }

        // if transitioning to self refresh do not schedule a new refresh;
        // when waking from self refresh, a refresh is scheduled again.
        if (pwrStateTrans != PWR_SREF) {
            // compensate for the delay in actually performing the refresh
            // when scheduling the next one
            schedule(refreshEvent, refreshDueAt - memory.tRP);

            DPRINTF(DRAMState, "Refresh done at %llu and next refresh"
                    " at %llu\n", curTick(), refreshDueAt);
        }
    }
}

void
DRAMCtrl::Rank::schedulePowerEvent(PowerState pwr_state, Tick tick)
{
    // respect causality
    assert(tick >= curTick());

    if (!powerEvent.scheduled()) {
        DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n",
                tick, pwr_state);

        // insert the new transition
        pwrStateTrans = pwr_state;

        schedule(powerEvent, tick);
    } else {
        panic("Scheduled power event at %llu to state %d, "
              "with scheduled event at %llu to %d\n", tick, pwr_state,
              powerEvent.when(), pwrStateTrans);
    }
}

void
DRAMCtrl::Rank::powerDownSleep(PowerState pwr_state, Tick tick)
{
    // if low power state is active low, schedule to active low power state.
    // in reality tCKE is needed to enter active low power. This is neglected
    // here and could be added in the future.
    if (pwr_state == PWR_ACT_PDN) {
        schedulePowerEvent(pwr_state, tick);
        // push command to DRAMPower
        cmdList.push_back(Command(MemCommand::PDN_F_ACT, 0, tick));
        DPRINTF(DRAMPower, "%llu,PDN_F_ACT,0,%d\n", divCeil(tick,
                memory.tCK) - memory.timeStampOffset, rank);
    } else if (pwr_state == PWR_PRE_PDN) {
        // if low power state is precharge low, schedule to precharge low
        // power state. In reality tCKE is needed to enter active low power.
        // This is neglected here.
        schedulePowerEvent(pwr_state, tick);
        // push command to DRAMPower
        cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));
        DPRINTF(DRAMPower, "%llu,PDN_F_PRE,0,%d\n", divCeil(tick,
                memory.tCK) - memory.timeStampOffset, rank);
    } else if (pwr_state == PWR_REF) {
        // if a refresh just occurred
        // transition to PRE_PDN now that all banks are closed
        // do not transition to SREF if commands are in Q; stay in PRE_PDN
        if (pwrStatePostRefresh == PWR_ACT_PDN || !lowPowerEntryReady()) {
            // precharge power down requires tCKE to enter. For simplicity
            // this is not considered.
            schedulePowerEvent(PWR_PRE_PDN, tick);
            // push command to DRAMPower
            cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));
            DPRINTF(DRAMPower, "%llu,PDN_F_PRE,0,%d\n", divCeil(tick,
                    memory.tCK) - memory.timeStampOffset, rank);
        } else {
            // last low power state was precharge power-down
            assert(pwrStatePostRefresh == PWR_PRE_PDN);
            // self refresh requires time tCKESR to enter. For simplicity,
            // this is not considered.
            schedulePowerEvent(PWR_SREF, tick);
            // push command to DRAMPower
            cmdList.push_back(Command(MemCommand::SREN, 0, tick));
            DPRINTF(DRAMPower, "%llu,SREN,0,%d\n", divCeil(tick,
                    memory.tCK) - memory.timeStampOffset, rank);
        }
    }
    // Ensure that we don't power-down and back up in the same tick
    // Once we commit to PD entry, do it and wait for at least 1 tCK
    // This could be replaced with tCKE if/when that is added to the model
    wakeUpAllowedAt = tick + memory.tCK;

    // Transitioning to a low power state, set flag
    inLowPowerState = true;
}

void
DRAMCtrl::Rank::scheduleWakeUpEvent(Tick exit_delay)
{
    Tick wake_up_tick = std::max(curTick(), wakeUpAllowedAt);

    DPRINTF(DRAMState, "Scheduling wake-up for rank %d at tick %d\n",
            rank, wake_up_tick);

    // if waking for refresh, hold previous state
    // else reset state back to IDLE
    if (refreshState == REF_PD_EXIT) {
        pwrStatePostRefresh = pwrState;
    } else {
        // don't automatically transition back to LP state after next REF
        pwrStatePostRefresh = PWR_IDLE;
    }

    // schedule wake-up with event to ensure entry has completed before
    // we try to wake up
    schedule(wakeUpEvent, wake_up_tick);

    for (auto &b : banks) {
        // respect both causality and any existing bank
        // constraints, some banks could already have a
        // (auto) precharge scheduled
        b.colAllowedAt = std::max(wake_up_tick + exit_delay, b.colAllowedAt);
        b.preAllowedAt = std::max(wake_up_tick + exit_delay, b.preAllowedAt);
        b.actAllowedAt = std::max(wake_up_tick + exit_delay, b.actAllowedAt);
    }
    // Transitioning out of low power state, clear flag
    inLowPowerState = false;

    // push to DRAMPower
    // use pwrStateTrans for cases where we have a power event scheduled
    // to enter low power that has not yet been processed
    if (pwrStateTrans == PWR_ACT_PDN) {
        cmdList.push_back(Command(MemCommand::PUP_ACT, 0, wake_up_tick));
        DPRINTF(DRAMPower, "%llu,PUP_ACT,0,%d\n", divCeil(wake_up_tick,
                memory.tCK) - memory.timeStampOffset, rank);
    } else if (pwrStateTrans == PWR_PRE_PDN) {
        cmdList.push_back(Command(MemCommand::PUP_PRE, 0, wake_up_tick));
        DPRINTF(DRAMPower, "%llu,PUP_PRE,0,%d\n", divCeil(wake_up_tick,
                memory.tCK) - memory.timeStampOffset, rank);
    } else if (pwrStateTrans == PWR_SREF) {
        cmdList.push_back(Command(MemCommand::SREX, 0, wake_up_tick));
        DPRINTF(DRAMPower, "%llu,SREX,0,%d\n", divCeil(wake_up_tick,
                memory.tCK) - memory.timeStampOffset, rank);
    }
}

void
DRAMCtrl::Rank::processWakeUpEvent()
{
    // Should be in a power-down or self-refresh state
    assert((pwrState == PWR_ACT_PDN) || (pwrState == PWR_PRE_PDN) ||
           (pwrState == PWR_SREF));

    // Check current state to determine transition state
    if (pwrState == PWR_ACT_PDN) {
        // banks still open, transition to PWR_ACT
        schedulePowerEvent(PWR_ACT, curTick());
    } else {
        // transitioning from a precharge power-down or self-refresh state
        // banks are closed - transition to PWR_IDLE
        schedulePowerEvent(PWR_IDLE, curTick());
    }
}

void
DRAMCtrl::Rank::processPowerEvent()
{
    assert(curTick() >= pwrStateTick);
    // remember where we were, and for how long
    Tick duration = curTick() - pwrStateTick;
    PowerState prev_state = pwrState;

    // update the accounting
    pwrStateTime[prev_state] += duration;

    // track the total idle time
    if ((prev_state == PWR_PRE_PDN) || (prev_state == PWR_ACT_PDN) ||
        (prev_state == PWR_SREF)) {
        totalIdleTime += duration;
    }

    pwrState = pwrStateTrans;
    pwrStateTick = curTick();

    // if rank was refreshing, make sure to start scheduling requests again
    if (prev_state == PWR_REF) {
        // bus IDLED prior to REF
        // counter should be one for refresh command only
        assert(outstandingEvents == 1);
        // REF complete, decrement count
        --outstandingEvents;

        DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration);
        // if sleeping after refresh
        if (pwrState != PWR_IDLE) {
            assert((pwrState == PWR_PRE_PDN) || (pwrState == PWR_SREF));
            DPRINTF(DRAMState, "Switching to power down state after refreshing"
                    " rank %d at %llu tick\n", rank, curTick());
        }
        if (pwrState != PWR_SREF) {
            // rank is not available in SREF
            // don't transition to IDLE in this case
            refreshState = REF_IDLE;
        }
        // a request event could be already scheduled by the state
        // machine of the other rank
        if (!memory.nextReqEvent.scheduled()) {
            DPRINTF(DRAM, "Scheduling next request after refreshing rank %d\n",
                    rank);
            schedule(memory.nextReqEvent, curTick());
        }
    } else if (pwrState == PWR_ACT) {
        if (refreshState == REF_PD_EXIT) {
            // kick the refresh event loop into action again
            assert(prev_state == PWR_ACT_PDN);

            // go back to REF event and close banks
            refreshState = REF_PRE;
            schedule(refreshEvent, curTick());
        }
    } else if (pwrState == PWR_IDLE) {
        DPRINTF(DRAMState, "All banks precharged\n");
        if (prev_state == PWR_SREF) {
            // set refresh state to REF_SREF_EXIT, ensuring isAvailable
            // continues to return false during tXS after SREF exit
            // Schedule a refresh which kicks things back into action
            // when it finishes
            refreshState = REF_SREF_EXIT;
            schedule(refreshEvent, curTick() + memory.tXS);
        } else {
            // if we have a pending refresh, and are now moving to
            // the idle state, directly transition to a refresh
            if ((refreshState == REF_PRE) || (refreshState == REF_PD_EXIT)) {
                // ensure refresh is restarted only after final PRE command.
                // do not restart refresh if controller is in an intermediate
                // state, after PRE_PDN exit, when banks are IDLE but an
                // ACT is scheduled.
                if (!activateEvent.scheduled()) {
                    // there should be nothing waiting at this point
                    assert(!powerEvent.scheduled());
                    // update the state in zero time and proceed below
                    pwrState = PWR_REF;
                } else {
                    // must have PRE scheduled to transition back to IDLE
                    // and re-kick off refresh
                    assert(prechargeEvent.scheduled());
                }
            }
        }
    }

    // we transition to the refresh state, let the refresh state
    // machine know of this state update and let it deal with the
    // scheduling of the next power state transition as well as the
    // following refresh
    if (pwrState == PWR_REF) {
        assert(refreshState == REF_PRE || refreshState == REF_PD_EXIT);
        DPRINTF(DRAMState, "Refreshing\n");

        // kick the refresh event loop into action again, and that
        // in turn will schedule a transition to the idle power
        // state once the refresh is done
        if (refreshState == REF_PD_EXIT) {
            // Wait for PD exit timing to complete before issuing REF
            schedule(refreshEvent, curTick() + memory.tXP);
        } else {
            schedule(refreshEvent, curTick());
        }
        // Banks transitioned to IDLE, start REF
        refreshState = REF_START;
    }
}

void
DRAMCtrl::Rank::updatePowerStats()
{
    // All commands up to refresh have completed
    // flush cmdList to DRAMPower
    flushCmdList();

    // update the counters for DRAMPower, passing false to
    // indicate that this is not the last command in the
    // list. DRAMPower requires this information for the
    // correct calculation of the background energy at the end
    // of the simulation. Ideally we would want to call this
    // function with true once at the end of the
    // simulation. However, the discarded energy is extremely
    // small and does not affect the final results.
    power.powerlib.updateCounters(false);

    // call the energy function
    power.powerlib.calcEnergy();

    // Get the energy and power from DRAMPower
    Data::MemoryPowerModel::Energy energy =
        power.powerlib.getEnergy();
    Data::MemoryPowerModel::Power rank_power =
        power.powerlib.getPower();
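    // The assignments below scale DRAMPower's per-device energies up to the
    // rank; a stand-alone numeric illustration (values assumed, in pJ):
    //
    //   double rankEnergyExample()
    //   {
    //       const double act_energy_per_device = 1250.0; // assumed
    //       const unsigned devices_per_rank = 8;         // assumed x8 parts
    //       // all devices in a rank act in lock-step, so the rank-level
    //       // energy is the per-device energy times the device count
    //       return act_energy_per_device * devices_per_rank; // 10000 pJ
    //   }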
    actEnergy = energy.act_energy * memory.devicesPerRank;
    preEnergy = energy.pre_energy * memory.devicesPerRank;
    readEnergy = energy.read_energy * memory.devicesPerRank;
    writeEnergy = energy.write_energy * memory.devicesPerRank;
    refreshEnergy = energy.ref_energy * memory.devicesPerRank;
    actBackEnergy = energy.act_stdby_energy * memory.devicesPerRank;
    preBackEnergy = energy.pre_stdby_energy * memory.devicesPerRank;
    actPowerDownEnergy = energy.f_act_pd_energy * memory.devicesPerRank;
    prePowerDownEnergy = energy.f_pre_pd_energy * memory.devicesPerRank;
    selfRefreshEnergy = energy.sref_energy * memory.devicesPerRank;
    totalEnergy = energy.total_energy * memory.devicesPerRank;
    averagePower = rank_power.average_power * memory.devicesPerRank;
}

void
DRAMCtrl::Rank::computeStats()
{
    DPRINTF(DRAM, "Computing final stats\n");

    // Force DRAM power to update counters based on time spent in
    // current state up to curTick()
    cmdList.push_back(Command(MemCommand::NOP, 0, curTick()));

    // Update the stats
    updatePowerStats();

    // final update of power state times
    pwrStateTime[pwrState] += (curTick() - pwrStateTick);
    pwrStateTick = curTick();
}

void
DRAMCtrl::Rank::regStats()
{
    using namespace Stats;

    pwrStateTime
        .init(6)
        .name(name() + ".memoryStateTime")
        .desc("Time in different power states");
    pwrStateTime.subname(0, "IDLE");
    pwrStateTime.subname(1, "REF");
    pwrStateTime.subname(2, "SREF");
    pwrStateTime.subname(3, "PRE_PDN");
    pwrStateTime.subname(4, "ACT");
    pwrStateTime.subname(5, "ACT_PDN");

    actEnergy
        .name(name() + ".actEnergy")
        .desc("Energy for activate commands per rank (pJ)");

    preEnergy
        .name(name() + ".preEnergy")
        .desc("Energy for precharge commands per rank (pJ)");

    readEnergy
        .name(name() + ".readEnergy")
        .desc("Energy for read commands per rank (pJ)");

    writeEnergy
        .name(name() + ".writeEnergy")
        .desc("Energy for write commands per rank (pJ)");

    refreshEnergy
        .name(name() + ".refreshEnergy")
        .desc("Energy for refresh commands per rank (pJ)");

    actBackEnergy
        .name(name() + ".actBackEnergy")
        .desc("Energy for active background per rank (pJ)");

    preBackEnergy
        .name(name() + ".preBackEnergy")
        .desc("Energy for precharge background per rank (pJ)");

    actPowerDownEnergy
        .name(name() + ".actPowerDownEnergy")
        .desc("Energy for active power-down per rank (pJ)");

    prePowerDownEnergy
        .name(name() + ".prePowerDownEnergy")
        .desc("Energy for precharge power-down per rank (pJ)");

    selfRefreshEnergy
        .name(name() + ".selfRefreshEnergy")
        .desc("Energy for self refresh per rank (pJ)");

    totalEnergy
        .name(name() + ".totalEnergy")
        .desc("Total energy per rank (pJ)");

    averagePower
        .name(name() + ".averagePower")
        .desc("Core power per rank (mW)");

    totalIdleTime
        .name(name() + ".totalIdleTime")
        .desc("Total idle time per DRAM rank");
    registerDumpCallback(new RankDumpCallback(this));
}

void
DRAMCtrl::regStats()
{
    using namespace Stats;

    AbstractMemory::regStats();

    for (auto r : ranks) {
        r->regStats();
    }

    readReqs
        .name(name() + ".readReqs")
        .desc("Number of read requests accepted");

    writeReqs
        .name(name() + ".writeReqs")
        .desc("Number of write requests accepted");

    readBursts
        .name(name() + ".readBursts")
        .desc("Number of DRAM read bursts, "
              "including those serviced by the write queue");

    writeBursts
        .name(name() + ".writeBursts")
        .desc("Number of DRAM write bursts, "
              "including those merged in the write queue");

    servicedByWrQ
        .name(name() + ".servicedByWrQ")
        .desc("Number of DRAM read bursts serviced by the write queue");

    mergedWrBursts
        .name(name() + ".mergedWrBursts")
        .desc("Number of DRAM write bursts merged with an existing one");

    neitherReadNorWrite
        .name(name() + ".neitherReadNorWriteReqs")
        .desc("Number of requests that are neither read nor write");

    perBankRdBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankRdBursts")
        .desc("Per bank read bursts");

    perBankWrBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankWrBursts")
        .desc("Per bank write bursts");

    avgRdQLen
        .name(name() + ".avgRdQLen")
        .desc("Average read queue length when enqueuing")
        .precision(2);

    avgWrQLen
        .name(name() + ".avgWrQLen")
        .desc("Average write queue length when enqueuing")
        .precision(2);

    totQLat
        .name(name() + ".totQLat")
        .desc("Total ticks spent queuing");

    totBusLat
        .name(name() + ".totBusLat")
        .desc("Total ticks spent in databus transfers");

    totMemAccLat
        .name(name() + ".totMemAccLat")
        .desc("Total ticks spent from burst creation until serviced "
              "by the DRAM");

    avgQLat
        .name(name() + ".avgQLat")
        .desc("Average queueing delay per DRAM burst")
        .precision(2);

    avgQLat = totQLat / (readBursts - servicedByWrQ);

    avgBusLat
        .name(name() + ".avgBusLat")
        .desc("Average bus latency per DRAM burst")
        .precision(2);

    avgBusLat = totBusLat / (readBursts - servicedByWrQ);

    avgMemAccLat
        .name(name() + ".avgMemAccLat")
        .desc("Average memory access latency per DRAM burst")
        .precision(2);

    avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ);

    numRdRetry
        .name(name() + ".numRdRetry")
        .desc("Number of times read queue was full causing retry");

    numWrRetry
        .name(name() + ".numWrRetry")
        .desc("Number of times write queue was full causing retry");

    readRowHits
        .name(name() + ".readRowHits")
        .desc("Number of row buffer hits during reads");

    writeRowHits
        .name(name() + ".writeRowHits")
        .desc("Number of row buffer hits during writes");

    readRowHitRate
        .name(name() + ".readRowHitRate")
        .desc("Row buffer hit rate for reads")
        .precision(2);

    readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100;

    writeRowHitRate
        .name(name() + ".writeRowHitRate")
        .desc("Row buffer hit rate for writes")
        .precision(2);

    writeRowHitRate = (writeRowHits / (writeBursts - mergedWrBursts)) * 100;
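
    // the assignments above (avgQLat, avgBusLat, readRowHitRate, etc.)
    // define Stats::Formula expressions; they are evaluated from the
    // underlying counters when the stats are dumped, not at this point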

    readPktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".readPktSize")
        .desc("Read request sizes (log2)");

    writePktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".writePktSize")
        .desc("Write request sizes (log2)");

    rdQLenPdf
        .init(readBufferSize)
        .name(name() + ".rdQLenPdf")
        .desc("Read queue length seen by an incoming request");

    wrQLenPdf
        .init(writeBufferSize)
        .name(name() + ".wrQLenPdf")
        .desc("Write queue length seen by an incoming request");

    bytesPerActivate
        .init(maxAccessesPerRow)
        .name(name() + ".bytesPerActivate")
        .desc("Bytes accessed per row activation")
        .flags(nozero);

    rdPerTurnAround
        .init(readBufferSize)
        .name(name() + ".rdPerTurnAround")
        .desc("Reads before turning the bus around for writes")
        .flags(nozero);

    wrPerTurnAround
        .init(writeBufferSize)
        .name(name() + ".wrPerTurnAround")
        .desc("Writes before turning the bus around for reads")
        .flags(nozero);

    bytesReadDRAM
        .name(name() + ".bytesReadDRAM")
        .desc("Total number of bytes read from DRAM");

    bytesReadWrQ
        .name(name() + ".bytesReadWrQ")
        .desc("Total number of bytes read from write queue");

    bytesWritten
        .name(name() + ".bytesWritten")
        .desc("Total number of bytes written to DRAM");

    bytesReadSys
        .name(name() + ".bytesReadSys")
        .desc("Total read bytes from the system interface side");

    bytesWrittenSys
        .name(name() + ".bytesWrittenSys")
        .desc("Total written bytes from the system interface side");

    avgRdBW
        .name(name() + ".avgRdBW")
        .desc("Average DRAM read bandwidth in MByte/s")
        .precision(2);

    avgRdBW = (bytesReadDRAM / 1000000) / simSeconds;

    avgWrBW
        .name(name() + ".avgWrBW")
        .desc("Average achieved write bandwidth in MByte/s")
        .precision(2);

    avgWrBW = (bytesWritten / 1000000) / simSeconds;

    avgRdBWSys
        .name(name() + ".avgRdBWSys")
        .desc("Average system read bandwidth in MByte/s")
        .precision(2);

    avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;

    avgWrBWSys
        .name(name() + ".avgWrBWSys")
        .desc("Average system write bandwidth in MByte/s")
        .precision(2);

    avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;

    peakBW
        .name(name() + ".peakBW")
        .desc("Theoretical peak bandwidth in MByte/s")
        .precision(2);

    peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000;
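
    // SimClock::Frequency is the number of ticks per second and tBURST
    // is expressed in ticks, so Frequency / tBURST is the number of
    // bursts per second. As a worked example (numbers are illustrative,
    // not taken from any particular config): a 64-byte burst every
    // 3.75 ns gives 64 / 3.75e-9 / 1e6, i.e. roughly 17067 MByte/s.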

    busUtil
        .name(name() + ".busUtil")
        .desc("Data bus utilization in percentage")
        .precision(2);

    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;

    totGap
        .name(name() + ".totGap")
        .desc("Total gap between requests");

    avgGap
        .name(name() + ".avgGap")
        .desc("Average gap between requests")
        .precision(2);

    avgGap = totGap / (readReqs + writeReqs);

    // Stats for DRAM Power calculation based on Micron datasheet
    busUtilRead
        .name(name() + ".busUtilRead")
        .desc("Data bus utilization in percentage for reads")
        .precision(2);

    busUtilRead = avgRdBW / peakBW * 100;

    busUtilWrite
        .name(name() + ".busUtilWrite")
        .desc("Data bus utilization in percentage for writes")
        .precision(2);

    busUtilWrite = avgWrBW / peakBW * 100;

    pageHitRate
        .name(name() + ".pageHitRate")
        .desc("Row buffer hit rate, read and write combined")
        .precision(2);

    pageHitRate = (writeRowHits + readRowHits) /
        (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100;
}

void
DRAMCtrl::recvFunctional(PacketPtr pkt)
{
    // rely on the abstract memory
    functionalAccess(pkt);
}

BaseSlavePort&
DRAMCtrl::getSlavePort(const string &if_name, PortID idx)
{
    if (if_name != "port") {
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return port;
    }
}

DrainState
DRAMCtrl::drain()
{
    // if there is anything in any of our internal queues, we are not
    // yet drained and need to kick things into action
    if (!(writeQueue.empty() && readQueue.empty() && respQueue.empty() &&
          allRanksDrained())) {

        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
                " resp: %d\n", writeQueue.size(), readQueue.size(),
                respQueue.size());

        // the only queue that is not drained automatically over time
        // is the write queue, thus kick things into action if needed
        if (!writeQueue.empty() && !nextReqEvent.scheduled()) {
            schedule(nextReqEvent, curTick());
        }

        // also need to kick off events to exit self-refresh
        for (auto r : ranks) {
            // force self-refresh exit, which in turn will issue auto-refresh
            if (r->pwrState == PWR_SREF) {
                DPRINTF(DRAM, "Rank%d: Forcing self-refresh wakeup in "
                        "drain\n", r->rank);
                r->scheduleWakeUpEvent(tXS);
            }
        }

        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}

bool
DRAMCtrl::allRanksDrained() const
{
    // true until proven false
    bool all_ranks_drained = true;
    for (auto r : ranks) {
        // verify that the power state is IDLE, ensuring all banks are
        // closed and the rank is not in a low-power state
        all_ranks_drained = r->inPwrIdleState() && all_ranks_drained;
    }
    return all_ranks_drained;
}

void
DRAMCtrl::drainResume()
{
    if (!isTimingMode && system()->isTimingMode()) {
        // if we switched to timing mode, kick things into action,
        // and behave as if we restored from a checkpoint
        startup();
    } else if (isTimingMode && !system()->isTimingMode()) {
        // if we switch from timing mode, stop the refresh events to
        // not cause issues with KVM
        for (auto r : ranks) {
            r->suspend();
        }
    }

    // update the mode
    isTimingMode = system()->isTimingMode();
}

DRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _memory)
    : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this),
      memory(_memory)
{ }

AddrRangeList
DRAMCtrl::MemoryPort::getAddrRanges() const
{
    AddrRangeList ranges;
    ranges.push_back(memory.getAddrRange());
    return ranges;
}

void
DRAMCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(memory.name());

    if (!queue.checkFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory.recvFunctional(pkt);
    }

    pkt->popLabel();
}
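
// in atomic mode the access is performed immediately and the controller
// returns an approximate latency estimate instead of scheduling events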
Tick
DRAMCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory.recvAtomic(pkt);
}

bool
DRAMCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller
    return memory.recvTimingReq(pkt);
}

DRAMCtrl*
DRAMCtrlParams::create()
{
    return new DRAMCtrl(this);
}