dram_ctrl.cc revision 11675:60d18201148d
/*
 * Copyright (c) 2010-2016 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 *          Ani Udipi
 *          Neha Agarwal
 *          Omar Naji
 */

#include "base/bitfield.hh"
#include "base/trace.hh"
#include "debug/DRAM.hh"
#include "debug/DRAMPower.hh"
#include "debug/DRAMState.hh"
#include "debug/Drain.hh"
#include "mem/dram_ctrl.hh"
#include "sim/system.hh"

using namespace std;
using namespace Data;

DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) :
    AbstractMemory(p),
    port(name() + ".port", *this), isTimingMode(false),
    retryRdReq(false), retryWrReq(false),
    busState(READ),
    nextReqEvent(this), respondEvent(this),
    deviceSize(p->device_size),
    deviceBusWidth(p->device_bus_width), burstLength(p->burst_length),
    deviceRowBufferSize(p->device_rowbuffer_size),
    devicesPerRank(p->devices_per_rank),
    burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8),
    rowBufferSize(devicesPerRank * deviceRowBufferSize),
    columnsPerRowBuffer(rowBufferSize / burstSize),
    columnsPerStripe(range.interleaved() ?
                     range.granularity() / burstSize : 1),
    ranksPerChannel(p->ranks_per_channel),
    bankGroupsPerRank(p->bank_groups_per_rank),
    bankGroupArch(p->bank_groups_per_rank > 0),
    banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
    readBufferSize(p->read_buffer_size),
    writeBufferSize(p->write_buffer_size),
    writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
    writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
    minWritesPerSwitch(p->min_writes_per_switch),
    writesThisTime(0), readsThisTime(0),
    tCK(p->tCK), tWTR(p->tWTR), tRTW(p->tRTW), tCS(p->tCS), tBURST(p->tBURST),
    tCCD_L(p->tCCD_L), tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS),
    tWR(p->tWR), tRTP(p->tRTP), tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD),
    tRRD_L(p->tRRD_L), tXAW(p->tXAW), tXP(p->tXP), tXS(p->tXS),
    activationLimit(p->activation_limit),
    memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
    pageMgmt(p->page_policy),
    maxAccessesPerRow(p->max_accesses_per_row),
    frontendLatency(p->static_frontend_latency),
    backendLatency(p->static_backend_latency),
    busBusyUntil(0), prevArrival(0),
    nextReqTime(0), activeRank(0), timeStampOffset(0)
{
    // sanity check the ranks since we rely on bit slicing for the
    // address decoding
    fatal_if(!isPowerOf2(ranksPerChannel), "DRAM rank count of %d is not "
             "allowed, must be a power of two\n", ranksPerChannel);

    fatal_if(!isPowerOf2(burstSize), "DRAM burst size %d is not allowed, "
             "must be a power of two\n", burstSize);

    for (int i = 0; i < ranksPerChannel; i++) {
        Rank* rank = new Rank(*this, p);
        ranks.push_back(rank);

        rank->actTicks.resize(activationLimit, 0);
        rank->banks.resize(banksPerRank);
        rank->rank = i;

        for (int b = 0; b < banksPerRank; b++) {
            rank->banks[b].bank = b;
            // GDDR addressing of banks to BG is linear.
            // Here we assume that all DRAM generations address bank
            // groups as follows:
            if (bankGroupArch) {
                // Simply assign lower bits to bank group in order to
                // rotate across bank groups as banks are incremented
                // e.g. with 4 banks per bank group and 16 banks total:
                //    banks 0,4,8,12  are in bank group 0
                //    banks 1,5,9,13  are in bank group 1
                //    banks 2,6,10,14 are in bank group 2
                //    banks 3,7,11,15 are in bank group 3
                rank->banks[b].bankgr = b % bankGroupsPerRank;
            } else {
                // No bank groups; simply assign to bank number
                rank->banks[b].bankgr = b;
            }
        }
    }

    // perform a basic check of the write thresholds
    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p->write_low_thresh_perc,
              p->write_high_thresh_perc);

    // determine the rows per bank by looking at the total capacity
    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());

    // determine the actual DRAM capacity from the DRAM config in Mbytes
    uint64_t deviceCapacity = deviceSize / (1024 * 1024) * devicesPerRank *
        ranksPerChannel;
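
    // Illustrative geometry arithmetic (assumed example values, not
    // taken from any particular config): an x8 device with
    // deviceBusWidth = 8, burstLength = 8 and devicesPerRank = 8 gives
    // burstSize = (8 * 8 * 8) / 8 = 64 bytes per burst, and with
    // deviceRowBufferSize = 1024 bytes, rowBufferSize = 8192 bytes and
    // columnsPerRowBuffer = 8192 / 64 = 128 columns.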

    // if the actual DRAM size does not match the memory capacity in the
    // system, warn!
    if (deviceCapacity != capacity / (1024 * 1024))
        warn("DRAM device capacity (%d Mbytes) does not match the "
             "address range assigned (%d Mbytes)\n", deviceCapacity,
             capacity / (1024 * 1024));

    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
            AbstractMemory::size());

    DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n",
            rowBufferSize, columnsPerRowBuffer);

    rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel);

    // some basic sanity checks
    if (tREFI <= tRP || tREFI <= tRFC) {
        fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n",
              tREFI, tRP, tRFC);
    }

    // basic bank group architecture checks
    if (bankGroupArch) {
        // must have at least one bank per bank group
        if (bankGroupsPerRank > banksPerRank) {
            fatal("banks per rank (%d) must be equal to or larger than "
                  "bank groups per rank (%d)\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // must have same number of banks in each bank group
        if ((banksPerRank % bankGroupsPerRank) != 0) {
            fatal("Banks per rank (%d) must be evenly divisible by bank "
                  "groups per rank (%d) for equal banks per bank group\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // tCCD_L should be greater than the minimal, back-to-back burst delay
        if (tCCD_L <= tBURST) {
            fatal("tCCD_L (%d) should be larger than tBURST (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tCCD_L, tBURST, bankGroupsPerRank);
        }
        // tRRD_L is greater than the minimal, same bank group ACT-to-ACT
        // delay; some datasheets might specify it equal to tRRD
        if (tRRD_L < tRRD) {
            fatal("tRRD_L (%d) should be larger than tRRD (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tRRD_L, tRRD, bankGroupsPerRank);
        }
    }

}

void
DRAMCtrl::init()
{
    AbstractMemory::init();

    if (!port.isConnected()) {
        fatal("DRAMCtrl %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }

    // a bit of sanity checks on the interleaving, save it for here to
    // ensure that the system pointer is initialised
    if (range.interleaved()) {
        if (channels != range.stripes())
            fatal("%s has %d interleaved address stripes but %d channel(s)\n",
                  name(), range.stripes(), channels);

        if (addrMapping == Enums::RoRaBaChCo) {
            if (rowBufferSize != range.granularity()) {
                fatal("Channel interleaving of %s doesn't match RoRaBaChCo "
                      "address map\n", name());
            }
        } else if (addrMapping == Enums::RoRaBaCoCh ||
                   addrMapping == Enums::RoCoRaBaCh) {
            // for the interleavings with channel bits in the bottom,
            // if the system uses a channel striping granularity that
            // is larger than the DRAM burst size, then map the
            // sequential accesses within a stripe to a number of
            // columns in the DRAM, effectively placing some of the
            // lower-order column bits as the least-significant bits
            // of the address (above the ones denoting the burst size)
            assert(columnsPerStripe >= 1);
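
            // Illustrative case (assumed example values): with a
            // 64-byte burst and a 128-byte striping granularity,
            // columnsPerStripe = 128 / 64 = 2, i.e. two sequential
            // bursts go to the same channel before the address stripes
            // over to the next one.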

            // channel striping has to be done at a granularity that
            // is equal to or larger than a cache line
            if (system()->cacheLineSize() > range.granularity()) {
                fatal("Channel interleaving of %s must be at least as large "
                      "as the cache line size\n", name());
            }

            // ...and equal or smaller than the row-buffer size
            if (rowBufferSize < range.granularity()) {
                fatal("Channel interleaving of %s must be at most as large "
                      "as the row-buffer size\n", name());
            }
            // this is essentially the check above, so just to be sure
            assert(columnsPerStripe <= columnsPerRowBuffer);
        }
    }
}

void
DRAMCtrl::startup()
{
    // remember the memory system mode of operation
    isTimingMode = system()->isTimingMode();

    if (isTimingMode) {
        // timestamp offset should be in clock cycles for DRAMPower
        timeStampOffset = divCeil(curTick(), tCK);

        // update the start tick for the precharge accounting to the
        // current tick
        for (auto r : ranks) {
            r->startup(curTick() + tREFI - tRP);
        }

        // shift the bus busy time sufficiently far ahead that we never
        // have to worry about negative values when computing the time for
        // the next request, this will add an insignificant bubble at the
        // start of simulation
        busBusyUntil = curTick() + tRP + tRCD + tCL;
    }
}

Tick
DRAMCtrl::recvAtomic(PacketPtr pkt)
{
    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    // do the actual memory access and turn the packet into a response
    access(pkt);

    Tick latency = 0;
    if (pkt->hasData()) {
        // this value is not supposed to be accurate, just enough to
        // keep things going, mimic a closed page
        latency = tRP + tRCD + tCL;
    }
    return latency;
}

bool
DRAMCtrl::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n",
            readBufferSize, readQueue.size() + respQueue.size(),
            neededEntries);

    return
        (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize;
}

bool
DRAMCtrl::writeQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, writeQueue.size(), neededEntries);
    return (writeQueue.size() + neededEntries) > writeBufferSize;
}

DRAMCtrl::DRAMPacket*
DRAMCtrl::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size,
                     bool isRead)
{
    // decode the address based on the address mapping scheme, with
    // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and
    // channel, respectively
    uint8_t rank;
    uint8_t bank;
    // use a 64-bit unsigned during the computations as the row is
    // always the top bits, and check before creating the DRAMPacket
    uint64_t row;

    // truncate the address to a DRAM burst, which makes it unique to
    // a specific column, row, bank, rank and channel
    Addr addr = dramPktAddr / burstSize;

    // we have removed the lowest order address bits that denote the
    // position within the column
    if (addrMapping == Enums::RoRaBaChCo) {
        // the lowest order bits denote the column to ensure that
        // sequential cache lines occupy the same row
        addr = addr / columnsPerRowBuffer;

        // take out the channel part of the address
        addr = addr / channels;

        // after the channel bits, get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleaves
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits, no need to remove them from addr
        row = addr % rowsPerBank;
    } else if (addrMapping == Enums::RoRaBaCoCh) {
        // take out the lower-order column bits
        addr = addr / columnsPerStripe;

        // take out the channel part of the address
        addr = addr / channels;

        // next, the higher-order column bits
        addr = addr / (columnsPerRowBuffer / columnsPerStripe);

        // after the column bits, we get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleaves
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits, no need to remove them from addr
        row = addr % rowsPerBank;
    } else if (addrMapping == Enums::RoCoRaBaCh) {
        // optimise for closed page mode and utilise maximum
        // parallelism of the DRAM (at the cost of power)

        // take out the lower-order column bits
        addr = addr / columnsPerStripe;

        // take out the channel part of the address, note that this has
        // to match with how accesses are interleaved between the
        // controllers in the address mapping
        addr = addr / channels;

        // start with the bank bits, as this provides the maximum
        // opportunity for parallelism between requests
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // next get the rank bits
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // next, the higher-order column bits
        addr = addr / (columnsPerRowBuffer / columnsPerStripe);

        // lastly, get the row bits, no need to remove them from addr
        row = addr % rowsPerBank;
    } else
        panic("Unknown address mapping policy chosen!");
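
    // Worked RoCoRaBaCh decode (assumed geometry, purely illustrative:
    // burstSize = 64, one channel, columnsPerStripe = 1, 8 banks,
    // 2 ranks, columnsPerRowBuffer = 128): dramPktAddr = 10880 is burst
    // 170, so bank = 170 % 8 = 2, rank = (170 / 8) % 2 = 1, the
    // remaining 10 corresponds to the higher-order column bits, and
    // row = 0.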

    assert(rank < ranksPerChannel);
    assert(bank < banksPerRank);
    assert(row < rowsPerBank);
    assert(row < Bank::NO_ROW);

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            dramPktAddr, rank, bank, row);

    // create the corresponding DRAM packet with the entry time and
    // ready time set to the current tick, the latter will be updated
    // later
    uint16_t bank_id = banksPerRank * rank + bank;
    return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
                          size, ranks[rank]->banks[bank], *ranks[rank]);
}

void
DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pktCount != 0);

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    // Note if the pkt starting address is not aligned to burst size, the
    // address of the first DRAM packet is kept unaligned. Subsequent DRAM
    // packets are aligned to burst size boundaries. This is to ensure we
    // accurately check read packets against packets in the write queue.
    Addr addr = pkt->getAddr();
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
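        // Illustrative arithmetic (assumed burstSize = 64): for a
        // packet at addr = 100 with size 128, the first iteration
        // computes size = min((100 | 63) + 1, 100 + 128) - 100 = 28,
        // i.e. the bytes up to the burst boundary at 128; later
        // iterations then advance in aligned 64-byte steps.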
        readPktSize[ceilLog2(size)]++;
        readBursts++;

        // First check write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        Addr burst_addr = burstAlign(addr);
        // if the burst address is not present then there is no need
        // to look any further
        if (isInWriteQueue.find(burst_addr) != isInWriteQueue.end()) {
            for (const auto& p : writeQueue) {
                // check if the read is subsumed in the write queue
                // packet we are looking at
                if (p->addr <= addr && (addr + size) <= (p->addr + p->size)) {
                    foundInWrQ = true;
                    servicedByWrQ++;
                    pktsServicedByWrQ++;
                    DPRINTF(DRAM, "Read to addr %lld with size %d serviced by "
                            "write queue\n", addr, size);
                    bytesReadWrQ += burstSize;
                    break;
                }
            }
        }

        // If not found in the write q, make a DRAM packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pktCount > 1 && burst_helper == NULL) {
                DPRINTF(DRAM, "Read to addr %lld translates to %d "
                        "dram requests\n", pkt->getAddr(), pktCount);
                burst_helper = new BurstHelper(pktCount);
            }

            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true);
            dram_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            rdQLenPdf[readQueue.size() + respQueue.size()]++;

            DPRINTF(DRAM, "Adding to read queue\n");

            readQueue.push_back(dram_pkt);

            // Update stats
            avgRdQLen = readQueue.size() + respQueue.size();
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // If all packets are serviced by the write queue, we send the
    // response back
    if (pktsServicedByWrQ == pktCount) {
        accessAndRespond(pkt, frontendLatency);
        return;
    }

    // Update how many split packets are serviced by write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
DRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    Addr addr = pkt->getAddr();
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
        writePktSize[ceilLog2(size)]++;
        writeBursts++;

        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not
        bool merged = isInWriteQueue.find(burstAlign(addr)) !=
            isInWriteQueue.end();

        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false);

            assert(writeQueue.size() < writeBufferSize);
            wrQLenPdf[writeQueue.size()]++;

            DPRINTF(DRAM, "Adding to write queue\n");

            writeQueue.push_back(dram_pkt);
            isInWriteQueue.insert(burstAlign(addr));
            assert(writeQueue.size() == isInWriteQueue.size());

            // Update stats
            avgWrQLen = writeQueue.size();
        } else {
            DPRINTF(DRAM, "Merging write burst with existing queue entry\n");

            // keep track of the fact that this burst effectively
            // disappeared as it was merged with an existing one
            mergedWrBursts++;
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency);

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
DRAMCtrl::printQs() const {
    DPRINTF(DRAM, "===READ QUEUE===\n\n");
    for (auto i = readQueue.begin() ; i != readQueue.end() ; ++i) {
        DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
    for (auto i = respQueue.begin() ; i != respQueue.end() ; ++i) {
        DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
    for (auto i = writeQueue.begin() ; i != writeQueue.end() ; ++i) {
        DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
    }
}

bool
DRAMCtrl::recvTimingReq(PacketPtr pkt)
{
    // This is where we enter from the outside world
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "Should only see read and writes at memory controller\n");

    // Calc avg gap between requests
    if (prevArrival != 0) {
        totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();


    // Find out how many dram packets a pkt translates to
    // If the burst size is equal or larger than the pkt size, then a pkt
    // translates to only one dram packet. Otherwise, a pkt translates to
    // multiple dram packets
    unsigned size = pkt->getSize();
    unsigned offset = pkt->getAddr() & (burstSize - 1);
    unsigned int dram_pkt_count = divCeil(offset + size, burstSize);

    // check local buffers and do not accept if full
    if (pkt->isRead()) {
        assert(size != 0);
        if (readQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            numRdRetry++;
            return false;
        } else {
            addToReadQueue(pkt, dram_pkt_count);
            readReqs++;
            bytesReadSys += size;
        }
    } else {
        assert(pkt->isWrite());
        assert(size != 0);
        if (writeQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt, dram_pkt_count);
            writeReqs++;
            bytesWrittenSys += size;
        }
    }

    return true;
}

void
DRAMCtrl::processRespondEvent()
{
    DPRINTF(DRAM,
            "processRespondEvent(): Some req has reached its readyTime\n");

    DRAMPacket* dram_pkt = respQueue.front();

    if (dram_pkt->burstHelper) {
        // it is a split packet
        dram_pkt->burstHelper->burstsServiced++;
        if (dram_pkt->burstHelper->burstsServiced ==
            dram_pkt->burstHelper->burstCount) {
            // we have now serviced all children packets of a system packet
            // so we can now respond to the requester
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
            delete dram_pkt->burstHelper;
            dram_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
    }

    delete respQueue.front();
    respQueue.pop_front();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (drainState() == DrainState::Draining &&
            writeQueue.empty() && readQueue.empty()) {

            DPRINTF(Drain, "DRAM controller done draining\n");
            signalDrainDone();
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetryReq();
    }
}

bool
DRAMCtrl::chooseNext(std::deque<DRAMPacket*>& queue, Tick extra_col_delay)
{
    // This method does the arbitration between requests. The chosen
    // packet is simply moved to the head of the queue. The other
    // methods know that this is the place to look. For example, with
    // FCFS, this method does nothing
    assert(!queue.empty());

    // bool to indicate if a packet to an available rank is found
    bool found_packet = false;
    if (queue.size() == 1) {
        DRAMPacket* dram_pkt = queue.front();
        // an available rank corresponds to one that is not refreshing
        if (ranks[dram_pkt->rank]->isAvailable()) {
            found_packet = true;
            DPRINTF(DRAM, "Single request, going to a free rank\n");
        } else {
            DPRINTF(DRAM, "Single request, going to a busy rank\n");
        }
        return found_packet;
    }

    if (memSchedPolicy == Enums::fcfs) {
        // check if there is a packet going to a free rank
        for (auto i = queue.begin(); i != queue.end() ; ++i) {
            DRAMPacket* dram_pkt = *i;
            if (ranks[dram_pkt->rank]->isAvailable()) {
                queue.erase(i);
                queue.push_front(dram_pkt);
                found_packet = true;
                break;
            }
        }
    } else if (memSchedPolicy == Enums::frfcfs) {
        found_packet = reorderQueue(queue, extra_col_delay);
    } else
        panic("No scheduling policy chosen\n");
    return found_packet;
}
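
// A brief summary of the FR-FCFS selection implemented below, in
// decreasing priority: 1) a row hit whose column command can issue
// seamlessly, 2) a packet whose bank can be activated 'behind the
// scenes' of the current burst, 3) a non-seamless row hit to an already
// prepped row, and 4) the packet with the earliest achievable activate
// time. Within each class, queue (FCFS) order breaks ties.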
bool
DRAMCtrl::reorderQueue(std::deque<DRAMPacket*>& queue, Tick extra_col_delay)
{
    // Only determine this if needed
    uint64_t earliest_banks = 0;
    bool hidden_bank_prep = false;

    // search for seamless row hits first, if no seamless row hit is
    // found then determine if there are other packets that can be issued
    // without incurring additional bus delay due to bank timing
    // Will select closed rows first to enable more open row possibilities
    // in future selections
    bool found_hidden_bank = false;

    // remember if we found a row hit, not seamless, but bank prepped
    // and ready
    bool found_prepped_pkt = false;

    // if we have no row hit, prepped or not, and no seamless packet,
    // just go for the earliest possible
    bool found_earliest_pkt = false;

    auto selected_pkt_it = queue.end();

    // time we need to issue a column command to be seamless
    const Tick min_col_at = std::max(busBusyUntil - tCL + extra_col_delay,
                                     curTick());
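
    // min_col_at is the latest tick at which a column command still
    // lines up with the bus becoming free: the data appears tCL after
    // the command, so a command issued by busBusyUntil - tCL (plus any
    // read/write turnaround in extra_col_delay) wastes no bus cycles.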

    for (auto i = queue.begin(); i != queue.end() ; ++i) {
        DRAMPacket* dram_pkt = *i;
        const Bank& bank = dram_pkt->bankRef;

        // check if rank is available, if not, jump to the next packet
        if (dram_pkt->rankRef.isAvailable()) {
            // check if it is a row hit
            if (bank.openRow == dram_pkt->row) {
                // no additional rank-to-rank or same bank-group
                // delays, or we switched read/write and might as well
                // go for the row hit
                if (bank.colAllowedAt <= min_col_at) {
                    // FCFS within the hits, giving priority to
                    // commands that can issue seamlessly, without
                    // additional delay, such as same rank accesses
                    // and/or different bank-group accesses
                    DPRINTF(DRAM, "Seamless row buffer hit\n");
                    selected_pkt_it = i;
                    // no need to look through the remaining queue entries
                    break;
                } else if (!found_hidden_bank && !found_prepped_pkt) {
                    // if we did not find a packet to a closed row that can
                    // issue the bank commands without incurring delay, and
                    // did not yet find a packet to a prepped row, remember
                    // the current one
                    selected_pkt_it = i;
                    found_prepped_pkt = true;
                    DPRINTF(DRAM, "Prepped row buffer hit\n");
                }
            } else if (!found_earliest_pkt) {
                // if we have not initialised the bank status, do it
                // now, and only once per scheduling decision
                if (earliest_banks == 0) {
                    // determine entries with the earliest bank delay
                    pair<uint64_t, bool> bankStatus =
                        minBankPrep(queue, min_col_at);
                    earliest_banks = bankStatus.first;
                    hidden_bank_prep = bankStatus.second;
                }

                // bank is amongst first available banks
                // minBankPrep will give priority to packets that can
                // issue seamlessly
                if (bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) {
                    found_earliest_pkt = true;
                    found_hidden_bank = hidden_bank_prep;

                    // give priority to packets that can issue
                    // bank commands 'behind the scenes'
                    // any additional delay if any will be due to
                    // col-to-col command requirements
                    if (hidden_bank_prep || !found_prepped_pkt)
                        selected_pkt_it = i;
                }
            }
        }
    }

    if (selected_pkt_it != queue.end()) {
        DRAMPacket* selected_pkt = *selected_pkt_it;
        queue.erase(selected_pkt_it);
        queue.push_front(selected_pkt);
        return true;
    }

    return false;
}

void
DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
    DPRINTF(DRAM, "Responding to Address %lld.. ", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    access(pkt);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());
        // response_time consumes the static latency and is charged also
        // with headerDelay that takes into account the delay provided by
        // the xbar and also the payloadDelay that takes into account the
        // number of data beats.
        Tick response_time = curTick() + static_latency + pkt->headerDelay +
                             pkt->payloadDelay;
        // Here we reset the timing of the packet before sending it out.
        pkt->headerDelay = pkt->payloadDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, response_time, true);
    } else {
        // @todo the packet is going to be deleted, and the DRAMPacket
        // still has a pointer to it
        pendingDelete.reset(pkt);
    }

    DPRINTF(DRAM, "Done\n");

    return;
}

void
DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref,
                       Tick act_tick, uint32_t row)
{
    assert(rank_ref.actTicks.size() == activationLimit);

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    // update the open row
    assert(bank_ref.openRow == Bank::NO_ROW);
    bank_ref.openRow = row;

    // start counting anew, this covers both the case when we
    // auto-precharged, and when this access is forced to
    // precharge
    bank_ref.bytesAccessed = 0;
    bank_ref.rowAccesses = 0;

    ++rank_ref.numBanksActive;
    assert(rank_ref.numBanksActive <= banksPerRank);

    DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got %d active\n",
            bank_ref.bank, rank_ref.rank, act_tick,
            ranks[rank_ref.rank]->numBanksActive);

    rank_ref.cmdList.push_back(Command(MemCommand::ACT, bank_ref.bank,
                               act_tick));

    DPRINTF(DRAMPower, "%llu,ACT,%d,%d\n", divCeil(act_tick, tCK) -
            timeStampOffset, bank_ref.bank, rank_ref.rank);

    // The next access has to respect tRAS for this bank
    bank_ref.preAllowedAt = act_tick + tRAS;

    // Respect the row-to-column command delay
    bank_ref.colAllowedAt = std::max(act_tick + tRCD, bank_ref.colAllowedAt);

    // start by enforcing tRRD
    for (int i = 0; i < banksPerRank; i++) {
        // next activate to any bank in this rank must not happen
        // before tRRD
        if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) {
            // bank group architecture requires longer delays between
            // ACT commands within the same bank group. Use tRRD_L
            // in this case
            rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD_L,
                                             rank_ref.banks[i].actAllowedAt);
        } else {
            // use the shorter tRRD value when either:
            // 1) bank group architecture is not supported
            // 2) bank is in a different bank group
            rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD,
                                             rank_ref.banks[i].actAllowedAt);
        }
    }

    // next, we deal with tXAW, if the activation limit is disabled
    // then we directly schedule an activate power event
    if (!rank_ref.actTicks.empty()) {
        // sanity check
        if (rank_ref.actTicks.back() &&
            (act_tick - rank_ref.actTicks.back()) < tXAW) {
            panic("Got %d activates in window %d (%llu - %llu) which "
                  "is smaller than %llu\n", activationLimit, act_tick -
                  rank_ref.actTicks.back(), act_tick,
                  rank_ref.actTicks.back(), tXAW);
        }

        // shift the times used for the book keeping, the last element
        // (highest index) is the oldest one and hence the lowest value
        rank_ref.actTicks.pop_back();

        // record a new activation (in the future)
        rank_ref.actTicks.push_front(act_tick);

        // cannot activate more than X times in time window tXAW, push the
        // next one (the X + 1'st activate) to be tXAW away from the
        // oldest in our window of X
        if (rank_ref.actTicks.back() &&
            (act_tick - rank_ref.actTicks.back()) < tXAW) {
            DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "
                    "no earlier than %llu\n", activationLimit,
                    rank_ref.actTicks.back() + tXAW);
            for (int j = 0; j < banksPerRank; j++)
                // next activate must not happen before end of window
                rank_ref.banks[j].actAllowedAt =
                    std::max(rank_ref.actTicks.back() + tXAW,
                             rank_ref.banks[j].actAllowedAt);
        }
    }
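
    // Worked picture of the window bookkeeping above (illustrative,
    // with activationLimit X = 4): actTicks holds the four most recent
    // ACT ticks, newest at the front. If the oldest of them is less
    // than tXAW in the past, the (X + 1)'st ACT is pushed out to tXAW
    // after that oldest one, which is exactly the four-activate-window
    // (tFAW) rule.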

    // at the point when this activate takes place, make sure we
    // transition to the active power state
    if (!rank_ref.activateEvent.scheduled())
        schedule(rank_ref.activateEvent, act_tick);
    else if (rank_ref.activateEvent.when() > act_tick)
        // move it sooner in time
        reschedule(rank_ref.activateEvent, act_tick);
}

void
DRAMCtrl::prechargeBank(Rank& rank_ref, Bank& bank, Tick pre_at, bool trace)
{
    // make sure the bank has an open row
    assert(bank.openRow != Bank::NO_ROW);

    // sample the bytes per activate here since we are closing
    // the page
    bytesPerActivate.sample(bank.bytesAccessed);

    bank.openRow = Bank::NO_ROW;

    // no precharge allowed before this one
    bank.preAllowedAt = pre_at;

    Tick pre_done_at = pre_at + tRP;

    bank.actAllowedAt = std::max(bank.actAllowedAt, pre_done_at);

    assert(rank_ref.numBanksActive != 0);
    --rank_ref.numBanksActive;

    DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got "
            "%d active\n", bank.bank, rank_ref.rank, pre_at,
            rank_ref.numBanksActive);

    if (trace) {

        rank_ref.cmdList.push_back(Command(MemCommand::PRE, bank.bank,
                                   pre_at));
        DPRINTF(DRAMPower, "%llu,PRE,%d,%d\n", divCeil(pre_at, tCK) -
                timeStampOffset, bank.bank, rank_ref.rank);
    }
    // if we look at the current number of active banks we might be
    // tempted to think the DRAM is now idle, however this can be
    // undone by an activate that is scheduled to happen before we
    // would have reached the idle state, so schedule an event and
    // rather check once we actually make it to the point in time when
    // the (last) precharge takes place
    if (!rank_ref.prechargeEvent.scheduled())
        schedule(rank_ref.prechargeEvent, pre_done_at);
    else if (rank_ref.prechargeEvent.when() < pre_done_at)
        reschedule(rank_ref.prechargeEvent, pre_done_at);
}

void
DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt)
{
    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    // get the rank
    Rank& rank = dram_pkt->rankRef;

    // get the bank
    Bank& bank = dram_pkt->bankRef;

    // for the state we need to track if it is a row hit or not
    bool row_hit = true;

    // respect any constraints on the command (e.g. tRCD or tCCD)
    Tick cmd_at = std::max(bank.colAllowedAt, curTick());

    // Determine the access latency and update the bank state
    if (bank.openRow == dram_pkt->row) {
        // nothing to do
    } else {
        row_hit = false;

        // If there is a page open, precharge it.
        if (bank.openRow != Bank::NO_ROW) {
            prechargeBank(rank, bank, std::max(bank.preAllowedAt, curTick()));
        }

        // next we need to account for the delay in activating the
        // page
        Tick act_tick = std::max(bank.actAllowedAt, curTick());

        // Record the activation and deal with all the global timing
        // constraints caused by a new activation (tRRD and tXAW)
        activateBank(rank, bank, act_tick, dram_pkt->row);

        // issue the command as early as possible
        cmd_at = bank.colAllowedAt;
    }

    // we need to wait until the bus is available before we can issue
    // the command
    cmd_at = std::max(cmd_at, busBusyUntil - tCL);

    // update the packet ready time
    dram_pkt->readyTime = cmd_at + tCL + tBURST;

    // only one burst can use the bus at any one point in time
    assert(dram_pkt->readyTime - busBusyUntil >= tBURST);
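
    // Illustrative latency (assumed DDR3-1600-like values: tCL =
    // 13.75ns, tBURST = 5ns, tRP = tRCD = 13.75ns): a row hit that
    // issues immediately is ready tCL + tBURST = 18.75ns after cmd_at;
    // a closed row additionally waits tRCD, and a row conflict pays
    // tRP + tRCD on top through the actAllowedAt/colAllowedAt
    // constraints.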

    // update the time for the next read/write burst for each
    // bank (add a max with tCCD/tCCD_L here)
    Tick cmd_dly;
    for (int j = 0; j < ranksPerChannel; j++) {
        for (int i = 0; i < banksPerRank; i++) {
            // next burst to same bank group in this rank must not happen
            // before tCCD_L. Different bank group timing requirement is
            // tBURST; Add tCS for different ranks
            if (dram_pkt->rank == j) {
                if (bankGroupArch &&
                    (bank.bankgr == ranks[j]->banks[i].bankgr)) {
                    // bank group architecture requires longer delays between
                    // RD/WR burst commands to the same bank group.
                    // Use tCCD_L in this case
                    cmd_dly = tCCD_L;
                } else {
                    // use tBURST (equivalent to tCCD_S), the shorter
                    // cas-to-cas delay value, when either:
                    // 1) bank group architecture is not supported
                    // 2) bank is in a different bank group
                    cmd_dly = tBURST;
                }
            } else {
                // different rank is by default in a different bank group
                // use tBURST (equivalent to tCCD_S), which is the shorter
                // cas-to-cas delay in this case
                // Add tCS to account for rank-to-rank bus delay requirements
                cmd_dly = tBURST + tCS;
            }
            ranks[j]->banks[i].colAllowedAt = std::max(cmd_at + cmd_dly,
                                              ranks[j]->banks[i].colAllowedAt);
        }
    }

    // Save rank of current access
    activeRank = dram_pkt->rank;

    // If this is a write, we also need to respect the write recovery
    // time before a precharge, in the case of a read, respect the
    // read to precharge constraint
    bank.preAllowedAt = std::max(bank.preAllowedAt,
                                 dram_pkt->isRead ? cmd_at + tRTP :
                                 dram_pkt->readyTime + tWR);

    // increment the bytes accessed and the accesses per row
    bank.bytesAccessed += burstSize;
    ++bank.rowAccesses;

    // if we reached the max, then issue with an auto-precharge
    bool auto_precharge = pageMgmt == Enums::close ||
        bank.rowAccesses == maxAccessesPerRow;

    // if we did not hit the limit, we might still want to
    // auto-precharge
    if (!auto_precharge &&
        (pageMgmt == Enums::open_adaptive ||
         pageMgmt == Enums::close_adaptive)) {
        // a twist on the open and close page policies:
        // 1) open_adaptive page policy does not blindly keep the
        // page open, but closes it if there are no row hits, and there
        // are bank conflicts in the queue
        // 2) close_adaptive page policy does not blindly close the
        // page, but closes it only if there are no row hits in the queue.
        // In this case, only force an auto precharge when there
        // are no same page hits in the queue
        bool got_more_hits = false;
        bool got_bank_conflict = false;

        // either look at the read queue or write queue
        const deque<DRAMPacket*>& queue = dram_pkt->isRead ? readQueue :
            writeQueue;
        auto p = queue.begin();
        // make sure we are not considering the packet that we are
        // currently dealing with (which is the head of the queue)
        ++p;

        // keep on looking until we find a hit or reach the end of the queue
        // 1) if a hit is found, then both open and close adaptive
        // policies keep the page open
        // 2) if no hit is found, got_bank_conflict is set to true if a
        // bank conflict request is waiting in the queue
        while (!got_more_hits && p != queue.end()) {
            bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
                (dram_pkt->bank == (*p)->bank);
            bool same_row = dram_pkt->row == (*p)->row;
            got_more_hits |= same_rank_bank && same_row;
            got_bank_conflict |= same_rank_bank && !same_row;
            ++p;
        }

        // auto pre-charge when either
        // 1) open_adaptive policy, we have not got any more hits, and
        // have a bank conflict
        // 2) close_adaptive policy and we have not got any more hits
        auto_precharge = !got_more_hits &&
            (got_bank_conflict || pageMgmt == Enums::close_adaptive);
    }

    // DRAMPower trace command to be written
    std::string mem_cmd = dram_pkt->isRead ? "RD" : "WR";

    // MemCommand required for DRAMPower library
    MemCommand::cmds command = (mem_cmd == "RD") ? MemCommand::RD :
                                                   MemCommand::WR;

    // Update bus state
    busBusyUntil = dram_pkt->readyTime;

    DPRINTF(DRAM, "Access to %lld, ready at %lld bus busy until %lld.\n",
            dram_pkt->addr, dram_pkt->readyTime, busBusyUntil);

    dram_pkt->rankRef.cmdList.push_back(Command(command, dram_pkt->bank,
                                        cmd_at));

    DPRINTF(DRAMPower, "%llu,%s,%d,%d\n", divCeil(cmd_at, tCK) -
            timeStampOffset, mem_cmd, dram_pkt->bank, dram_pkt->rank);

    // if this access should use auto-precharge, then we are
    // closing the row after the read/write burst
    if (auto_precharge) {
        // if auto-precharge push a PRE command at the correct tick to the
        // list used by DRAMPower library to calculate power
        prechargeBank(rank, bank, std::max(curTick(), bank.preAllowedAt));

        DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
    }

    // Update the minimum timing between the requests, this is a
    // conservative estimate of when we have to schedule the next
    // request to not introduce any unnecessary bubbles. In most cases
    // we will wake up sooner than we have to.
    nextReqTime = busBusyUntil - (tRP + tRCD + tCL);

    // Update the stats and schedule the next request
    if (dram_pkt->isRead) {
        ++readsThisTime;
        if (row_hit)
            readRowHits++;
        bytesReadDRAM += burstSize;
        perBankRdBursts[dram_pkt->bankId]++;

        // Update latency stats
        totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
        totBusLat += tBURST;
        totQLat += cmd_at - dram_pkt->entryTime;
    } else {
        ++writesThisTime;
        if (row_hit)
            writeRowHits++;
        bytesWritten += burstSize;
        perBankWrBursts[dram_pkt->bankId]++;
    }
}
void
DRAMCtrl::processNextReqEvent()
{
    int busyRanks = 0;
    for (auto r : ranks) {
        if (!r->isAvailable()) {
            // rank is busy refreshing
            busyRanks++;

            // let the rank know that if it was waiting to drain, it
            // is now done and ready to proceed
            r->checkDrainDone();
        }
    }

    if (busyRanks == ranksPerChannel) {
        // if all ranks are refreshing wait for them to finish
        // and stall this state machine without taking any further
        // action, and do not schedule a new nextReqEvent
        return;
    }

    // pre-emptively set to false. Overwrite if in READ_TO_WRITE
    // or WRITE_TO_READ state
    bool switched_cmd_type = false;
    if (busState == READ_TO_WRITE) {
        DPRINTF(DRAM, "Switching to writes after %d reads with %d reads "
                "waiting\n", readsThisTime, readQueue.size());

        // sample and reset the read-related stats as we are now
        // transitioning to writes, and all reads are done
        rdPerTurnAround.sample(readsThisTime);
        readsThisTime = 0;

        // now proceed to do the actual writes
        busState = WRITE;
        switched_cmd_type = true;
    } else if (busState == WRITE_TO_READ) {
        DPRINTF(DRAM, "Switching to reads after %d writes with %d writes "
                "waiting\n", writesThisTime, writeQueue.size());

        wrPerTurnAround.sample(writesThisTime);
        writesThisTime = 0;

        busState = READ;
        switched_cmd_type = true;
    }

    // when we get here it is either a read or a write
    if (busState == READ) {

        // track if we should switch or not
        bool switch_to_writes = false;

        if (readQueue.empty()) {
            // In the case there is no read request to go next,
            // trigger writes if we have passed the low threshold (or
            // if we are draining)
            if (!writeQueue.empty() &&
                (drainState() == DrainState::Draining ||
                 writeQueue.size() > writeLowThreshold)) {

                switch_to_writes = true;
            } else {
                // check if we are drained
                if (drainState() == DrainState::Draining &&
                    respQueue.empty()) {

                    DPRINTF(Drain, "DRAM controller done draining\n");
                    signalDrainDone();
                }

                // nothing to do, not even any point in scheduling an
                // event for the next request
                return;
            }
        } else {
            // bool to check if there is a read to a free rank
            bool found_read = false;

            // Figure out which read request goes next, and move it to the
            // front of the read queue
            // If we are changing command type, incorporate the minimum
            // bus turnaround delay, which will be tCS in the
            // different-rank case
            found_read = chooseNext(readQueue,
                                    switched_cmd_type ? tCS : 0);

            // if no read to an available rank is found then return
            // at this point. There could be writes to the available ranks
            // which are above the required threshold. However, to
            // avoid adding more complexity to the code, return and wait
            // for a refresh event to kick things into action again.
            if (!found_read)
                return;

            DRAMPacket* dram_pkt = readQueue.front();
            assert(dram_pkt->rankRef.isAvailable());
            // here we get a bit creative and shift the bus busy time not
            // just the tWTR, but also a CAS latency to capture the fact
            // that we are allowed to prepare a new bank, but not issue a
            // read command until after tWTR, in essence we capture a
            // bubble on the data bus that is tWTR + tCL
            if (switched_cmd_type && dram_pkt->rank == activeRank) {
                busBusyUntil += tWTR + tCL;
            }

            doDRAMAccess(dram_pkt);

            // At this point we're done dealing with the request
            readQueue.pop_front();

            // sanity check
            assert(dram_pkt->size <= burstSize);
            assert(dram_pkt->readyTime >= curTick());

            // Insert into response queue. It will be sent back to the
            // requestor at its readyTime
            if (respQueue.empty()) {
                assert(!respondEvent.scheduled());
                schedule(respondEvent, dram_pkt->readyTime);
            } else {
                assert(respQueue.back()->readyTime <= dram_pkt->readyTime);
                assert(respondEvent.scheduled());
            }

            respQueue.push_back(dram_pkt);

            // we have so many writes that we have to transition
            if (writeQueue.size() > writeHighThreshold) {
                switch_to_writes = true;
            }
        }

        // switching to writes, either because the read queue is empty
        // and the writes have passed the low threshold (or we are
        // draining), or because the writes hit the high threshold
        if (switch_to_writes) {
            // transition to writing
            busState = READ_TO_WRITE;
        }
    } else {
        // bool to check if write to free rank is found
        bool found_write = false;

        // If we are changing command type, incorporate the minimum
        // bus turnaround delay
        found_write = chooseNext(writeQueue,
                                 switched_cmd_type ? std::min(tRTW, tCS) : 0);

        // if no writes to an available rank are found then return.
        // There could be reads to the available ranks. However, to avoid
        // adding more complexity to the code, return at this point and wait
        // for a refresh event to kick things into action again.
        if (!found_write)
            return;

        DRAMPacket* dram_pkt = writeQueue.front();
        assert(dram_pkt->rankRef.isAvailable());
        // sanity check
        assert(dram_pkt->size <= burstSize);

        // add a bubble to the data bus, as defined by the
        // tRTW when access is to the same rank as previous burst
        // Different rank timing is handled with tCS, which is
        // applied to colAllowedAt
        if (switched_cmd_type && dram_pkt->rank == activeRank) {
            busBusyUntil += tRTW;
        }

        doDRAMAccess(dram_pkt);

        writeQueue.pop_front();
        isInWriteQueue.erase(burstAlign(dram_pkt->addr));
        delete dram_pkt;

        // If we emptied the write queue, or got sufficiently below the
        // threshold (using the minWritesPerSwitch as the hysteresis) and
        // are not draining, or we have reads waiting and have done enough
        // writes, then switch to reads.
        if (writeQueue.empty() ||
            (writeQueue.size() + minWritesPerSwitch < writeLowThreshold &&
             drainState() != DrainState::Draining) ||
            (!readQueue.empty() && writesThisTime >= minWritesPerSwitch)) {
            // turn the bus back around for reads again
            busState = WRITE_TO_READ;

            // note that we switch back to reads also in the idle
            // case, which eventually will check for any draining and
            // also pause any further scheduling if there is really
            // nothing to do
        }
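
        // Illustrative hysteresis (assumed example values): with a
        // 64-entry write buffer, write_low_thresh_perc = 50 and
        // minWritesPerSwitch = 16, a write burst continues until the
        // queue drops below 32 - 16 = 16 entries, so the bus does not
        // thrash back and forth between reads and writes.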
    }
    // It is possible that a refresh to another rank kicks things back into
    // action before reaching this point.
    if (!nextReqEvent.scheduled())
        schedule(nextReqEvent, std::max(nextReqTime, curTick()));

    // If there is space available and we have writes waiting then let
    // them retry. This is done here to ensure that the retry does not
    // cause a nextReqEvent to be scheduled before we do so as part of
    // the next request processing
    if (retryWrReq && writeQueue.size() < writeBufferSize) {
        retryWrReq = false;
        port.sendRetryReq();
    }
}

pair<uint64_t, bool>
DRAMCtrl::minBankPrep(const deque<DRAMPacket*>& queue,
                      Tick min_col_at) const
{
    uint64_t bank_mask = 0;
    Tick min_act_at = MaxTick;

    // latest Tick for which ACT can occur without incurring additional
    // delay on the data bus
    const Tick hidden_act_max = std::max(min_col_at - tRCD, curTick());
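
    // In other words: an ACT issued at hidden_act_max completes its
    // tRCD exactly when the earliest seamless column slot (min_col_at)
    // arrives, so any activate at or before this tick is fully hidden
    // behind the burst currently occupying the data bus.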

    // Flag condition when burst can issue back-to-back with previous burst
    bool found_seamless_bank = false;

    // Flag condition when bank can be opened without incurring additional
    // delay on the data bus
    bool hidden_bank_prep = false;

    // determine if we have queued transactions targeting the
    // bank in question
    vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
    for (const auto& p : queue) {
        if (p->rankRef.isAvailable())
            got_waiting[p->bankId] = true;
    }

    // Find command with optimal bank timing
    // Will prioritize commands that can issue seamlessly.
    for (int i = 0; i < ranksPerChannel; i++) {
        for (int j = 0; j < banksPerRank; j++) {
            uint16_t bank_id = i * banksPerRank + j;

            // if we have waiting requests for the bank, and it is
            // amongst the first available, update the mask
            if (got_waiting[bank_id]) {
                // make sure this rank is not currently refreshing.
                assert(ranks[i]->isAvailable());
                // simplistic approximation of when the bank can issue
                // an activate, ignoring any rank-to-rank switching
                // cost in this calculation
                Tick act_at = ranks[i]->banks[j].openRow == Bank::NO_ROW ?
                    std::max(ranks[i]->banks[j].actAllowedAt, curTick()) :
                    std::max(ranks[i]->banks[j].preAllowedAt, curTick()) + tRP;

                // When is the earliest the R/W burst can issue?
                Tick col_at = std::max(ranks[i]->banks[j].colAllowedAt,
                                       act_at + tRCD);

                // bank can issue burst back-to-back (seamlessly) with
                // previous burst
                bool new_seamless_bank = col_at <= min_col_at;

                // if we found a new seamless bank or we have no
                // seamless banks, and got a bank with an earlier
                // activate time, it should be added to the bit mask
                if (new_seamless_bank ||
                    (!found_seamless_bank && act_at <= min_act_at)) {
                    // if we did not have a seamless bank before, and
                    // we do now, reset the bank mask, also reset it
                    // if we have not yet found a seamless bank and
                    // the activate time is smaller than what we have
                    // seen so far
                    if (!found_seamless_bank &&
                        (new_seamless_bank || act_at < min_act_at)) {
                        bank_mask = 0;
                    }

                    found_seamless_bank |= new_seamless_bank;

                    // ACT can occur 'behind the scenes'
                    hidden_bank_prep = act_at <= hidden_act_max;

                    // set the bit corresponding to the available bank
                    replaceBits(bank_mask, bank_id, bank_id, 1);
                    min_act_at = act_at;
                }
            }
        }
    }

    return make_pair(bank_mask, hidden_bank_prep);
}

DRAMCtrl::Rank::Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p)
    : EventManager(&_memory), memory(_memory),
      pwrStateTrans(PWR_IDLE), pwrState(PWR_IDLE), pwrStateTick(0),
      refreshState(REF_IDLE), refreshDueAt(0),
      power(_p, false), numBanksActive(0),
      activateEvent(*this), prechargeEvent(*this),
      refreshEvent(*this), powerEvent(*this)
{ }

void
DRAMCtrl::Rank::startup(Tick ref_tick)
{
    assert(ref_tick > curTick());

    pwrStateTick = curTick();

    // kick off the refresh, and give ourselves enough time to
    // precharge
    schedule(refreshEvent, ref_tick);
}

void
DRAMCtrl::Rank::suspend()
{
    deschedule(refreshEvent);
}

void
DRAMCtrl::Rank::checkDrainDone()
{
    // if this rank was waiting to drain it is now able to proceed to
    // precharge
    if (refreshState == REF_DRAIN) {
        DPRINTF(DRAM, "Refresh drain done, now precharging\n");

        refreshState = REF_PRE;

        // hand control back to the refresh event loop
        schedule(refreshEvent, curTick());
    }
}

void
DRAMCtrl::Rank::flushCmdList()
{
    // at the moment sort the list of commands and update the counters
    // for the DRAMPower library when doing a refresh
    sort(cmdList.begin(), cmdList.end(), DRAMCtrl::sortTime);

    auto next_iter = cmdList.begin();
    // push the commands to DRAMPower
    for ( ; next_iter != cmdList.end() ; ++next_iter) {
        Command cmd = *next_iter;
        if (cmd.timeStamp <= curTick()) {
            // Move all commands at or before curTick to DRAMPower
            power.powerlib.doCommand(cmd.type, cmd.bank,
                                     divCeil(cmd.timeStamp, memory.tCK) -
                                     memory.timeStampOffset);
        } else {
            // done - found all commands at or before curTick()
            // next_iter references the 1st command after curTick
            break;
        }
    }
    // reset cmdList to only contain commands after curTick
    // if there are no commands after curTick, the updated cmdList will be
    // empty; in this case, next_iter is cmdList.end()
    cmdList.assign(next_iter, cmdList.end());
}

void
DRAMCtrl::Rank::processActivateEvent()
{
    // we should transition to the active state as soon as any bank is active
    if (pwrState != PWR_ACT)
        // note that at this point numBanksActive could be back at
        // zero again due to a precharge scheduled in the future
        schedulePowerEvent(PWR_ACT, curTick());
}

void
DRAMCtrl::Rank::processPrechargeEvent()
{
    // if we reached zero, then special conditions apply as we track
    // if all banks are precharged for the power models
    if (numBanksActive == 0) {
        // we should transition to the idle state when the last bank
        // is precharged
        schedulePowerEvent(PWR_IDLE, curTick());
    }
}
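
// The refresh state machine below walks REF_IDLE -> REF_DRAIN ->
// REF_PRE -> REF_RUN and back to REF_IDLE: note when the refresh became
// due, let any in-flight access to this rank drain, precharge all
// banks, hold the rank busy for tRFC, and schedule the next refresh
// relative to the tick at which this one was due.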

void
DRAMCtrl::Rank::processActivateEvent()
{
    // we should transition to the active state as soon as any bank is active
    if (pwrState != PWR_ACT)
        // note that at this point numBanksActive could be back at
        // zero again due to a precharge scheduled in the future
        schedulePowerEvent(PWR_ACT, curTick());
}

void
DRAMCtrl::Rank::processPrechargeEvent()
{
    // if we reached zero, then special conditions apply as we track
    // if all banks are precharged for the power models
    if (numBanksActive == 0) {
        // we should transition to the idle state when the last bank
        // is precharged
        schedulePowerEvent(PWR_IDLE, curTick());
    }
}

void
DRAMCtrl::Rank::processRefreshEvent()
{
    // when first preparing the refresh, remember when it was due
    if (refreshState == REF_IDLE) {
        // remember when the refresh is due
        refreshDueAt = curTick();

        // proceed to drain
        refreshState = REF_DRAIN;

        DPRINTF(DRAM, "Refresh due\n");
    }

    // let any scheduled read or write to the same rank go ahead,
    // after which it will hand control back to this event loop
    if (refreshState == REF_DRAIN) {
        // if a request is currently being handled and it is accessing
        // this rank, then wait for it to finish
        if ((rank == memory.activeRank)
            && (memory.nextReqEvent.scheduled())) {
            // hand control over to the request loop until it is
            // evaluated next
            DPRINTF(DRAM, "Refresh awaiting draining\n");

            return;
        } else {
            refreshState = REF_PRE;
        }
    }

    // at this point, ensure that all banks are precharged
    if (refreshState == REF_PRE) {
        // precharge any active bank if we are not already in the idle
        // state
        if (pwrState != PWR_IDLE) {
            // at the moment, we use a precharge all even if there is
            // only a single bank open
            DPRINTF(DRAM, "Precharging all\n");

            // first determine when we can precharge
            Tick pre_at = curTick();

            for (auto &b : banks) {
                // respect both causality and any existing bank
                // constraints, some banks could already have a
                // (auto) precharge scheduled
                pre_at = std::max(b.preAllowedAt, pre_at);
            }

            // make sure all banks per rank are precharged, and for those
            // that already are, update their availability
            Tick act_allowed_at = pre_at + memory.tRP;

            for (auto &b : banks) {
                if (b.openRow != Bank::NO_ROW) {
                    memory.prechargeBank(*this, b, pre_at, false);
                } else {
                    b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at);
                    b.preAllowedAt = std::max(b.preAllowedAt, pre_at);
                }
            }

            // precharge all banks in rank
            cmdList.push_back(Command(MemCommand::PREA, 0, pre_at));

            DPRINTF(DRAMPower, "%llu,PREA,0,%d\n",
                    divCeil(pre_at, memory.tCK) -
                    memory.timeStampOffset, rank);
        } else {
            DPRINTF(DRAM, "All banks already precharged, starting refresh\n");

            // go ahead and kick the power state machine into gear if
            // we are already idle
            schedulePowerEvent(PWR_REF, curTick());
        }

        refreshState = REF_RUN;
        assert(numBanksActive == 0);

        // wait for all banks to be precharged, at which point the
        // power state machine will transition to the idle state, and
        // automatically move to a refresh, at that point it will also
        // call this method to get the refresh event loop going again
        return;
    }

    // last but not least we perform the actual refresh
    if (refreshState == REF_RUN) {
        // should never get here with any banks active
        assert(numBanksActive == 0);
        assert(pwrState == PWR_REF);

        Tick ref_done_at = curTick() + memory.tRFC;

        for (auto &b : banks) {
            b.actAllowedAt = ref_done_at;
        }

        // at the moment this affects all ranks
        cmdList.push_back(Command(MemCommand::REF, 0, curTick()));

        // All commands up to refresh have completed
        // flush cmdList to DRAMPower
        flushCmdList();

        // update the counters for DRAMPower, passing false to
        // indicate that this is not the last command in the
        // list. DRAMPower requires this information for the
        // correct calculation of the background energy at the end
        // of the simulation. Ideally we would want to call this
        // function with true once at the end of the
        // simulation. However, the discarded energy is extremely
        // small and does not affect the final results.
        power.powerlib.updateCounters(false);

        // call the energy function
        power.powerlib.calcEnergy();

        // Update the stats
        updatePowerStats();

        DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), memory.tCK) -
                memory.timeStampOffset, rank);

        // make sure we did not wait so long that we cannot make up
        // for it
        if (refreshDueAt + memory.tREFI < ref_done_at) {
            fatal("Refresh was delayed so long we cannot catch up\n");
        }

        // compensate for the delay in actually performing the refresh
        // when scheduling the next one
        schedule(refreshEvent, refreshDueAt + memory.tREFI - memory.tRP);

        assert(!powerEvent.scheduled());

        // move to the idle power state once the refresh is done, this
        // will also move the refresh state machine to the refresh
        // idle state
        schedulePowerEvent(PWR_IDLE, ref_done_at);

        DPRINTF(DRAMState, "Refresh done at %llu and next refresh at %llu\n",
                ref_done_at, refreshDueAt + memory.tREFI);
    }
}
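
// Illustrative numbers for the refresh scheduling above, assuming
// DDR3-style timing and 1 ps ticks: with tREFI = 7.8 us and
// tRP = 13.75 ns, the next refreshEvent is scheduled tRP before the
// nominal due time, leaving just enough slack for the drain and
// precharge phases so that the REF command itself can still start at
// refreshDueAt + tREFI. If draining ever pushes the end of the refresh
// (curTick() + tRFC) past the next due time, the fatal() above fires
// rather than silently violating the retention requirement.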

void
DRAMCtrl::Rank::schedulePowerEvent(PowerState pwr_state, Tick tick)
{
    // respect causality
    assert(tick >= curTick());

    if (!powerEvent.scheduled()) {
        DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n",
                tick, pwr_state);

        // insert the new transition
        pwrStateTrans = pwr_state;

        schedule(powerEvent, tick);
    } else {
        panic("Scheduled power event at %llu to state %d, "
              "with scheduled event at %llu to %d\n", tick, pwr_state,
              powerEvent.when(), pwrStateTrans);
    }
}

void
DRAMCtrl::Rank::processPowerEvent()
{
    // remember where we were, and for how long
    Tick duration = curTick() - pwrStateTick;
    PowerState prev_state = pwrState;

    // update the accounting
    pwrStateTime[prev_state] += duration;

    pwrState = pwrStateTrans;
    pwrStateTick = curTick();

    if (pwrState == PWR_IDLE) {
        DPRINTF(DRAMState, "All banks precharged\n");

        // if we were refreshing, make sure we start scheduling requests again
        if (prev_state == PWR_REF) {
            DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration);
            assert(pwrState == PWR_IDLE);

            // kick things into action again
            refreshState = REF_IDLE;
            // a request event could be already scheduled by the state
            // machine of the other rank
            if (!memory.nextReqEvent.scheduled())
                schedule(memory.nextReqEvent, curTick());
        } else {
            assert(prev_state == PWR_ACT);

            // if we have a pending refresh, and are now moving to
            // the idle state, directly transition to a refresh
            if (refreshState == REF_RUN) {
                // there should be nothing waiting at this point
                assert(!powerEvent.scheduled());

                // update the state in zero time and proceed below
                pwrState = PWR_REF;
            }
        }
    }

    // we transition to the refresh state, let the refresh state
    // machine know of this state update and let it deal with the
    // scheduling of the next power state transition as well as the
    // following refresh
    if (pwrState == PWR_REF) {
        DPRINTF(DRAMState, "Refreshing\n");
        // kick the refresh event loop into action again, and that
        // in turn will schedule a transition to the idle power
        // state once the refresh is done
        assert(refreshState == REF_RUN);
        processRefreshEvent();
    }
}
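
// Sketch of the transitions the two functions above implement in this
// revision (the controller's model, not the full DDR power spec):
//
//     PWR_IDLE --(first bank activated)---> PWR_ACT
//     PWR_ACT  --(last bank precharged)---> PWR_IDLE
//     PWR_IDLE --(refresh pending)--------> PWR_REF
//     PWR_REF  --(tRFC elapsed)-----------> PWR_IDLE
//
// PRE_PDN and ACT_PDN exist in the PowerState enum and in the
// memoryStateTime stat, but nothing here schedules a transition into
// the power-down states.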

void
DRAMCtrl::Rank::updatePowerStats()
{
    // Get the energy and power from DRAMPower
    Data::MemoryPowerModel::Energy energy =
        power.powerlib.getEnergy();
    Data::MemoryPowerModel::Power rank_power =
        power.powerlib.getPower();

    actEnergy = energy.act_energy * memory.devicesPerRank;
    preEnergy = energy.pre_energy * memory.devicesPerRank;
    readEnergy = energy.read_energy * memory.devicesPerRank;
    writeEnergy = energy.write_energy * memory.devicesPerRank;
    refreshEnergy = energy.ref_energy * memory.devicesPerRank;
    actBackEnergy = energy.act_stdby_energy * memory.devicesPerRank;
    preBackEnergy = energy.pre_stdby_energy * memory.devicesPerRank;
    totalEnergy = energy.total_energy * memory.devicesPerRank;
    averagePower = rank_power.average_power * memory.devicesPerRank;
}
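
// Note on the scaling above: DRAMPower models a single DRAM device,
// while a rank is devicesPerRank devices operating in lockstep, so
// every energy component is multiplied up to rank granularity. As an
// illustrative example, a rank of eight x8 devices each reporting
// 1000 pJ of activate energy yields actEnergy = 8000 pJ.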

void
DRAMCtrl::Rank::regStats()
{
    using namespace Stats;

    pwrStateTime
        .init(5)
        .name(name() + ".memoryStateTime")
        .desc("Time in different power states");
    pwrStateTime.subname(0, "IDLE");
    pwrStateTime.subname(1, "REF");
    pwrStateTime.subname(2, "PRE_PDN");
    pwrStateTime.subname(3, "ACT");
    pwrStateTime.subname(4, "ACT_PDN");

    actEnergy
        .name(name() + ".actEnergy")
        .desc("Energy for activate commands per rank (pJ)");

    preEnergy
        .name(name() + ".preEnergy")
        .desc("Energy for precharge commands per rank (pJ)");

    readEnergy
        .name(name() + ".readEnergy")
        .desc("Energy for read commands per rank (pJ)");

    writeEnergy
        .name(name() + ".writeEnergy")
        .desc("Energy for write commands per rank (pJ)");

    refreshEnergy
        .name(name() + ".refreshEnergy")
        .desc("Energy for refresh commands per rank (pJ)");

    actBackEnergy
        .name(name() + ".actBackEnergy")
        .desc("Energy for active background per rank (pJ)");

    preBackEnergy
        .name(name() + ".preBackEnergy")
        .desc("Energy for precharge background per rank (pJ)");

    totalEnergy
        .name(name() + ".totalEnergy")
        .desc("Total energy per rank (pJ)");

    averagePower
        .name(name() + ".averagePower")
        .desc("Core power per rank (mW)");
}

void
DRAMCtrl::regStats()
{
    using namespace Stats;

    AbstractMemory::regStats();

    for (auto r : ranks) {
        r->regStats();
    }

    readReqs
        .name(name() + ".readReqs")
        .desc("Number of read requests accepted");

    writeReqs
        .name(name() + ".writeReqs")
        .desc("Number of write requests accepted");

    readBursts
        .name(name() + ".readBursts")
        .desc("Number of DRAM read bursts, "
              "including those serviced by the write queue");

    writeBursts
        .name(name() + ".writeBursts")
        .desc("Number of DRAM write bursts, "
              "including those merged in the write queue");

    servicedByWrQ
        .name(name() + ".servicedByWrQ")
        .desc("Number of DRAM read bursts serviced by the write queue");

    mergedWrBursts
        .name(name() + ".mergedWrBursts")
        .desc("Number of DRAM write bursts merged with an existing one");

    neitherReadNorWrite
        .name(name() + ".neitherReadNorWriteReqs")
        .desc("Number of requests that are neither read nor write");

    perBankRdBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankRdBursts")
        .desc("Per bank read bursts");

    perBankWrBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankWrBursts")
        .desc("Per bank write bursts");

    avgRdQLen
        .name(name() + ".avgRdQLen")
        .desc("Average read queue length when enqueuing")
        .precision(2);

    avgWrQLen
        .name(name() + ".avgWrQLen")
        .desc("Average write queue length when enqueuing")
        .precision(2);

    totQLat
        .name(name() + ".totQLat")
        .desc("Total ticks spent queuing");

    totBusLat
        .name(name() + ".totBusLat")
        .desc("Total ticks spent in databus transfers");

    totMemAccLat
        .name(name() + ".totMemAccLat")
        .desc("Total ticks spent from burst creation until serviced "
              "by the DRAM");

    avgQLat
        .name(name() + ".avgQLat")
        .desc("Average queueing delay per DRAM burst")
        .precision(2);

    avgQLat = totQLat / (readBursts - servicedByWrQ);

    avgBusLat
        .name(name() + ".avgBusLat")
        .desc("Average bus latency per DRAM burst")
        .precision(2);

    avgBusLat = totBusLat / (readBursts - servicedByWrQ);

    avgMemAccLat
        .name(name() + ".avgMemAccLat")
        .desc("Average memory access latency per DRAM burst")
        .precision(2);

    avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ);
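
    // Note on the three averages above: reads serviced directly from
    // the write queue never occupy the DRAM data bus, so they are
    // excluded from the denominators; the averages thus describe only
    // bursts that actually accessed the DRAM.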

    numRdRetry
        .name(name() + ".numRdRetry")
        .desc("Number of times read queue was full causing retry");

    numWrRetry
        .name(name() + ".numWrRetry")
        .desc("Number of times write queue was full causing retry");

    readRowHits
        .name(name() + ".readRowHits")
        .desc("Number of row buffer hits during reads");

    writeRowHits
        .name(name() + ".writeRowHits")
        .desc("Number of row buffer hits during writes");

    readRowHitRate
        .name(name() + ".readRowHitRate")
        .desc("Row buffer hit rate for reads")
        .precision(2);

    readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100;

    writeRowHitRate
        .name(name() + ".writeRowHitRate")
        .desc("Row buffer hit rate for writes")
        .precision(2);

    writeRowHitRate = (writeRowHits / (writeBursts - mergedWrBursts)) * 100;

    readPktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".readPktSize")
        .desc("Read request sizes (log2)");

    writePktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".writePktSize")
        .desc("Write request sizes (log2)");

    rdQLenPdf
        .init(readBufferSize)
        .name(name() + ".rdQLenPdf")
        .desc("What read queue length does an incoming req see");

    wrQLenPdf
        .init(writeBufferSize)
        .name(name() + ".wrQLenPdf")
        .desc("What write queue length does an incoming req see");

    bytesPerActivate
        .init(maxAccessesPerRow)
        .name(name() + ".bytesPerActivate")
        .desc("Bytes accessed per row activation")
        .flags(nozero);

    rdPerTurnAround
        .init(readBufferSize)
        .name(name() + ".rdPerTurnAround")
        .desc("Reads before turning the bus around for writes")
        .flags(nozero);

    wrPerTurnAround
        .init(writeBufferSize)
        .name(name() + ".wrPerTurnAround")
        .desc("Writes before turning the bus around for reads")
        .flags(nozero);

    bytesReadDRAM
        .name(name() + ".bytesReadDRAM")
        .desc("Total number of bytes read from DRAM");

    bytesReadWrQ
        .name(name() + ".bytesReadWrQ")
        .desc("Total number of bytes read from write queue");

    bytesWritten
        .name(name() + ".bytesWritten")
        .desc("Total number of bytes written to DRAM");

    bytesReadSys
        .name(name() + ".bytesReadSys")
        .desc("Total read bytes from the system interface side");

    bytesWrittenSys
        .name(name() + ".bytesWrittenSys")
        .desc("Total written bytes from the system interface side");

    avgRdBW
        .name(name() + ".avgRdBW")
        .desc("Average DRAM read bandwidth in MByte/s")
        .precision(2);

    avgRdBW = (bytesReadDRAM / 1000000) / simSeconds;

    avgWrBW
        .name(name() + ".avgWrBW")
        .desc("Average achieved write bandwidth in MByte/s")
        .precision(2);

    avgWrBW = (bytesWritten / 1000000) / simSeconds;

    avgRdBWSys
        .name(name() + ".avgRdBWSys")
        .desc("Average system read bandwidth in MByte/s")
        .precision(2);

    avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;

    avgWrBWSys
        .name(name() + ".avgWrBWSys")
        .desc("Average system write bandwidth in MByte/s")
        .precision(2);

    avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;

    peakBW
        .name(name() + ".peakBW")
        .desc("Theoretical peak bandwidth in MByte/s")
        .precision(2);

    peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000;
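
    // Worked example for peakBW, assuming a DDR3-1600 x64 interface
    // and the default 1 ps tick: SimClock::Frequency is ticks per
    // second (1e12), tBURST is 5000 ticks (a burst of 8 at 1600 MT/s)
    // and burstSize is 64 bytes, giving
    // (1e12 / 5000) * 64 / 1000000 = 12800 MByte/s, i.e. the familiar
    // 12.8 GB/s peak for this interface.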

    busUtil
        .name(name() + ".busUtil")
        .desc("Data bus utilization in percentage")
        .precision(2);

    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;

    totGap
        .name(name() + ".totGap")
        .desc("Total gap between requests");

    avgGap
        .name(name() + ".avgGap")
        .desc("Average gap between requests")
        .precision(2);

    avgGap = totGap / (readReqs + writeReqs);

    // Stats for DRAM Power calculation based on Micron datasheet
    busUtilRead
        .name(name() + ".busUtilRead")
        .desc("Data bus utilization in percentage for reads")
        .precision(2);

    busUtilRead = avgRdBW / peakBW * 100;

    busUtilWrite
        .name(name() + ".busUtilWrite")
        .desc("Data bus utilization in percentage for writes")
        .precision(2);

    busUtilWrite = avgWrBW / peakBW * 100;

    pageHitRate
        .name(name() + ".pageHitRate")
        .desc("Row buffer hit rate, read and write combined")
        .precision(2);

    pageHitRate = (writeRowHits + readRowHits) /
        (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100;
}

void
DRAMCtrl::recvFunctional(PacketPtr pkt)
{
    // rely on the abstract memory
    functionalAccess(pkt);
}

BaseSlavePort&
DRAMCtrl::getSlavePort(const string &if_name, PortID idx)
{
    if (if_name != "port") {
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return port;
    }
}

DrainState
DRAMCtrl::drain()
{
    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (!(writeQueue.empty() && readQueue.empty() && respQueue.empty())) {
        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
                " resp: %d\n", writeQueue.size(), readQueue.size(),
                respQueue.size());

        // the only part that is not drained automatically over time
        // is the write queue, thus kick things into action if needed
        if (!writeQueue.empty() && !nextReqEvent.scheduled()) {
            schedule(nextReqEvent, curTick());
        }
        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}

void
DRAMCtrl::drainResume()
{
    if (!isTimingMode && system()->isTimingMode()) {
        // if we switched to timing mode, kick things into action,
        // and behave as if we restored from a checkpoint
        startup();
    } else if (isTimingMode && !system()->isTimingMode()) {
        // if we switched from timing mode, stop the refresh events to
        // not cause issues with KVM
        for (auto r : ranks) {
            r->suspend();
        }
    }

    // update the mode
    isTimingMode = system()->isTimingMode();
}

DRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _memory)
    : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this),
      memory(_memory)
{ }

AddrRangeList
DRAMCtrl::MemoryPort::getAddrRanges() const
{
    AddrRangeList ranges;
    ranges.push_back(memory.getAddrRange());
    return ranges;
}

void
DRAMCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(memory.name());

    if (!queue.checkFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory.recvFunctional(pkt);
    }

    pkt->popLabel();
}

Tick
DRAMCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory.recvAtomic(pkt);
}

bool
DRAMCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller
    return memory.recvTimingReq(pkt);
}

DRAMCtrl*
DRAMCtrlParams::create()
{
    return new DRAMCtrl(this);
}
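
// The factory above is how gem5's generated Params classes construct
// SimObjects: a Python config instantiates e.g. DDR3_1600_x64 (a
// DRAMCtrl subclass declared in src/mem/DRAM.py), and the resulting
// DRAMCtrlParams object's create() builds the C++ controller with
// those parameters.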