dram_ctrl.cc revision 10913
/*
 * Copyright (c) 2010-2015 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 *          Ani Udipi
 *          Neha Agarwal
 *          Omar Naji
 */

#include "base/bitfield.hh"
#include "base/trace.hh"
#include "debug/DRAM.hh"
#include "debug/DRAMPower.hh"
#include "debug/DRAMState.hh"
#include "debug/Drain.hh"
#include "mem/dram_ctrl.hh"
#include "sim/system.hh"

using namespace std;
using namespace Data;

DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) :
    AbstractMemory(p),
    port(name() + ".port", *this), isTimingMode(false),
    retryRdReq(false), retryWrReq(false),
    busState(READ),
    nextReqEvent(this), respondEvent(this),
    deviceSize(p->device_size),
    deviceBusWidth(p->device_bus_width), burstLength(p->burst_length),
    deviceRowBufferSize(p->device_rowbuffer_size),
    devicesPerRank(p->devices_per_rank),
    burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8),
    rowBufferSize(devicesPerRank * deviceRowBufferSize),
    columnsPerRowBuffer(rowBufferSize / burstSize),
    columnsPerStripe(range.interleaved() ?
                     range.granularity() / burstSize : 1),
    ranksPerChannel(p->ranks_per_channel),
    bankGroupsPerRank(p->bank_groups_per_rank),
    bankGroupArch(p->bank_groups_per_rank > 0),
    banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
    readBufferSize(p->read_buffer_size),
    writeBufferSize(p->write_buffer_size),
    writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
    writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
    minWritesPerSwitch(p->min_writes_per_switch),
    writesThisTime(0), readsThisTime(0),
    tCK(p->tCK), tWTR(p->tWTR), tRTW(p->tRTW), tCS(p->tCS), tBURST(p->tBURST),
    tCCD_L(p->tCCD_L), tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS),
    tWR(p->tWR), tRTP(p->tRTP), tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD),
    tRRD_L(p->tRRD_L), tXAW(p->tXAW), activationLimit(p->activation_limit),
    memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
    pageMgmt(p->page_policy),
    maxAccessesPerRow(p->max_accesses_per_row),
    frontendLatency(p->static_frontend_latency),
    backendLatency(p->static_backend_latency),
    busBusyUntil(0), prevArrival(0),
    nextReqTime(0), activeRank(0), timeStampOffset(0)
{
    // sanity check the ranks since we rely on bit slicing for the
    // address decoding
    fatal_if(!isPowerOf2(ranksPerChannel), "DRAM rank count of %d is not "
             "allowed, must be a power of two\n", ranksPerChannel);

    fatal_if(!isPowerOf2(burstSize), "DRAM burst size %d is not allowed, "
             "must be a power of two\n", burstSize);

    for (int i = 0; i < ranksPerChannel; i++) {
        Rank* rank = new Rank(*this, p);
        ranks.push_back(rank);

        rank->actTicks.resize(activationLimit, 0);
        rank->banks.resize(banksPerRank);
        rank->rank = i;

        for (int b = 0; b < banksPerRank; b++) {
            rank->banks[b].bank = b;
            // GDDR addressing of banks to BG is linear.
            // Here we assume that all DRAM generations address bank groups as
            // follows:
            if (bankGroupArch) {
                // Simply assign lower bits to bank group in order to
                // rotate across bank groups as banks are incremented
                // e.g. with 4 banks per bank group and 16 banks total:
                //    banks 0,4,8,12  are in bank group 0
                //    banks 1,5,9,13  are in bank group 1
                //    banks 2,6,10,14 are in bank group 2
                //    banks 3,7,11,15 are in bank group 3
                rank->banks[b].bankgr = b % bankGroupsPerRank;
            } else {
                // No bank groups; simply assign to bank number
                rank->banks[b].bankgr = b;
            }
        }
    }

    // perform a basic check of the write thresholds
    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p->write_low_thresh_perc,
              p->write_high_thresh_perc);
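
    // Illustrative numbers (assumed here, not taken from any particular
    // configuration): with a 64-entry write buffer, a high threshold of
    // 85% and a low threshold of 50%, the controller starts draining
    // writes once more than ~54 entries are queued and keeps servicing
    // them until the queue drops below 32 entries (see
    // processNextReqEvent), giving the bus turnaround some hysteresis.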

    // determine the rows per bank by looking at the total capacity
    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());

    // determine the actual DRAM capacity from the DRAM config in Mbytes
    uint64_t deviceCapacity = deviceSize / (1024 * 1024) * devicesPerRank *
        ranksPerChannel;

    // if the actual DRAM size does not match the memory capacity in the
    // system, warn
    if (deviceCapacity != capacity / (1024 * 1024))
        warn("DRAM device capacity (%d Mbytes) does not match the "
             "address range assigned (%d Mbytes)\n", deviceCapacity,
             capacity / (1024 * 1024));

    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
            AbstractMemory::size());

    DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n",
            rowBufferSize, columnsPerRowBuffer);

    rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel);
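
    // Worked example with assumed round numbers (not from any specific
    // configuration): a 1 GB channel with an 8 kB row buffer, 8 banks
    // per rank and 2 ranks gives
    //   rowsPerBank = 2^30 / (8192 * 8 * 2) = 8192 rows per bank.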

    // some basic sanity checks
    if (tREFI <= tRP || tREFI <= tRFC) {
        fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n",
              tREFI, tRP, tRFC);
    }

    // basic bank group architecture checks
    if (bankGroupArch) {
        // must have at least one bank per bank group
        if (bankGroupsPerRank > banksPerRank) {
            fatal("banks per rank (%d) must be equal to or larger than "
                  "bank groups per rank (%d)\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // must have same number of banks in each bank group
        if ((banksPerRank % bankGroupsPerRank) != 0) {
            fatal("Banks per rank (%d) must be evenly divisible by bank "
                  "groups per rank (%d) for equal banks per bank group\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // tCCD_L should be greater than the minimal, back-to-back burst delay
        if (tCCD_L <= tBURST) {
            fatal("tCCD_L (%d) should be larger than tBURST (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tCCD_L, tBURST, bankGroupsPerRank);
        }
        // tRRD_L is greater than the minimal, same bank group ACT-to-ACT delay
        // some datasheets might specify it equal to tRRD
        if (tRRD_L < tRRD) {
            fatal("tRRD_L (%d) should be larger than tRRD (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tRRD_L, tRRD, bankGroupsPerRank);
        }
    }

}

void
DRAMCtrl::init()
{
    AbstractMemory::init();

    if (!port.isConnected()) {
        fatal("DRAMCtrl %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }

    // a few sanity checks on the interleaving, saved for here to
    // ensure that the system pointer is initialised
    if (range.interleaved()) {
        if (channels != range.stripes())
            fatal("%s has %d interleaved address stripes but %d channel(s)\n",
                  name(), range.stripes(), channels);

        if (addrMapping == Enums::RoRaBaChCo) {
            if (rowBufferSize != range.granularity()) {
                fatal("Channel interleaving of %s doesn't match RoRaBaChCo "
                      "address map\n", name());
            }
        } else if (addrMapping == Enums::RoRaBaCoCh ||
                   addrMapping == Enums::RoCoRaBaCh) {
            // for the interleavings with channel bits in the bottom,
            // if the system uses a channel striping granularity that
            // is larger than the DRAM burst size, then map the
            // sequential accesses within a stripe to a number of
            // columns in the DRAM, effectively placing some of the
            // lower-order column bits as the least-significant bits
            // of the address (above the ones denoting the burst size)
            assert(columnsPerStripe >= 1);

            // channel striping has to be done at a granularity that
            // is equal or larger to a cache line
            if (system()->cacheLineSize() > range.granularity()) {
                fatal("Channel interleaving of %s must be at least as large "
                      "as the cache line size\n", name());
            }

            // ...and equal or smaller than the row-buffer size
            if (rowBufferSize < range.granularity()) {
                fatal("Channel interleaving of %s must be at most as large "
                      "as the row-buffer size\n", name());
            }
            // this is essentially the check above, so just to be sure
            assert(columnsPerStripe <= columnsPerRowBuffer);
        }
    }
}

void
DRAMCtrl::startup()
{
    // remember the memory system mode of operation
    isTimingMode = system()->isTimingMode();

    if (isTimingMode) {
        // timestamp offset should be in clock cycles for DRAMPower
        timeStampOffset = divCeil(curTick(), tCK);

        // update the start tick for the precharge accounting to the
        // current tick
        for (auto r : ranks) {
            r->startup(curTick() + tREFI - tRP);
        }

        // shift the bus busy time sufficiently far ahead that we never
        // have to worry about negative values when computing the time for
        // the next request, this will add an insignificant bubble at the
        // start of simulation
        busBusyUntil = curTick() + tRP + tRCD + tCL;
    }
}

Tick
DRAMCtrl::recvAtomic(PacketPtr pkt)
{
    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());

    // do the actual memory access and turn the packet into a response
    access(pkt);

    Tick latency = 0;
    if (!pkt->memInhibitAsserted() && pkt->hasData()) {
        // this value is not supposed to be accurate, just enough to
        // keep things going, mimic a closed page
        latency = tRP + tRCD + tCL;
    }
    return latency;
}

bool
DRAMCtrl::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n",
            readBufferSize, readQueue.size() + respQueue.size(),
            neededEntries);

    return
        (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize;
}

bool
DRAMCtrl::writeQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, writeQueue.size(), neededEntries);
    return (writeQueue.size() + neededEntries) > writeBufferSize;
}

DRAMCtrl::DRAMPacket*
DRAMCtrl::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size,
                     bool isRead)
{
    // decode the address based on the address mapping scheme, with
    // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and
    // channel, respectively
    uint8_t rank;
    uint8_t bank;
    // use a 64-bit unsigned during the computations as the row is
    // always the top bits, and check before creating the DRAMPacket
    uint64_t row;

    // truncate the address to a DRAM burst, which makes it unique to
    // a specific column, row, bank, rank and channel
    Addr addr = dramPktAddr / burstSize;

    // we have removed the lowest order address bits that denote the
    // position within the column
    if (addrMapping == Enums::RoRaBaChCo) {
        // the lowest order bits denote the column to ensure that
        // sequential cache lines occupy the same row
        addr = addr / columnsPerRowBuffer;

        // take out the channel part of the address
        addr = addr / channels;

        // after the channel bits, get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleaves
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::RoRaBaCoCh) {
        // take out the lower-order column bits
        addr = addr / columnsPerStripe;

        // take out the channel part of the address
        addr = addr / channels;

        // next, the higher-order column bits
        addr = addr / (columnsPerRowBuffer / columnsPerStripe);

        // after the column bits, we get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleaves
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::RoCoRaBaCh) {
        // optimise for closed page mode and utilise maximum
        // parallelism of the DRAM (at the cost of power)

        // take out the lower-order column bits
        addr = addr / columnsPerStripe;

        // take out the channel part of the address, note that this has
        // to match with how accesses are interleaved between the
        // controllers in the address mapping
        addr = addr / channels;

        // start with the bank bits, as this provides the maximum
        // opportunity for parallelism between requests
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // next get the rank bits
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // next, the higher-order column bits
        addr = addr / (columnsPerRowBuffer / columnsPerStripe);

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else
        panic("Unknown address mapping policy chosen!");

    assert(rank < ranksPerChannel);
    assert(bank < banksPerRank);
    assert(row < rowsPerBank);
    assert(row < Bank::NO_ROW);

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            dramPktAddr, rank, bank, row);

    // create the corresponding DRAM packet with the entry time and
    // ready time set to the current tick, the latter will be updated
    // later
    uint16_t bank_id = banksPerRank * rank + bank;
    return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
                          size, ranks[rank]->banks[bank], *ranks[rank]);
}
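
// Worked example of the decoding above with assumed parameters (64-byte
// bursts, 1024 columns per row buffer, 1 channel, 8 banks, 1 rank, and
// the RoRaBaChCo map): byte address 0x12345680 becomes burst number
// 0x12345680 / 64 = 0x48D15A; dividing by the 1024 columns leaves
// 0x1234, so bank = 0x1234 % 8 = 4, rank = (0x1234 / 8) % 1 = 0 and
// row = 0x1234 / 8 = 582. Each divide/modulo pair peels one field off
// the address, working from the least towards the most significant bits.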

void
DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pktCount != 0);

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    // Note if the pkt starting address is not aligned to burst size, the
    // address of the first DRAM packet is kept unaligned. Subsequent DRAM
    // packets are aligned to burst size boundaries. This is to ensure we
    // accurately check read packets against packets in the write queue.
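    //
    // Illustrative example (assuming 64-byte bursts, not tied to any
    // particular config): a 128-byte read starting at address 0x70
    // becomes three bursts: [0x70, 0x80) of size 16, [0x80, 0xC0) of
    // size 64, and [0xC0, 0xF0) of size 48. The expression
    // (addr | (burstSize - 1)) + 1 below rounds addr up to the next
    // burst boundary to produce each subsequent starting address.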
    Addr addr = pkt->getAddr();
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
        readPktSize[ceilLog2(size)]++;
        readBursts++;

        // First check write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        Addr burst_addr = burstAlign(addr);
        // if the burst address is not present then there is no need
        // looking any further
        if (isInWriteQueue.find(burst_addr) != isInWriteQueue.end()) {
            for (const auto& p : writeQueue) {
                // check if the read is subsumed in the write queue
                // packet we are looking at
                if (p->addr <= addr && (addr + size) <= (p->addr + p->size)) {
                    foundInWrQ = true;
                    servicedByWrQ++;
                    pktsServicedByWrQ++;
                    DPRINTF(DRAM, "Read to addr %lld with size %d serviced by "
                            "write queue\n", addr, size);
                    bytesReadWrQ += burstSize;
                    break;
                }
            }
        }

        // If not found in the write q, make a DRAM packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pktCount > 1 && burst_helper == NULL) {
                DPRINTF(DRAM, "Read to addr %lld translates to %d "
                        "dram requests\n", pkt->getAddr(), pktCount);
                burst_helper = new BurstHelper(pktCount);
            }

            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true);
            dram_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            rdQLenPdf[readQueue.size() + respQueue.size()]++;

            DPRINTF(DRAM, "Adding to read queue\n");

            readQueue.push_back(dram_pkt);

            // Update stats
            avgRdQLen = readQueue.size() + respQueue.size();
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // If all packets are serviced by the write queue, send the response back
    if (pktsServicedByWrQ == pktCount) {
        accessAndRespond(pkt, frontendLatency);
        return;
    }

    // Update how many split packets are serviced by write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
DRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    Addr addr = pkt->getAddr();
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
        writePktSize[ceilLog2(size)]++;
        writeBursts++;

        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not
        bool merged = isInWriteQueue.find(burstAlign(addr)) !=
            isInWriteQueue.end();

        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false);

            assert(writeQueue.size() < writeBufferSize);
            wrQLenPdf[writeQueue.size()]++;

            DPRINTF(DRAM, "Adding to write queue\n");

            writeQueue.push_back(dram_pkt);
            isInWriteQueue.insert(burstAlign(addr));
            assert(writeQueue.size() == isInWriteQueue.size());

            // Update stats
            avgWrQLen = writeQueue.size();
        } else {
            DPRINTF(DRAM, "Merging write burst with existing queue entry\n");

            // keep track of the fact that this burst effectively
            // disappeared as it was merged with an existing one
            mergedWrBursts++;
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency);

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
DRAMCtrl::printQs() const {
    DPRINTF(DRAM, "===READ QUEUE===\n\n");
    for (auto i = readQueue.begin(); i != readQueue.end(); ++i) {
        DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
    for (auto i = respQueue.begin(); i != respQueue.end(); ++i) {
        DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
    for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) {
        DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
    }
}

bool
DRAMCtrl::recvTimingReq(PacketPtr pkt)
{
    /// @todo temporary hack to deal with memory corruption issues until
    /// 4-phase transactions are complete
    for (int x = 0; x < pendingDelete.size(); x++)
        delete pendingDelete[x];
    pendingDelete.clear();

    // This is where we enter from the outside world
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    // simply drop inhibited packets and clean evictions
    if (pkt->memInhibitAsserted() ||
        pkt->cmd == MemCmd::CleanEvict) {
        DPRINTF(DRAM, "Inhibited packet or clean evict -- Dropping it now\n");
        pendingDelete.push_back(pkt);
        return true;
    }

    // Calc avg gap between requests
    if (prevArrival != 0) {
        totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();


    // Find out how many dram packets a pkt translates to
    // If the burst size is equal or larger than the pkt size, then a pkt
    // translates to only one dram packet. Otherwise, a pkt translates to
    // multiple dram packets
    unsigned size = pkt->getSize();
    unsigned offset = pkt->getAddr() & (burstSize - 1);
    unsigned int dram_pkt_count = divCeil(offset + size, burstSize);
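
    // For instance, with an assumed 64-byte burst, a 128-byte packet
    // at address 0x70 has offset 0x30, so
    // dram_pkt_count = divCeil(0x30 + 0x80, 0x40) = 3 bursts, matching
    // the splitting performed in addToReadQueue/addToWriteQueue.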

    // check local buffers and do not accept if full
    if (pkt->isRead()) {
        assert(size != 0);
        if (readQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            numRdRetry++;
            return false;
        } else {
            addToReadQueue(pkt, dram_pkt_count);
            readReqs++;
            bytesReadSys += size;
        }
    } else if (pkt->isWrite()) {
        assert(size != 0);
        if (writeQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt, dram_pkt_count);
            writeReqs++;
            bytesWrittenSys += size;
        }
    } else {
        DPRINTF(DRAM, "Neither read nor write, ignore timing\n");
        neitherReadNorWrite++;
        accessAndRespond(pkt, 1);
    }

    return true;
}

void
DRAMCtrl::processRespondEvent()
{
    DPRINTF(DRAM,
            "processRespondEvent(): Some req has reached its readyTime\n");

    DRAMPacket* dram_pkt = respQueue.front();

    if (dram_pkt->burstHelper) {
        // it is a split packet
        dram_pkt->burstHelper->burstsServiced++;
        if (dram_pkt->burstHelper->burstsServiced ==
            dram_pkt->burstHelper->burstCount) {
            // we have now serviced all children packets of a system packet
            // so we can now respond to the requester
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
            delete dram_pkt->burstHelper;
            dram_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
    }

    delete respQueue.front();
    respQueue.pop_front();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (drainState() == DrainState::Draining &&
            writeQueue.empty() && readQueue.empty()) {

            DPRINTF(Drain, "DRAM controller done draining\n");
            signalDrainDone();
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetryReq();
    }
}

bool
DRAMCtrl::chooseNext(std::deque<DRAMPacket*>& queue, Tick extra_col_delay)
{
    // This method does the arbitration between requests. The chosen
    // packet is simply moved to the head of the queue. The other
    // methods know that this is the place to look. For example, with
    // FCFS, this method does nothing
    assert(!queue.empty());

    // bool to indicate if a packet to an available rank is found
    bool found_packet = false;
    if (queue.size() == 1) {
        DRAMPacket* dram_pkt = queue.front();
        // available rank corresponds to state refresh idle
        if (ranks[dram_pkt->rank]->isAvailable()) {
            found_packet = true;
            DPRINTF(DRAM, "Single request, going to a free rank\n");
        } else {
            DPRINTF(DRAM, "Single request, going to a busy rank\n");
        }
        return found_packet;
    }

    if (memSchedPolicy == Enums::fcfs) {
        // check if there is a packet going to a free rank
        for (auto i = queue.begin(); i != queue.end(); ++i) {
            DRAMPacket* dram_pkt = *i;
            if (ranks[dram_pkt->rank]->isAvailable()) {
                queue.erase(i);
                queue.push_front(dram_pkt);
                found_packet = true;
                break;
            }
        }
    } else if (memSchedPolicy == Enums::frfcfs) {
        found_packet = reorderQueue(queue, extra_col_delay);
    } else
        panic("No scheduling policy chosen\n");
    return found_packet;
}
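
// A short summary of the selection order implemented by reorderQueue
// below (first-ready, first-come first-served): among requests to
// available ranks it prefers, in decreasing priority,
//   1) row hits whose burst can issue seamlessly (no extra bus delay),
//   2) requests to closed banks whose ACT/PRE can be hidden behind the
//      burst currently on the bus,
//   3) row hits to already-prepped banks that are not seamless,
//   4) otherwise, the request whose bank can be activated earliest.
// Within each class the oldest request wins.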

bool
DRAMCtrl::reorderQueue(std::deque<DRAMPacket*>& queue, Tick extra_col_delay)
{
    // Only determine this if needed
    uint64_t earliest_banks = 0;
    bool hidden_bank_prep = false;

    // search for seamless row hits first, if no seamless row hit is
    // found then determine if there are other packets that can be issued
    // without incurring additional bus delay due to bank timing
    // Will select closed rows first to enable more open row possibilities
    // in future selections
    bool found_hidden_bank = false;

    // remember if we found a row hit, not seamless, but bank prepped
    // and ready
    bool found_prepped_pkt = false;

    // if we have no row hit, prepped or not, and no seamless packet,
    // just go for the earliest possible
    bool found_earliest_pkt = false;

    auto selected_pkt_it = queue.end();

    // time we need to issue a column command to be seamless
    const Tick min_col_at = std::max(busBusyUntil - tCL + extra_col_delay,
                                     curTick());

    for (auto i = queue.begin(); i != queue.end(); ++i) {
        DRAMPacket* dram_pkt = *i;
        const Bank& bank = dram_pkt->bankRef;

        // check if rank is available, if not, jump to the next packet
        if (dram_pkt->rankRef.isAvailable()) {
            // check if it is a row hit
            if (bank.openRow == dram_pkt->row) {
                // no additional rank-to-rank or same bank-group
                // delays, or we switched read/write and might as well
                // go for the row hit
                if (bank.colAllowedAt <= min_col_at) {
                    // FCFS within the hits, giving priority to
                    // commands that can issue seamlessly, without
                    // additional delay, such as same rank accesses
                    // and/or different bank-group accesses
                    DPRINTF(DRAM, "Seamless row buffer hit\n");
                    selected_pkt_it = i;
                    // no need to look through the remaining queue entries
                    break;
                } else if (!found_hidden_bank && !found_prepped_pkt) {
                    // if we did not find a packet to a closed row that can
                    // issue the bank commands without incurring delay, and
                    // did not yet find a packet to a prepped row, remember
                    // the current one
                    selected_pkt_it = i;
                    found_prepped_pkt = true;
                    DPRINTF(DRAM, "Prepped row buffer hit\n");
                }
            } else if (!found_earliest_pkt) {
                // if we have not initialised the bank status, do it
                // now, and only once per scheduling decision
                if (earliest_banks == 0) {
                    // determine entries with earliest bank delay
                    pair<uint64_t, bool> bankStatus =
                        minBankPrep(queue, min_col_at);
                    earliest_banks = bankStatus.first;
                    hidden_bank_prep = bankStatus.second;
                }

                // bank is amongst first available banks
                // minBankPrep will give priority to packets that can
                // issue seamlessly
                if (bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) {
                    found_earliest_pkt = true;
                    found_hidden_bank = hidden_bank_prep;

                    // give priority to packets that can issue
                    // bank commands 'behind the scenes'
                    // any additional delay if any will be due to
                    // col-to-col command requirements
                    if (hidden_bank_prep || !found_prepped_pkt)
                        selected_pkt_it = i;
                }
            }
        }
    }

    if (selected_pkt_it != queue.end()) {
        DRAMPacket* selected_pkt = *selected_pkt_it;
        queue.erase(selected_pkt_it);
        queue.push_front(selected_pkt);
        return true;
    }

    return false;
}

void
DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
    DPRINTF(DRAM, "Responding to Address %lld.. ", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    access(pkt);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());
        // response_time consumes the static latency and is charged also
        // with headerDelay that takes into account the delay provided by
        // the xbar and also the payloadDelay that takes into account the
        // number of data beats.
        Tick response_time = curTick() + static_latency + pkt->headerDelay +
                             pkt->payloadDelay;
        // Here we reset the timing of the packet before sending it out.
        pkt->headerDelay = pkt->payloadDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, response_time);
    } else {
        // @todo the packet is going to be deleted, and the DRAMPacket
        // still has a pointer to it
        pendingDelete.push_back(pkt);
    }

    DPRINTF(DRAM, "Done\n");

    return;
}

void
DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref,
                       Tick act_tick, uint32_t row)
{
    assert(rank_ref.actTicks.size() == activationLimit);

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    // update the open row
    assert(bank_ref.openRow == Bank::NO_ROW);
    bank_ref.openRow = row;

    // start counting anew, this covers both the case when we
    // auto-precharged, and when this access is forced to
    // precharge
    bank_ref.bytesAccessed = 0;
    bank_ref.rowAccesses = 0;

    ++rank_ref.numBanksActive;
    assert(rank_ref.numBanksActive <= banksPerRank);

    DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got %d active\n",
            bank_ref.bank, rank_ref.rank, act_tick,
            ranks[rank_ref.rank]->numBanksActive);

    rank_ref.power.powerlib.doCommand(MemCommand::ACT, bank_ref.bank,
                                      divCeil(act_tick, tCK) -
                                      timeStampOffset);

    DPRINTF(DRAMPower, "%llu,ACT,%d,%d\n", divCeil(act_tick, tCK) -
            timeStampOffset, bank_ref.bank, rank_ref.rank);

    // The next access has to respect tRAS for this bank
    bank_ref.preAllowedAt = act_tick + tRAS;

    // Respect the row-to-column command delay
    bank_ref.colAllowedAt = std::max(act_tick + tRCD, bank_ref.colAllowedAt);

    // start by enforcing tRRD
    for (int i = 0; i < banksPerRank; i++) {
        // next activate to any bank in this rank must not happen
        // before tRRD
        if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) {
            // bank group architecture requires longer delays between
            // ACT commands within the same bank group. Use tRRD_L
            // in this case
            rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD_L,
                                             rank_ref.banks[i].actAllowedAt);
        } else {
            // use shorter tRRD value when either
            // 1) bank group architecture is not supported
            // 2) bank is in a different bank group
            rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD,
                                             rank_ref.banks[i].actAllowedAt);
        }
    }

    // next, we deal with tXAW, if the activation limit is disabled
    // then we directly schedule an activate power event
    if (!rank_ref.actTicks.empty()) {
        // sanity check
        if (rank_ref.actTicks.back() &&
            (act_tick - rank_ref.actTicks.back()) < tXAW) {
            panic("Got %d activates in window %d (%llu - %llu) which "
                  "is smaller than %llu\n", activationLimit, act_tick -
                  rank_ref.actTicks.back(), act_tick,
                  rank_ref.actTicks.back(), tXAW);
        }

        // shift the times used for the bookkeeping, the last element
        // (highest index) is the oldest one and hence the lowest value
        rank_ref.actTicks.pop_back();

        // record a new activation (in the future)
        rank_ref.actTicks.push_front(act_tick);

        // cannot activate more than X times in time window tXAW, push the
        // next one (the X + 1'st activate) to be tXAW away from the
        // oldest in our window of X
        if (rank_ref.actTicks.back() &&
            (act_tick - rank_ref.actTicks.back()) < tXAW) {
            DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "
                    "no earlier than %llu\n", activationLimit,
                    rank_ref.actTicks.back() + tXAW);
            for (int j = 0; j < banksPerRank; j++)
                // next activate must not happen before end of window
                rank_ref.banks[j].actAllowedAt =
                    std::max(rank_ref.actTicks.back() + tXAW,
                             rank_ref.banks[j].actAllowedAt);
        }
    }

    // at the point when this activate takes place, make sure we
    // transition to the active power state
    if (!rank_ref.activateEvent.scheduled())
        schedule(rank_ref.activateEvent, act_tick);
    else if (rank_ref.activateEvent.when() > act_tick)
        // move it sooner in time
        reschedule(rank_ref.activateEvent, act_tick);
}
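
// To make the actTicks bookkeeping above concrete, assume an
// activation limit of 4 (the tFAW case) and a 30 ns window: actTicks
// then holds the times of the last four ACTs, newest at the front.
// With ACTs recorded at 5, 10, 15 and 20 ns, the fifth ACT may not
// issue before 5 + 30 = 35 ns. When it issues at 35 ns the oldest
// entry (5 ns) is popped and, since 35 - 10 < 30, the sixth ACT is
// pushed out to no earlier than 10 + 30 = 40 ns.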

void
DRAMCtrl::prechargeBank(Rank& rank_ref, Bank& bank, Tick pre_at, bool trace)
{
    // make sure the bank has an open row
    assert(bank.openRow != Bank::NO_ROW);

    // sample the bytes per activate here since we are closing
    // the page
    bytesPerActivate.sample(bank.bytesAccessed);

    bank.openRow = Bank::NO_ROW;

    // no precharge allowed before this one
    bank.preAllowedAt = pre_at;

    Tick pre_done_at = pre_at + tRP;

    bank.actAllowedAt = std::max(bank.actAllowedAt, pre_done_at);

    assert(rank_ref.numBanksActive != 0);
    --rank_ref.numBanksActive;

    DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got "
            "%d active\n", bank.bank, rank_ref.rank, pre_at,
            rank_ref.numBanksActive);

    if (trace) {

        rank_ref.power.powerlib.doCommand(MemCommand::PRE, bank.bank,
                                          divCeil(pre_at, tCK) -
                                          timeStampOffset);
        DPRINTF(DRAMPower, "%llu,PRE,%d,%d\n", divCeil(pre_at, tCK) -
                timeStampOffset, bank.bank, rank_ref.rank);
    }
    // if we look at the current number of active banks we might be
    // tempted to think the DRAM is now idle, however this can be
    // undone by an activate that is scheduled to happen before we
    // would have reached the idle state, so schedule an event and
    // rather check once we actually make it to the point in time when
    // the (last) precharge takes place
    if (!rank_ref.prechargeEvent.scheduled())
        schedule(rank_ref.prechargeEvent, pre_done_at);
    else if (rank_ref.prechargeEvent.when() < pre_done_at)
        reschedule(rank_ref.prechargeEvent, pre_done_at);
}

void
DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt)
{
    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    // get the rank
    Rank& rank = dram_pkt->rankRef;

    // get the bank
    Bank& bank = dram_pkt->bankRef;

    // for the state we need to track if it is a row hit or not
    bool row_hit = true;

    // respect any constraints on the command (e.g. tRCD or tCCD)
    Tick cmd_at = std::max(bank.colAllowedAt, curTick());

    // Determine the access latency and update the bank state
    if (bank.openRow == dram_pkt->row) {
        // nothing to do
    } else {
        row_hit = false;

        // If there is a page open, precharge it.
        if (bank.openRow != Bank::NO_ROW) {
            prechargeBank(rank, bank, std::max(bank.preAllowedAt, curTick()));
        }

        // next we need to account for the delay in activating the
        // page
        Tick act_tick = std::max(bank.actAllowedAt, curTick());

        // Record the activation and deal with all the global timing
        // constraints caused by a new activation (tRRD and tXAW)
        activateBank(rank, bank, act_tick, dram_pkt->row);

        // issue the command as early as possible
        cmd_at = bank.colAllowedAt;
    }

    // we need to wait until the bus is available before we can issue
    // the command
    cmd_at = std::max(cmd_at, busBusyUntil - tCL);

    // update the packet ready time
    dram_pkt->readyTime = cmd_at + tCL + tBURST;

    // only one burst can use the bus at any one point in time
    assert(dram_pkt->readyTime - busBusyUntil >= tBURST);
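
    // To illustrate with assumed DDR3-like values (not tied to this
    // config): on a row miss with another page open, the data is ready
    // roughly tRP + tRCD + tCL + tBURST after the precharge can issue,
    // e.g. 13.75 + 13.75 + 13.75 + 5 = 46.25 ns, whereas a row hit on
    // an idle bus only pays tCL + tBURST = 18.75 ns.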

    // update the time for the next read/write burst for each
    // bank (add a max with tCCD/tCCD_L here)
    Tick cmd_dly;
    for (int j = 0; j < ranksPerChannel; j++) {
        for (int i = 0; i < banksPerRank; i++) {
            // next burst to same bank group in this rank must not happen
            // before tCCD_L. Different bank group timing requirement is
            // tBURST; Add tCS for different ranks
            if (dram_pkt->rank == j) {
                if (bankGroupArch &&
                    (bank.bankgr == ranks[j]->banks[i].bankgr)) {
                    // bank group architecture requires longer delays between
                    // RD/WR burst commands to the same bank group.
                    // Use tCCD_L in this case
                    cmd_dly = tCCD_L;
                } else {
                    // use tBURST (equivalent to tCCD_S), the shorter
                    // cas-to-cas delay value, when either:
                    // 1) bank group architecture is not supported
                    // 2) bank is in a different bank group
                    cmd_dly = tBURST;
                }
            } else {
                // different rank is by default in a different bank group
                // use tBURST (equivalent to tCCD_S), which is the shorter
                // cas-to-cas delay in this case
                // Add tCS to account for rank-to-rank bus delay requirements
                cmd_dly = tBURST + tCS;
            }
            ranks[j]->banks[i].colAllowedAt = std::max(cmd_at + cmd_dly,
                                              ranks[j]->banks[i].colAllowedAt);
        }
    }

    // Save rank of current access
    activeRank = dram_pkt->rank;

    // If this is a write, we also need to respect the write recovery
    // time before a precharge, in the case of a read, respect the
    // read to precharge constraint
    bank.preAllowedAt = std::max(bank.preAllowedAt,
                                 dram_pkt->isRead ? cmd_at + tRTP :
                                 dram_pkt->readyTime + tWR);

    // increment the bytes accessed and the accesses per row
    bank.bytesAccessed += burstSize;
    ++bank.rowAccesses;

    // if we reached the max, then issue with an auto-precharge
    bool auto_precharge = pageMgmt == Enums::close ||
        bank.rowAccesses == maxAccessesPerRow;

    // if we did not hit the limit, we might still want to
    // auto-precharge
    if (!auto_precharge &&
        (pageMgmt == Enums::open_adaptive ||
         pageMgmt == Enums::close_adaptive)) {
        // a twist on the open and close page policies:
        // 1) open_adaptive page policy does not blindly keep the
        // page open, but closes it if there are no row hits, and there
        // are bank conflicts in the queue
        // 2) close_adaptive page policy does not blindly close the
        // page, but closes it only if there are no row hits in the queue.
        // In this case, only force an auto precharge when there
        // are no same page hits in the queue
        bool got_more_hits = false;
        bool got_bank_conflict = false;

        // either look at the read queue or write queue
        const deque<DRAMPacket*>& queue = dram_pkt->isRead ? readQueue :
            writeQueue;
        auto p = queue.begin();
        // make sure we are not considering the packet that we are
        // currently dealing with (which is the head of the queue)
        ++p;

        // keep on looking until we find a hit or reach the end of the queue
        // 1) if a hit is found, then both open and close adaptive
        // policies keep the page open
        // 2) if no hit is found, got_bank_conflict is set to true if a
        // bank conflict request is waiting in the queue
        while (!got_more_hits && p != queue.end()) {
            bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
                (dram_pkt->bank == (*p)->bank);
            bool same_row = dram_pkt->row == (*p)->row;
            got_more_hits |= same_rank_bank && same_row;
            got_bank_conflict |= same_rank_bank && !same_row;
            ++p;
        }

        // auto pre-charge when either
        // 1) open_adaptive policy, we have not got any more hits, and
        // have a bank conflict
        // 2) close_adaptive policy and we have not got any more hits
        auto_precharge = !got_more_hits &&
            (got_bank_conflict || pageMgmt == Enums::close_adaptive);
    }
"RD" : "WR"; 1184 1185 // MemCommand required for DRAMPower library 1186 MemCommand::cmds command = (mem_cmd == "RD") ? MemCommand::RD : 1187 MemCommand::WR; 1188 1189 // if this access should use auto-precharge, then we are 1190 // closing the row 1191 if (auto_precharge) { 1192 // if auto-precharge push a PRE command at the correct tick to the 1193 // list used by DRAMPower library to calculate power 1194 prechargeBank(rank, bank, std::max(curTick(), bank.preAllowedAt)); 1195 1196 DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId); 1197 } 1198 1199 // Update bus state 1200 busBusyUntil = dram_pkt->readyTime; 1201 1202 DPRINTF(DRAM, "Access to %lld, ready at %lld bus busy until %lld.\n", 1203 dram_pkt->addr, dram_pkt->readyTime, busBusyUntil); 1204 1205 dram_pkt->rankRef.power.powerlib.doCommand(command, dram_pkt->bank, 1206 divCeil(cmd_at, tCK) - 1207 timeStampOffset); 1208 1209 DPRINTF(DRAMPower, "%llu,%s,%d,%d\n", divCeil(cmd_at, tCK) - 1210 timeStampOffset, mem_cmd, dram_pkt->bank, dram_pkt->rank); 1211 1212 // Update the minimum timing between the requests, this is a 1213 // conservative estimate of when we have to schedule the next 1214 // request to not introduce any unecessary bubbles. In most cases 1215 // we will wake up sooner than we have to. 1216 nextReqTime = busBusyUntil - (tRP + tRCD + tCL); 1217 1218 // Update the stats and schedule the next request 1219 if (dram_pkt->isRead) { 1220 ++readsThisTime; 1221 if (row_hit) 1222 readRowHits++; 1223 bytesReadDRAM += burstSize; 1224 perBankRdBursts[dram_pkt->bankId]++; 1225 1226 // Update latency stats 1227 totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime; 1228 totBusLat += tBURST; 1229 totQLat += cmd_at - dram_pkt->entryTime; 1230 } else { 1231 ++writesThisTime; 1232 if (row_hit) 1233 writeRowHits++; 1234 bytesWritten += burstSize; 1235 perBankWrBursts[dram_pkt->bankId]++; 1236 } 1237} 1238 1239void 1240DRAMCtrl::processNextReqEvent() 1241{ 1242 int busyRanks = 0; 1243 for (auto r : ranks) { 1244 if (!r->isAvailable()) { 1245 // rank is busy refreshing 1246 busyRanks++; 1247 1248 // let the rank know that if it was waiting to drain, it 1249 // is now done and ready to proceed 1250 r->checkDrainDone(); 1251 } 1252 } 1253 1254 if (busyRanks == ranksPerChannel) { 1255 // if all ranks are refreshing wait for them to finish 1256 // and stall this state machine without taking any further 1257 // action, and do not schedule a new nextReqEvent 1258 return; 1259 } 1260 1261 // pre-emptively set to false. 

void
DRAMCtrl::processNextReqEvent()
{
    int busyRanks = 0;
    for (auto r : ranks) {
        if (!r->isAvailable()) {
            // rank is busy refreshing
            busyRanks++;

            // let the rank know that if it was waiting to drain, it
            // is now done and ready to proceed
            r->checkDrainDone();
        }
    }

    if (busyRanks == ranksPerChannel) {
        // if all ranks are refreshing wait for them to finish
        // and stall this state machine without taking any further
        // action, and do not schedule a new nextReqEvent
        return;
    }

    // pre-emptively set to false. Overwrite if in READ_TO_WRITE
    // or WRITE_TO_READ state
    bool switched_cmd_type = false;
    if (busState == READ_TO_WRITE) {
        DPRINTF(DRAM, "Switching to writes after %d reads with %d reads "
                "waiting\n", readsThisTime, readQueue.size());

        // sample and reset the read-related stats as we are now
        // transitioning to writes, and all reads are done
        rdPerTurnAround.sample(readsThisTime);
        readsThisTime = 0;

        // now proceed to do the actual writes
        busState = WRITE;
        switched_cmd_type = true;
    } else if (busState == WRITE_TO_READ) {
        DPRINTF(DRAM, "Switching to reads after %d writes with %d writes "
                "waiting\n", writesThisTime, writeQueue.size());

        wrPerTurnAround.sample(writesThisTime);
        writesThisTime = 0;

        busState = READ;
        switched_cmd_type = true;
    }

    // when we get here it is either a read or a write
    if (busState == READ) {

        // track if we should switch or not
        bool switch_to_writes = false;

        if (readQueue.empty()) {
            // In the case there is no read request to go next,
            // trigger writes if we have passed the low threshold (or
            // if we are draining)
            if (!writeQueue.empty() &&
                (drainState() == DrainState::Draining ||
                 writeQueue.size() > writeLowThreshold)) {

                switch_to_writes = true;
            } else {
                // check if we are drained
                if (drainState() == DrainState::Draining &&
                    respQueue.empty()) {

                    DPRINTF(Drain, "DRAM controller done draining\n");
                    signalDrainDone();
                }

                // nothing to do, not even any point in scheduling an
                // event for the next request
                return;
            }
        } else {
            // bool to check if there is a read to a free rank
            bool found_read = false;

            // Figure out which read request goes next, and move it to the
            // front of the read queue
            // If we are changing command type, incorporate the minimum
            // bus turnaround delay which will be tCS (different rank) case
            found_read = chooseNext(readQueue,
                                    switched_cmd_type ? tCS : 0);

            // if no read to an available rank is found then return
            // at this point. There could be writes to the available ranks
            // which are above the required threshold. However, to
            // avoid adding more complexity to the code, return and wait
            // for a refresh event to kick things into action again.
            if (!found_read)
                return;

            DRAMPacket* dram_pkt = readQueue.front();
            assert(dram_pkt->rankRef.isAvailable());
            // here we get a bit creative and shift the bus busy time not
            // just the tWTR, but also a CAS latency to capture the fact
            // that we are allowed to prepare a new bank, but not issue a
            // read command until after tWTR, in essence we capture a
            // bubble on the data bus that is tWTR + tCL
            if (switched_cmd_type && dram_pkt->rank == activeRank) {
                busBusyUntil += tWTR + tCL;
            }

            doDRAMAccess(dram_pkt);

            // At this point we're done dealing with the request
            readQueue.pop_front();

            // sanity check
            assert(dram_pkt->size <= burstSize);
            assert(dram_pkt->readyTime >= curTick());

            // Insert into response queue. It will be sent back to the
            // requestor at its readyTime
            if (respQueue.empty()) {
                assert(!respondEvent.scheduled());
                schedule(respondEvent, dram_pkt->readyTime);
            } else {
                assert(respQueue.back()->readyTime <= dram_pkt->readyTime);
                assert(respondEvent.scheduled());
            }

            respQueue.push_back(dram_pkt);

            // we have so many writes that we have to transition
            if (writeQueue.size() > writeHighThreshold) {
                switch_to_writes = true;
            }
        }

        // switching to writes, either because the read queue is empty
        // and the writes have passed the low threshold (or we are
        // draining), or because the writes hit the high threshold
        if (switch_to_writes) {
            // transition to writing
            busState = READ_TO_WRITE;
        }
    } else {
        // bool to check if write to free rank is found
        bool found_write = false;

        // If we are changing command type, incorporate the minimum
        // bus turnaround delay
        found_write = chooseNext(writeQueue,
                                 switched_cmd_type ? std::min(tRTW, tCS) : 0);

        // if no writes to an available rank are found then return.
        // There could be reads to the available ranks. However, to avoid
        // adding more complexity to the code, return at this point and wait
        // for a refresh event to kick things into action again.
        if (!found_write)
            return;

        DRAMPacket* dram_pkt = writeQueue.front();
        assert(dram_pkt->rankRef.isAvailable());
        // sanity check
        assert(dram_pkt->size <= burstSize);

        // add a bubble to the data bus, as defined by the
        // tRTW when access is to the same rank as previous burst
        // Different rank timing is handled with tCS, which is
        // applied to colAllowedAt
        if (switched_cmd_type && dram_pkt->rank == activeRank) {
            busBusyUntil += tRTW;
        }

        doDRAMAccess(dram_pkt);

        writeQueue.pop_front();
        isInWriteQueue.erase(burstAlign(dram_pkt->addr));
        delete dram_pkt;

        // If we emptied the write queue, or got sufficiently below the
        // threshold (using the minWritesPerSwitch as the hysteresis) and
        // are not draining, or we have reads waiting and have done enough
        // writes, then switch to reads.
        if (writeQueue.empty() ||
            (writeQueue.size() + minWritesPerSwitch < writeLowThreshold &&
             drainState() != DrainState::Draining) ||
            (!readQueue.empty() && writesThisTime >= minWritesPerSwitch)) {
            // turn the bus back around for reads again
            busState = WRITE_TO_READ;

            // note that we switch back to reads also in the idle
            // case, which eventually will check for any draining and
            // also pause any further scheduling if there is really
            // nothing to do
        }
    }
    // It is possible that a refresh to another rank kicks things back into
    // action before reaching this point.
    if (!nextReqEvent.scheduled())
        schedule(nextReqEvent, std::max(nextReqTime, curTick()));

    // If there is space available and we have writes waiting then let
    // them retry. This is done here to ensure that the retry does not
    // cause a nextReqEvent to be scheduled before we do so as part of
    // the next request processing
    if (retryWrReq && writeQueue.size() < writeBufferSize) {
        retryWrReq = false;
        port.sendRetryReq();
    }
}
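
// minBankPrep below returns a pair: a bit mask over all banks in the
// channel (bit position == bankId) marking the banks that can be made
// ready soonest for a waiting request, and a flag saying whether their
// ACT (and any preceding PRE) can be issued entirely in the shadow of
// the burst currently occupying the data bus.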

pair<uint64_t, bool>
DRAMCtrl::minBankPrep(const deque<DRAMPacket*>& queue,
                      Tick min_col_at) const
{
    uint64_t bank_mask = 0;
    Tick min_act_at = MaxTick;

    // latest Tick for which ACT can occur without incurring additional
    // delay on the data bus
    const Tick hidden_act_max = std::max(min_col_at - tRCD, curTick());

    // Flag condition when burst can issue back-to-back with previous burst
    bool found_seamless_bank = false;

    // Flag condition when bank can be opened without incurring additional
    // delay on the data bus
    bool hidden_bank_prep = false;

    // determine if we have queued transactions targeting the
    // bank in question
    vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
    for (const auto& p : queue) {
        if (p->rankRef.isAvailable())
            got_waiting[p->bankId] = true;
    }

    // Find command with optimal bank timing
    // Will prioritize commands that can issue seamlessly.
    for (int i = 0; i < ranksPerChannel; i++) {
        for (int j = 0; j < banksPerRank; j++) {
            uint16_t bank_id = i * banksPerRank + j;

            // if we have waiting requests for the bank, and it is
            // amongst the first available, update the mask
            if (got_waiting[bank_id]) {
                // make sure this rank is not currently refreshing.
                assert(ranks[i]->isAvailable());
                // simplistic approximation of when the bank can issue
                // an activate, ignoring any rank-to-rank switching
                // cost in this calculation
                Tick act_at = ranks[i]->banks[j].openRow == Bank::NO_ROW ?
                    std::max(ranks[i]->banks[j].actAllowedAt, curTick()) :
                    std::max(ranks[i]->banks[j].preAllowedAt, curTick()) + tRP;

                // When is the earliest the R/W burst can issue?
                Tick col_at = std::max(ranks[i]->banks[j].colAllowedAt,
                                       act_at + tRCD);

                // bank can issue burst back-to-back (seamlessly) with
                // previous burst
                bool new_seamless_bank = col_at <= min_col_at;

                // if we found a new seamless bank or we have no
                // seamless banks, and got a bank with an earlier
                // activate time, it should be added to the bit mask
                if (new_seamless_bank ||
                    (!found_seamless_bank && act_at <= min_act_at)) {
                    // if we did not have a seamless bank before, and
                    // we do now, reset the bank mask, also reset it
                    // if we have not yet found a seamless bank and
                    // the activate time is smaller than what we have
                    // seen so far
                    if (!found_seamless_bank &&
                        (new_seamless_bank || act_at < min_act_at)) {
                        bank_mask = 0;
                    }

                    found_seamless_bank |= new_seamless_bank;

                    // ACT can occur 'behind the scenes'
                    hidden_bank_prep = act_at <= hidden_act_max;

                    // set the bit corresponding to the available bank
                    replaceBits(bank_mask, bank_id, bank_id, 1);
                    min_act_at = act_at;
                }
            }
        }
    }

    return make_pair(bank_mask, hidden_bank_prep);
}
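
// The Rank methods below implement two cooperating state machines per
// rank: a power state machine (PWR_IDLE, PWR_ACT, PWR_REF) that tracks
// how long the rank spends in each state for the DRAMPower model, and
// a refresh state machine (REF_IDLE, REF_DRAIN, REF_PRE, REF_RUN) that
// drains in-flight accesses, precharges all banks, and performs the
// periodic refresh every tREFI.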
void
DRAMCtrl::Rank::processRefreshEvent()
{
    // when first preparing the refresh, remember when it was due
    if (refreshState == REF_IDLE) {
        // remember when the refresh is due
        refreshDueAt = curTick();

        // proceed to drain
        refreshState = REF_DRAIN;

        DPRINTF(DRAM, "Refresh due\n");
    }

    // let any scheduled read or write to the same rank go ahead, after
    // which it will hand control back to this event loop
    if (refreshState == REF_DRAIN) {
        // if a request is at the moment being handled and this request is
        // accessing the current rank then wait for it to finish
        if ((rank == memory.activeRank)
            && (memory.nextReqEvent.scheduled())) {
            // hand control over to the request loop until it is
            // evaluated next
            DPRINTF(DRAM, "Refresh awaiting draining\n");

            return;
        } else {
            refreshState = REF_PRE;
        }
    }

    // at this point, ensure that all banks are precharged
    if (refreshState == REF_PRE) {
        // precharge any active bank if we are not already in the idle
        // state
        if (pwrState != PWR_IDLE) {
            // at the moment, we use a precharge all even if there is
            // only a single bank open
            DPRINTF(DRAM, "Precharging all\n");

            // first determine when we can precharge
            Tick pre_at = curTick();

            for (auto &b : banks) {
                // respect both causality and any existing bank
                // constraints, some banks could already have a
                // (auto) precharge scheduled
                pre_at = std::max(b.preAllowedAt, pre_at);
            }

            // make sure all banks per rank are precharged, and for those
            // that already are, update their availability
            Tick act_allowed_at = pre_at + memory.tRP;

            for (auto &b : banks) {
                if (b.openRow != Bank::NO_ROW) {
                    memory.prechargeBank(*this, b, pre_at, false);
                } else {
                    b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at);
                    b.preAllowedAt = std::max(b.preAllowedAt, pre_at);
                }
            }

            // precharge all banks in rank
            power.powerlib.doCommand(MemCommand::PREA, 0,
                                     divCeil(pre_at, memory.tCK) -
                                     memory.timeStampOffset);

            DPRINTF(DRAMPower, "%llu,PREA,0,%d\n",
                    divCeil(pre_at, memory.tCK) -
                    memory.timeStampOffset, rank);
        } else {
            DPRINTF(DRAM, "All banks already precharged, starting refresh\n");

            // go ahead and kick the power state machine into gear if
            // we are already idle
            schedulePowerEvent(PWR_REF, curTick());
        }

        refreshState = REF_RUN;
        assert(numBanksActive == 0);

        // wait for all banks to be precharged, at which point the
        // power state machine will transition to the idle state, and
        // automatically move to a refresh, at that point it will also
        // call this method to get the refresh event loop going again
        return;
    }
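    // For a sense of scale (typical DDR3 datasheet values, not read
    // from this config): refreshes fall due every tREFI = 7.8us, and
    // each one occupies the rank for a tRFC on the order of 100-350ns
    // depending on device density, which is what the REF_RUN phase
    // below models.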
    // last but not least we perform the actual refresh
    if (refreshState == REF_RUN) {
        // should never get here with any banks active
        assert(numBanksActive == 0);
        assert(pwrState == PWR_REF);

        Tick ref_done_at = curTick() + memory.tRFC;

        for (auto &b : banks) {
            b.actAllowedAt = ref_done_at;
        }

        // at the moment this affects all ranks
        power.powerlib.doCommand(MemCommand::REF, 0,
                                 divCeil(curTick(), memory.tCK) -
                                 memory.timeStampOffset);

        // at the moment sort the list of commands and update the counters
        // for the DRAMPower library when doing a refresh
        sort(power.powerlib.cmdList.begin(),
             power.powerlib.cmdList.end(), DRAMCtrl::sortTime);

        // update the counters for DRAMPower, passing false to
        // indicate that this is not the last command in the
        // list. DRAMPower requires this information for the
        // correct calculation of the background energy at the end
        // of the simulation. Ideally we would want to call this
        // function with true once at the end of the
        // simulation. However, the discarded energy is extremely
        // small and does not affect the final results.
        power.powerlib.updateCounters(false);

        // call the energy function
        power.powerlib.calcEnergy();

        // Update the stats
        updatePowerStats();

        DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), memory.tCK) -
                memory.timeStampOffset, rank);

        // make sure we did not wait so long that we cannot make up
        // for it
        if (refreshDueAt + memory.tREFI < ref_done_at) {
            fatal("Refresh was delayed so long we cannot catch up\n");
        }

        // compensate for the delay in actually performing the refresh
        // when scheduling the next one
        schedule(refreshEvent, refreshDueAt + memory.tREFI - memory.tRP);

        assert(!powerEvent.scheduled());

        // move to the idle power state once the refresh is done, this
        // will also move the refresh state machine to the refresh
        // idle state
        schedulePowerEvent(PWR_IDLE, ref_done_at);

        DPRINTF(DRAMState, "Refresh done at %llu and next refresh at %llu\n",
                ref_done_at, refreshDueAt + memory.tREFI);
    }
}

void
DRAMCtrl::Rank::schedulePowerEvent(PowerState pwr_state, Tick tick)
{
    // respect causality
    assert(tick >= curTick());

    if (!powerEvent.scheduled()) {
        DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n",
                tick, pwr_state);

        // insert the new transition
        pwrStateTrans = pwr_state;

        schedule(powerEvent, tick);
    } else {
        panic("Scheduled power event at %llu to state %d, "
              "with scheduled event at %llu to %d\n", tick, pwr_state,
              powerEvent.when(), pwrStateTrans);
    }
}
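// Sketch of the power state machine driven by processPowerEvent()
// below: at most one transition is outstanding per rank (enforced by
// the panic in schedulePowerEvent above), PWR_ACT and PWR_IDLE track
// whether any bank is open, and PWR_REF covers the refresh itself
// before handing control back to the refresh event loop. The PRE_PDN
// and ACT_PDN entries in the stats are power-down states this
// revision does not appear to transition into.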
void
DRAMCtrl::Rank::processPowerEvent()
{
    // remember where we were, and for how long
    Tick duration = curTick() - pwrStateTick;
    PowerState prev_state = pwrState;

    // update the accounting
    pwrStateTime[prev_state] += duration;

    pwrState = pwrStateTrans;
    pwrStateTick = curTick();

    if (pwrState == PWR_IDLE) {
        DPRINTF(DRAMState, "All banks precharged\n");

        // if we were refreshing, make sure we start scheduling requests again
        if (prev_state == PWR_REF) {
            DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration);
            assert(pwrState == PWR_IDLE);

            // kick things into action again
            refreshState = REF_IDLE;
            // a request event could be already scheduled by the state
            // machine of the other rank
            if (!memory.nextReqEvent.scheduled())
                schedule(memory.nextReqEvent, curTick());
        } else {
            assert(prev_state == PWR_ACT);

            // if we have a pending refresh, and are now moving to
            // the idle state, directly transition to a refresh
            if (refreshState == REF_RUN) {
                // there should be nothing waiting at this point
                assert(!powerEvent.scheduled());

                // update the state in zero time and proceed below
                pwrState = PWR_REF;
            }
        }
    }

    // we transition to the refresh state, let the refresh state
    // machine know of this state update and let it deal with the
    // scheduling of the next power state transition as well as the
    // following refresh
    if (pwrState == PWR_REF) {
        DPRINTF(DRAMState, "Refreshing\n");
        // kick the refresh event loop into action again, and that
        // in turn will schedule a transition to the idle power
        // state once the refresh is done
        assert(refreshState == REF_RUN);
        processRefreshEvent();
    }
}

void
DRAMCtrl::Rank::updatePowerStats()
{
    // Get the energy and power from DRAMPower
    Data::MemoryPowerModel::Energy energy =
        power.powerlib.getEnergy();
    Data::MemoryPowerModel::Power rank_power =
        power.powerlib.getPower();

    // DRAMPower models a single device, so scale the reported energy
    // to the whole rank
    actEnergy = energy.act_energy * memory.devicesPerRank;
    preEnergy = energy.pre_energy * memory.devicesPerRank;
    readEnergy = energy.read_energy * memory.devicesPerRank;
    writeEnergy = energy.write_energy * memory.devicesPerRank;
    refreshEnergy = energy.ref_energy * memory.devicesPerRank;
    actBackEnergy = energy.act_stdby_energy * memory.devicesPerRank;
    preBackEnergy = energy.pre_stdby_energy * memory.devicesPerRank;
    totalEnergy = energy.total_energy * memory.devicesPerRank;
    averagePower = rank_power.average_power * memory.devicesPerRank;
}

void
DRAMCtrl::Rank::regStats()
{
    using namespace Stats;

    pwrStateTime
        .init(5)
        .name(name() + ".memoryStateTime")
        .desc("Time in different power states");
    pwrStateTime.subname(0, "IDLE");
    pwrStateTime.subname(1, "REF");
    pwrStateTime.subname(2, "PRE_PDN");
    pwrStateTime.subname(3, "ACT");
    pwrStateTime.subname(4, "ACT_PDN");

    actEnergy
        .name(name() + ".actEnergy")
        .desc("Energy for activate commands per rank (pJ)");

    preEnergy
        .name(name() + ".preEnergy")
        .desc("Energy for precharge commands per rank (pJ)");

    readEnergy
        .name(name() + ".readEnergy")
        .desc("Energy for read commands per rank (pJ)");

    writeEnergy
        .name(name() + ".writeEnergy")
        .desc("Energy for write commands per rank (pJ)");

    refreshEnergy
        .name(name() + ".refreshEnergy")
        .desc("Energy for refresh commands per rank (pJ)");

    actBackEnergy
        .name(name() + ".actBackEnergy")
        .desc("Energy for active background per rank (pJ)");

    preBackEnergy
        .name(name() + ".preBackEnergy")
        .desc("Energy for precharge background per rank (pJ)");

    totalEnergy
        .name(name() + ".totalEnergy")
        .desc("Total energy per rank (pJ)");

    averagePower
        .name(name() + ".averagePower")
        .desc("Core power per rank (mW)");
}
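// Note on the formula stats registered below: gem5's Stats package
// evaluates expressions such as totQLat / (readBursts - servicedByWrQ)
// lazily at stats-dump time, so reads serviced out of the write queue
// are excluded from the average latencies, and writes merged in the
// write queue are excluded from the write row hit rate.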
void
DRAMCtrl::regStats()
{
    using namespace Stats;

    AbstractMemory::regStats();

    for (auto r : ranks) {
        r->regStats();
    }

    readReqs
        .name(name() + ".readReqs")
        .desc("Number of read requests accepted");

    writeReqs
        .name(name() + ".writeReqs")
        .desc("Number of write requests accepted");

    readBursts
        .name(name() + ".readBursts")
        .desc("Number of DRAM read bursts, "
              "including those serviced by the write queue");

    writeBursts
        .name(name() + ".writeBursts")
        .desc("Number of DRAM write bursts, "
              "including those merged in the write queue");

    servicedByWrQ
        .name(name() + ".servicedByWrQ")
        .desc("Number of DRAM read bursts serviced by the write queue");

    mergedWrBursts
        .name(name() + ".mergedWrBursts")
        .desc("Number of DRAM write bursts merged with an existing one");

    neitherReadNorWrite
        .name(name() + ".neitherReadNorWriteReqs")
        .desc("Number of requests that are neither read nor write");

    perBankRdBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankRdBursts")
        .desc("Per bank read bursts");

    perBankWrBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankWrBursts")
        .desc("Per bank write bursts");

    avgRdQLen
        .name(name() + ".avgRdQLen")
        .desc("Average read queue length when enqueuing")
        .precision(2);

    avgWrQLen
        .name(name() + ".avgWrQLen")
        .desc("Average write queue length when enqueuing")
        .precision(2);

    totQLat
        .name(name() + ".totQLat")
        .desc("Total ticks spent queuing");

    totBusLat
        .name(name() + ".totBusLat")
        .desc("Total ticks spent in databus transfers");

    totMemAccLat
        .name(name() + ".totMemAccLat")
        .desc("Total ticks spent from burst creation until serviced "
              "by the DRAM");

    avgQLat
        .name(name() + ".avgQLat")
        .desc("Average queueing delay per DRAM burst")
        .precision(2);

    avgQLat = totQLat / (readBursts - servicedByWrQ);

    avgBusLat
        .name(name() + ".avgBusLat")
        .desc("Average bus latency per DRAM burst")
        .precision(2);

    avgBusLat = totBusLat / (readBursts - servicedByWrQ);

    avgMemAccLat
        .name(name() + ".avgMemAccLat")
        .desc("Average memory access latency per DRAM burst")
        .precision(2);

    avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ);

    numRdRetry
        .name(name() + ".numRdRetry")
        .desc("Number of times read queue was full causing retry");

    numWrRetry
        .name(name() + ".numWrRetry")
        .desc("Number of times write queue was full causing retry");

    readRowHits
        .name(name() + ".readRowHits")
        .desc("Number of row buffer hits during reads");

    writeRowHits
        .name(name() + ".writeRowHits")
        .desc("Number of row buffer hits during writes");

    readRowHitRate
        .name(name() + ".readRowHitRate")
        .desc("Row buffer hit rate for reads")
        .precision(2);

    readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100;

    writeRowHitRate
        .name(name() + ".writeRowHitRate")
        .desc("Row buffer hit rate for writes")
        .precision(2);

    writeRowHitRate = (writeRowHits / (writeBursts - mergedWrBursts)) * 100;

    readPktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".readPktSize")
        .desc("Read request sizes (log2)");

    writePktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".writePktSize")
        .desc("Write request sizes (log2)");

    rdQLenPdf
        .init(readBufferSize)
        .name(name() + ".rdQLenPdf")
        .desc("What read queue length does an incoming req see");

    wrQLenPdf
        .init(writeBufferSize)
        .name(name() + ".wrQLenPdf")
        .desc("What write queue length does an incoming req see");

    bytesPerActivate
        .init(maxAccessesPerRow)
        .name(name() + ".bytesPerActivate")
        .desc("Bytes accessed per row activation")
        .flags(nozero);

    rdPerTurnAround
        .init(readBufferSize)
        .name(name() + ".rdPerTurnAround")
        .desc("Reads before turning the bus around for writes")
        .flags(nozero);

    wrPerTurnAround
        .init(writeBufferSize)
        .name(name() + ".wrPerTurnAround")
        .desc("Writes before turning the bus around for reads")
        .flags(nozero);
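    // The bandwidth formulas that follow divide byte counts by
    // 1000000, i.e. they report decimal MByte/s. Worked example
    // (illustrative, not a shipped config): a 64-byte burst every
    // tBURST = 5ns gives a theoretical peak of
    // 64 / 5e-9 / 1e6 = 12800 MByte/s.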
    bytesReadDRAM
        .name(name() + ".bytesReadDRAM")
        .desc("Total number of bytes read from DRAM");

    bytesReadWrQ
        .name(name() + ".bytesReadWrQ")
        .desc("Total number of bytes read from write queue");

    bytesWritten
        .name(name() + ".bytesWritten")
        .desc("Total number of bytes written to DRAM");

    bytesReadSys
        .name(name() + ".bytesReadSys")
        .desc("Total read bytes from the system interface side");

    bytesWrittenSys
        .name(name() + ".bytesWrittenSys")
        .desc("Total written bytes from the system interface side");

    avgRdBW
        .name(name() + ".avgRdBW")
        .desc("Average DRAM read bandwidth in MByte/s")
        .precision(2);

    avgRdBW = (bytesReadDRAM / 1000000) / simSeconds;

    avgWrBW
        .name(name() + ".avgWrBW")
        .desc("Average achieved write bandwidth in MByte/s")
        .precision(2);

    avgWrBW = (bytesWritten / 1000000) / simSeconds;

    avgRdBWSys
        .name(name() + ".avgRdBWSys")
        .desc("Average system read bandwidth in MByte/s")
        .precision(2);

    avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;

    avgWrBWSys
        .name(name() + ".avgWrBWSys")
        .desc("Average system write bandwidth in MByte/s")
        .precision(2);

    avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;

    peakBW
        .name(name() + ".peakBW")
        .desc("Theoretical peak bandwidth in MByte/s")
        .precision(2);

    peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000;

    busUtil
        .name(name() + ".busUtil")
        .desc("Data bus utilization in percentage")
        .precision(2);

    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;

    totGap
        .name(name() + ".totGap")
        .desc("Total gap between requests");

    avgGap
        .name(name() + ".avgGap")
        .desc("Average gap between requests")
        .precision(2);

    avgGap = totGap / (readReqs + writeReqs);

    // Stats for DRAM Power calculation based on Micron datasheet
    busUtilRead
        .name(name() + ".busUtilRead")
        .desc("Data bus utilization in percentage for reads")
        .precision(2);

    busUtilRead = avgRdBW / peakBW * 100;

    busUtilWrite
        .name(name() + ".busUtilWrite")
        .desc("Data bus utilization in percentage for writes")
        .precision(2);

    busUtilWrite = avgWrBW / peakBW * 100;

    pageHitRate
        .name(name() + ".pageHitRate")
        .desc("Row buffer hit rate, read and write combined")
        .precision(2);

    pageHitRate = (writeRowHits + readRowHits) /
        (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100;
}

void
DRAMCtrl::recvFunctional(PacketPtr pkt)
{
    // rely on the abstract memory
    functionalAccess(pkt);
}

BaseSlavePort&
DRAMCtrl::getSlavePort(const string &if_name, PortID idx)
{
    if (if_name != "port") {
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return port;
    }
}

DrainState
DRAMCtrl::drain()
{
    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (!(writeQueue.empty() && readQueue.empty() && respQueue.empty())) {
        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
                " resp: %d\n", writeQueue.size(), readQueue.size(),
                respQueue.size());

        // the only part that is not drained automatically over time
        // is the write queue, thus kick things into action if needed
        if (!writeQueue.empty() && !nextReqEvent.scheduled()) {
            schedule(nextReqEvent, curTick());
        }
        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}
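// A note on the drain/resume flow, summarising gem5's draining
// protocol as it applies here: drain() above is invoked when the
// simulator needs a quiescent state, e.g. before checkpointing or
// switching CPU models, and drainResume() below reconciles any change
// of timing mode once simulation restarts.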
void
DRAMCtrl::drainResume()
{
    if (!isTimingMode && system()->isTimingMode()) {
        // if we switched to timing mode, kick things into action,
        // and behave as if we restored from a checkpoint
        startup();
    } else if (isTimingMode && !system()->isTimingMode()) {
        // if we switch from timing mode, stop the refresh events to
        // not cause issues with KVM
        for (auto r : ranks) {
            r->suspend();
        }
    }

    // update the mode
    isTimingMode = system()->isTimingMode();
}

DRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _memory)
    : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this),
      memory(_memory)
{ }

AddrRangeList
DRAMCtrl::MemoryPort::getAddrRanges() const
{
    AddrRangeList ranges;
    ranges.push_back(memory.getAddrRange());
    return ranges;
}

void
DRAMCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(memory.name());

    if (!queue.checkFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory.recvFunctional(pkt);
    }

    pkt->popLabel();
}

Tick
DRAMCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory.recvAtomic(pkt);
}

bool
DRAMCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller
    return memory.recvTimingReq(pkt);
}

DRAMCtrl*
DRAMCtrlParams::create()
{
    return new DRAMCtrl(this);
}
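// Note: DRAMCtrlParams is generated by gem5's build system from the
// Python SimObject description of this controller, and create() is
// the standard hook through which a configured parameter set is
// instantiated as a DRAMCtrl.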