dram_ctrl.cc revision 10147:3e51a30b8071
1/* 2 * Copyright (c) 2010-2013 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2013 Amin Farmahini-Farahani 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Andreas Hansson 41 * Ani Udipi 42 * Neha Agarwal 43 */ 44 45#include "base/bitfield.hh" 46#include "base/trace.hh" 47#include "debug/DRAM.hh" 48#include "debug/Drain.hh" 49#include "mem/dram_ctrl.hh" 50#include "sim/system.hh" 51 52using namespace std; 53 54DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) : 55 AbstractMemory(p), 56 port(name() + ".port", *this), 57 retryRdReq(false), retryWrReq(false), 58 rowHitFlag(false), stopReads(false), 59 writeEvent(this), respondEvent(this), 60 refreshEvent(this), nextReqEvent(this), drainManager(NULL), 61 deviceBusWidth(p->device_bus_width), burstLength(p->burst_length), 62 deviceRowBufferSize(p->device_rowbuffer_size), 63 devicesPerRank(p->devices_per_rank), 64 burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8), 65 rowBufferSize(devicesPerRank * deviceRowBufferSize), 66 columnsPerRowBuffer(rowBufferSize / burstSize), 67 ranksPerChannel(p->ranks_per_channel), 68 banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0), 69 readBufferSize(p->read_buffer_size), 70 writeBufferSize(p->write_buffer_size), 71 writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0), 72 writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0), 73 minWritesPerSwitch(p->min_writes_per_switch), 74 writesThisTime(0), readsThisTime(0), 75 tWTR(p->tWTR), tBURST(p->tBURST), 76 tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS), 77 tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD), 78 tXAW(p->tXAW), activationLimit(p->activation_limit), 79 memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping), 80 pageMgmt(p->page_policy), 81 maxAccessesPerRow(p->max_accesses_per_row), 82 frontendLatency(p->static_frontend_latency), 83 backendLatency(p->static_backend_latency), 84 busBusyUntil(0), prevArrival(0), 85 newTime(0), startTickPrechargeAll(0), numBanksActive(0) 86{ 87 // create the bank states based on the dimensions of the ranks and 88 // banks 89 banks.resize(ranksPerChannel); 90 actTicks.resize(ranksPerChannel); 91 for (size_t c = 0; c < ranksPerChannel; ++c) { 92 banks[c].resize(banksPerRank); 93 actTicks[c].resize(activationLimit, 0); 94 } 95 96 // perform a basic check of the write thresholds 97 if (p->write_low_thresh_perc >= p->write_high_thresh_perc) 98 fatal("Write buffer low threshold %d must be smaller than the " 99 "high threshold %d\n", p->write_low_thresh_perc, 100 p->write_high_thresh_perc); 101 102 // determine the rows per bank by looking at the total capacity 103 uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size()); 104 105 DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity, 106 AbstractMemory::size()); 107 108 DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n", 109 rowBufferSize, columnsPerRowBuffer); 110 111 rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel); 112 113 if (range.interleaved()) { 114 if (channels != range.stripes()) 115 fatal("%s has %d interleaved address stripes but %d channel(s)\n", 116 name(), range.stripes(), channels); 117 118 if (addrMapping == Enums::RoRaBaChCo) { 119 if (rowBufferSize != range.granularity()) { 120 fatal("Interleaving of %s doesn't match RoRaBaChCo " 121 "address map\n", name()); 122 } 123 } else if (addrMapping == Enums::RoRaBaCoCh) { 124 if (system()->cacheLineSize() != range.granularity()) { 125 fatal("Interleaving of %s doesn't match RoRaBaCoCh " 126 "address map\n", name()); 127 } 128 } else if (addrMapping == Enums::RoCoRaBaCh) { 129 if (system()->cacheLineSize() != range.granularity()) 130 fatal("Interleaving of %s doesn't match RoCoRaBaCh " 131 "address map\n", name()); 132 } 133 } 134} 135 136void 137DRAMCtrl::init() 138{ 139 if (!port.isConnected()) { 140 fatal("DRAMCtrl %s is unconnected!\n", name()); 141 } else { 142 port.sendRangeChange(); 143 } 144} 145 146void 147DRAMCtrl::startup() 148{ 149 // update the start tick for the precharge accounting to the 150 // current tick 151 startTickPrechargeAll = curTick(); 152 153 // print the configuration of the controller 154 printParams(); 155 156 // kick off the refresh 157 schedule(refreshEvent, curTick() + tREFI); 158} 159 160Tick 161DRAMCtrl::recvAtomic(PacketPtr pkt) 162{ 163 DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr()); 164 165 // do the actual memory access and turn the packet into a response 166 access(pkt); 167 168 Tick latency = 0; 169 if (!pkt->memInhibitAsserted() && pkt->hasData()) { 170 // this value is not supposed to be accurate, just enough to 171 // keep things going, mimic a closed page 172 latency = tRP + tRCD + tCL; 173 } 174 return latency; 175} 176 177bool 178DRAMCtrl::readQueueFull(unsigned int neededEntries) const 179{ 180 DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n", 181 readBufferSize, readQueue.size() + respQueue.size(), 182 neededEntries); 183 184 return 185 (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize; 186} 187 188bool 189DRAMCtrl::writeQueueFull(unsigned int neededEntries) const 190{ 191 DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n", 192 writeBufferSize, writeQueue.size(), neededEntries); 193 return (writeQueue.size() + neededEntries) > writeBufferSize; 194} 195 196DRAMCtrl::DRAMPacket* 197DRAMCtrl::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size, 198 bool isRead) 199{ 200 // decode the address based on the address mapping scheme, with 201 // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and 202 // channel, respectively 203 uint8_t rank; 204 uint8_t bank; 205 uint16_t row; 206 207 // truncate the address to the access granularity 208 Addr addr = dramPktAddr / burstSize; 209 210 // we have removed the lowest order address bits that denote the 211 // position within the column 212 if (addrMapping == Enums::RoRaBaChCo) { 213 // the lowest order bits denote the column to ensure that 214 // sequential cache lines occupy the same row 215 addr = addr / columnsPerRowBuffer; 216 217 // take out the channel part of the address 218 addr = addr / channels; 219 220 // after the channel bits, get the bank bits to interleave 221 // over the banks 222 bank = addr % banksPerRank; 223 addr = addr / banksPerRank; 224 225 // after the bank, we get the rank bits which thus interleaves 226 // over the ranks 227 rank = addr % ranksPerChannel; 228 addr = addr / ranksPerChannel; 229 230 // lastly, get the row bits 231 row = addr % rowsPerBank; 232 addr = addr / rowsPerBank; 233 } else if (addrMapping == Enums::RoRaBaCoCh) { 234 // take out the channel part of the address 235 addr = addr / channels; 236 237 // next, the column 238 addr = addr / columnsPerRowBuffer; 239 240 // after the column bits, we get the bank bits to interleave 241 // over the banks 242 bank = addr % banksPerRank; 243 addr = addr / banksPerRank; 244 245 // after the bank, we get the rank bits which thus interleaves 246 // over the ranks 247 rank = addr % ranksPerChannel; 248 addr = addr / ranksPerChannel; 249 250 // lastly, get the row bits 251 row = addr % rowsPerBank; 252 addr = addr / rowsPerBank; 253 } else if (addrMapping == Enums::RoCoRaBaCh) { 254 // optimise for closed page mode and utilise maximum 255 // parallelism of the DRAM (at the cost of power) 256 257 // take out the channel part of the address, not that this has 258 // to match with how accesses are interleaved between the 259 // controllers in the address mapping 260 addr = addr / channels; 261 262 // start with the bank bits, as this provides the maximum 263 // opportunity for parallelism between requests 264 bank = addr % banksPerRank; 265 addr = addr / banksPerRank; 266 267 // next get the rank bits 268 rank = addr % ranksPerChannel; 269 addr = addr / ranksPerChannel; 270 271 // next the column bits which we do not need to keep track of 272 // and simply skip past 273 addr = addr / columnsPerRowBuffer; 274 275 // lastly, get the row bits 276 row = addr % rowsPerBank; 277 addr = addr / rowsPerBank; 278 } else 279 panic("Unknown address mapping policy chosen!"); 280 281 assert(rank < ranksPerChannel); 282 assert(bank < banksPerRank); 283 assert(row < rowsPerBank); 284 285 DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n", 286 dramPktAddr, rank, bank, row); 287 288 // create the corresponding DRAM packet with the entry time and 289 // ready time set to the current tick, the latter will be updated 290 // later 291 uint16_t bank_id = banksPerRank * rank + bank; 292 return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr, 293 size, banks[rank][bank]); 294} 295 296void 297DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount) 298{ 299 // only add to the read queue here. whenever the request is 300 // eventually done, set the readyTime, and call schedule() 301 assert(!pkt->isWrite()); 302 303 assert(pktCount != 0); 304 305 // if the request size is larger than burst size, the pkt is split into 306 // multiple DRAM packets 307 // Note if the pkt starting address is not aligened to burst size, the 308 // address of first DRAM packet is kept unaliged. Subsequent DRAM packets 309 // are aligned to burst size boundaries. This is to ensure we accurately 310 // check read packets against packets in write queue. 311 Addr addr = pkt->getAddr(); 312 unsigned pktsServicedByWrQ = 0; 313 BurstHelper* burst_helper = NULL; 314 for (int cnt = 0; cnt < pktCount; ++cnt) { 315 unsigned size = std::min((addr | (burstSize - 1)) + 1, 316 pkt->getAddr() + pkt->getSize()) - addr; 317 readPktSize[ceilLog2(size)]++; 318 readBursts++; 319 320 // First check write buffer to see if the data is already at 321 // the controller 322 bool foundInWrQ = false; 323 for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) { 324 // check if the read is subsumed in the write entry we are 325 // looking at 326 if ((*i)->addr <= addr && 327 (addr + size) <= ((*i)->addr + (*i)->size)) { 328 foundInWrQ = true; 329 servicedByWrQ++; 330 pktsServicedByWrQ++; 331 DPRINTF(DRAM, "Read to addr %lld with size %d serviced by " 332 "write queue\n", addr, size); 333 bytesReadWrQ += burstSize; 334 break; 335 } 336 } 337 338 // If not found in the write q, make a DRAM packet and 339 // push it onto the read queue 340 if (!foundInWrQ) { 341 342 // Make the burst helper for split packets 343 if (pktCount > 1 && burst_helper == NULL) { 344 DPRINTF(DRAM, "Read to addr %lld translates to %d " 345 "dram requests\n", pkt->getAddr(), pktCount); 346 burst_helper = new BurstHelper(pktCount); 347 } 348 349 DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true); 350 dram_pkt->burstHelper = burst_helper; 351 352 assert(!readQueueFull(1)); 353 rdQLenPdf[readQueue.size() + respQueue.size()]++; 354 355 DPRINTF(DRAM, "Adding to read queue\n"); 356 357 readQueue.push_back(dram_pkt); 358 359 // Update stats 360 avgRdQLen = readQueue.size() + respQueue.size(); 361 } 362 363 // Starting address of next dram pkt (aligend to burstSize boundary) 364 addr = (addr | (burstSize - 1)) + 1; 365 } 366 367 // If all packets are serviced by write queue, we send the repsonse back 368 if (pktsServicedByWrQ == pktCount) { 369 accessAndRespond(pkt, frontendLatency); 370 return; 371 } 372 373 // Update how many split packets are serviced by write queue 374 if (burst_helper != NULL) 375 burst_helper->burstsServiced = pktsServicedByWrQ; 376 377 // If we are not already scheduled to get the read request out of 378 // the queue, do so now 379 if (!nextReqEvent.scheduled() && !stopReads) { 380 DPRINTF(DRAM, "Request scheduled immediately\n"); 381 schedule(nextReqEvent, curTick()); 382 } 383} 384 385void 386DRAMCtrl::processWriteEvent() 387{ 388 assert(!writeQueue.empty()); 389 390 DPRINTF(DRAM, "Beginning DRAM Write\n"); 391 Tick temp1 M5_VAR_USED = std::max(curTick(), busBusyUntil); 392 Tick temp2 M5_VAR_USED = std::max(curTick(), maxBankFreeAt()); 393 394 chooseNextWrite(); 395 DRAMPacket* dram_pkt = writeQueue.front(); 396 // sanity check 397 assert(dram_pkt->size <= burstSize); 398 doDRAMAccess(dram_pkt); 399 400 writeQueue.pop_front(); 401 delete dram_pkt; 402 403 DPRINTF(DRAM, "Writing, bus busy for %lld ticks, banks busy " 404 "for %lld ticks\n", busBusyUntil - temp1, maxBankFreeAt() - temp2); 405 406 // If we emptied the write queue, or got sufficiently below the 407 // threshold (using the minWritesPerSwitch as the hysteresis) and 408 // are not draining, or we have reads waiting and have done enough 409 // writes, then switch to reads. The retry above could already 410 // have caused it to be scheduled, so first check 411 if (writeQueue.empty() || 412 (writeQueue.size() + minWritesPerSwitch < writeLowThreshold && 413 !drainManager) || 414 (!readQueue.empty() && writesThisTime >= minWritesPerSwitch)) { 415 // turn the bus back around for reads again 416 busBusyUntil += tWTR; 417 stopReads = false; 418 419 DPRINTF(DRAM, "Switching to reads after %d writes with %d writes " 420 "waiting\n", writesThisTime, writeQueue.size()); 421 422 wrPerTurnAround.sample(writesThisTime); 423 writesThisTime = 0; 424 425 if (!nextReqEvent.scheduled()) 426 schedule(nextReqEvent, busBusyUntil); 427 } else { 428 assert(!writeEvent.scheduled()); 429 DPRINTF(DRAM, "Next write scheduled at %lld\n", newTime); 430 schedule(writeEvent, newTime); 431 } 432 433 if (retryWrReq) { 434 retryWrReq = false; 435 port.sendRetry(); 436 } 437 438 // if there is nothing left in any queue, signal a drain 439 if (writeQueue.empty() && readQueue.empty() && 440 respQueue.empty () && drainManager) { 441 drainManager->signalDrainDone(); 442 drainManager = NULL; 443 } 444} 445 446 447void 448DRAMCtrl::triggerWrites() 449{ 450 DPRINTF(DRAM, "Switching to writes after %d reads with %d reads " 451 "waiting\n", readsThisTime, readQueue.size()); 452 453 // Flag variable to stop any more read scheduling 454 stopReads = true; 455 456 Tick write_start_time = std::max(busBusyUntil, curTick()) + tWTR; 457 458 DPRINTF(DRAM, "Writes scheduled at %lld\n", write_start_time); 459 460 // there is some danger here as there might still be reads 461 // happening before the switch actually takes place 462 rdPerTurnAround.sample(readsThisTime); 463 readsThisTime = 0; 464 465 assert(write_start_time >= curTick()); 466 assert(!writeEvent.scheduled()); 467 schedule(writeEvent, write_start_time); 468} 469 470void 471DRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pktCount) 472{ 473 // only add to the write queue here. whenever the request is 474 // eventually done, set the readyTime, and call schedule() 475 assert(pkt->isWrite()); 476 477 // if the request size is larger than burst size, the pkt is split into 478 // multiple DRAM packets 479 Addr addr = pkt->getAddr(); 480 for (int cnt = 0; cnt < pktCount; ++cnt) { 481 unsigned size = std::min((addr | (burstSize - 1)) + 1, 482 pkt->getAddr() + pkt->getSize()) - addr; 483 writePktSize[ceilLog2(size)]++; 484 writeBursts++; 485 486 // see if we can merge with an existing item in the write 487 // queue and keep track of whether we have merged or not so we 488 // can stop at that point and also avoid enqueueing a new 489 // request 490 bool merged = false; 491 auto w = writeQueue.begin(); 492 493 while(!merged && w != writeQueue.end()) { 494 // either of the two could be first, if they are the same 495 // it does not matter which way we go 496 if ((*w)->addr >= addr) { 497 // the existing one starts after the new one, figure 498 // out where the new one ends with respect to the 499 // existing one 500 if ((addr + size) >= ((*w)->addr + (*w)->size)) { 501 // check if the existing one is completely 502 // subsumed in the new one 503 DPRINTF(DRAM, "Merging write covering existing burst\n"); 504 merged = true; 505 // update both the address and the size 506 (*w)->addr = addr; 507 (*w)->size = size; 508 } else if ((addr + size) >= (*w)->addr && 509 ((*w)->addr + (*w)->size - addr) <= burstSize) { 510 // the new one is just before or partially 511 // overlapping with the existing one, and together 512 // they fit within a burst 513 DPRINTF(DRAM, "Merging write before existing burst\n"); 514 merged = true; 515 // the existing queue item needs to be adjusted with 516 // respect to both address and size 517 (*w)->size = (*w)->addr + (*w)->size - addr; 518 (*w)->addr = addr; 519 } 520 } else { 521 // the new one starts after the current one, figure 522 // out where the existing one ends with respect to the 523 // new one 524 if (((*w)->addr + (*w)->size) >= (addr + size)) { 525 // check if the new one is completely subsumed in the 526 // existing one 527 DPRINTF(DRAM, "Merging write into existing burst\n"); 528 merged = true; 529 // no adjustments necessary 530 } else if (((*w)->addr + (*w)->size) >= addr && 531 (addr + size - (*w)->addr) <= burstSize) { 532 // the existing one is just before or partially 533 // overlapping with the new one, and together 534 // they fit within a burst 535 DPRINTF(DRAM, "Merging write after existing burst\n"); 536 merged = true; 537 // the address is right, and only the size has 538 // to be adjusted 539 (*w)->size = addr + size - (*w)->addr; 540 } 541 } 542 ++w; 543 } 544 545 // if the item was not merged we need to create a new write 546 // and enqueue it 547 if (!merged) { 548 DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false); 549 550 assert(writeQueue.size() < writeBufferSize); 551 wrQLenPdf[writeQueue.size()]++; 552 553 DPRINTF(DRAM, "Adding to write queue\n"); 554 555 writeQueue.push_back(dram_pkt); 556 557 // Update stats 558 avgWrQLen = writeQueue.size(); 559 } else { 560 // keep track of the fact that this burst effectively 561 // disappeared as it was merged with an existing one 562 mergedWrBursts++; 563 } 564 565 // Starting address of next dram pkt (aligend to burstSize boundary) 566 addr = (addr | (burstSize - 1)) + 1; 567 } 568 569 // we do not wait for the writes to be send to the actual memory, 570 // but instead take responsibility for the consistency here and 571 // snoop the write queue for any upcoming reads 572 // @todo, if a pkt size is larger than burst size, we might need a 573 // different front end latency 574 accessAndRespond(pkt, frontendLatency); 575 576 // If your write buffer is starting to fill up, drain it! 577 if (writeQueue.size() >= writeHighThreshold && !stopReads){ 578 triggerWrites(); 579 } 580} 581 582void 583DRAMCtrl::printParams() const 584{ 585 // Sanity check print of important parameters 586 DPRINTF(DRAM, 587 "Memory controller %s physical organization\n" \ 588 "Number of devices per rank %d\n" \ 589 "Device bus width (in bits) %d\n" \ 590 "DRAM data bus burst (bytes) %d\n" \ 591 "Row buffer size (bytes) %d\n" \ 592 "Columns per row buffer %d\n" \ 593 "Rows per bank %d\n" \ 594 "Banks per rank %d\n" \ 595 "Ranks per channel %d\n" \ 596 "Total mem capacity (bytes) %u\n", 597 name(), devicesPerRank, deviceBusWidth, burstSize, rowBufferSize, 598 columnsPerRowBuffer, rowsPerBank, banksPerRank, ranksPerChannel, 599 rowBufferSize * rowsPerBank * banksPerRank * ranksPerChannel); 600 601 string scheduler = memSchedPolicy == Enums::fcfs ? "FCFS" : "FR-FCFS"; 602 string address_mapping = addrMapping == Enums::RoRaBaChCo ? "RoRaBaChCo" : 603 (addrMapping == Enums::RoRaBaCoCh ? "RoRaBaCoCh" : "RoCoRaBaCh"); 604 string page_policy = pageMgmt == Enums::open ? "OPEN" : 605 (pageMgmt == Enums::open_adaptive ? "OPEN (adaptive)" : 606 (pageMgmt == Enums::close_adaptive ? "CLOSE (adaptive)" : "CLOSE")); 607 608 DPRINTF(DRAM, 609 "Memory controller %s characteristics\n" \ 610 "Read buffer size %d\n" \ 611 "Write buffer size %d\n" \ 612 "Write high thresh %d\n" \ 613 "Write low thresh %d\n" \ 614 "Scheduler %s\n" \ 615 "Address mapping %s\n" \ 616 "Page policy %s\n", 617 name(), readBufferSize, writeBufferSize, writeHighThreshold, 618 writeLowThreshold, scheduler, address_mapping, page_policy); 619 620 DPRINTF(DRAM, "Memory controller %s timing specs\n" \ 621 "tRCD %d ticks\n" \ 622 "tCL %d ticks\n" \ 623 "tRP %d ticks\n" \ 624 "tBURST %d ticks\n" \ 625 "tRFC %d ticks\n" \ 626 "tREFI %d ticks\n" \ 627 "tWTR %d ticks\n" \ 628 "tXAW (%d) %d ticks\n", 629 name(), tRCD, tCL, tRP, tBURST, tRFC, tREFI, tWTR, 630 activationLimit, tXAW); 631} 632 633void 634DRAMCtrl::printQs() const { 635 DPRINTF(DRAM, "===READ QUEUE===\n\n"); 636 for (auto i = readQueue.begin() ; i != readQueue.end() ; ++i) { 637 DPRINTF(DRAM, "Read %lu\n", (*i)->addr); 638 } 639 DPRINTF(DRAM, "\n===RESP QUEUE===\n\n"); 640 for (auto i = respQueue.begin() ; i != respQueue.end() ; ++i) { 641 DPRINTF(DRAM, "Response %lu\n", (*i)->addr); 642 } 643 DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n"); 644 for (auto i = writeQueue.begin() ; i != writeQueue.end() ; ++i) { 645 DPRINTF(DRAM, "Write %lu\n", (*i)->addr); 646 } 647} 648 649bool 650DRAMCtrl::recvTimingReq(PacketPtr pkt) 651{ 652 /// @todo temporary hack to deal with memory corruption issues until 653 /// 4-phase transactions are complete 654 for (int x = 0; x < pendingDelete.size(); x++) 655 delete pendingDelete[x]; 656 pendingDelete.clear(); 657 658 // This is where we enter from the outside world 659 DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n", 660 pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 661 662 // simply drop inhibited packets for now 663 if (pkt->memInhibitAsserted()) { 664 DPRINTF(DRAM, "Inhibited packet -- Dropping it now\n"); 665 pendingDelete.push_back(pkt); 666 return true; 667 } 668 669 // Calc avg gap between requests 670 if (prevArrival != 0) { 671 totGap += curTick() - prevArrival; 672 } 673 prevArrival = curTick(); 674 675 676 // Find out how many dram packets a pkt translates to 677 // If the burst size is equal or larger than the pkt size, then a pkt 678 // translates to only one dram packet. Otherwise, a pkt translates to 679 // multiple dram packets 680 unsigned size = pkt->getSize(); 681 unsigned offset = pkt->getAddr() & (burstSize - 1); 682 unsigned int dram_pkt_count = divCeil(offset + size, burstSize); 683 684 // check local buffers and do not accept if full 685 if (pkt->isRead()) { 686 assert(size != 0); 687 if (readQueueFull(dram_pkt_count)) { 688 DPRINTF(DRAM, "Read queue full, not accepting\n"); 689 // remember that we have to retry this port 690 retryRdReq = true; 691 numRdRetry++; 692 return false; 693 } else { 694 addToReadQueue(pkt, dram_pkt_count); 695 readReqs++; 696 bytesReadSys += size; 697 } 698 } else if (pkt->isWrite()) { 699 assert(size != 0); 700 if (writeQueueFull(dram_pkt_count)) { 701 DPRINTF(DRAM, "Write queue full, not accepting\n"); 702 // remember that we have to retry this port 703 retryWrReq = true; 704 numWrRetry++; 705 return false; 706 } else { 707 addToWriteQueue(pkt, dram_pkt_count); 708 writeReqs++; 709 bytesWrittenSys += size; 710 } 711 } else { 712 DPRINTF(DRAM,"Neither read nor write, ignore timing\n"); 713 neitherReadNorWrite++; 714 accessAndRespond(pkt, 1); 715 } 716 717 return true; 718} 719 720void 721DRAMCtrl::processRespondEvent() 722{ 723 DPRINTF(DRAM, 724 "processRespondEvent(): Some req has reached its readyTime\n"); 725 726 DRAMPacket* dram_pkt = respQueue.front(); 727 728 if (dram_pkt->burstHelper) { 729 // it is a split packet 730 dram_pkt->burstHelper->burstsServiced++; 731 if (dram_pkt->burstHelper->burstsServiced == 732 dram_pkt->burstHelper->burstCount) { 733 // we have now serviced all children packets of a system packet 734 // so we can now respond to the requester 735 // @todo we probably want to have a different front end and back 736 // end latency for split packets 737 accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency); 738 delete dram_pkt->burstHelper; 739 dram_pkt->burstHelper = NULL; 740 } 741 } else { 742 // it is not a split packet 743 accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency); 744 } 745 746 delete respQueue.front(); 747 respQueue.pop_front(); 748 749 if (!respQueue.empty()) { 750 assert(respQueue.front()->readyTime >= curTick()); 751 assert(!respondEvent.scheduled()); 752 schedule(respondEvent, respQueue.front()->readyTime); 753 } else { 754 // if there is nothing left in any queue, signal a drain 755 if (writeQueue.empty() && readQueue.empty() && 756 drainManager) { 757 drainManager->signalDrainDone(); 758 drainManager = NULL; 759 } 760 } 761 762 // We have made a location in the queue available at this point, 763 // so if there is a read that was forced to wait, retry now 764 if (retryRdReq) { 765 retryRdReq = false; 766 port.sendRetry(); 767 } 768} 769 770void 771DRAMCtrl::chooseNextWrite() 772{ 773 // This method does the arbitration between write requests. The 774 // chosen packet is simply moved to the head of the write 775 // queue. The other methods know that this is the place to 776 // look. For example, with FCFS, this method does nothing 777 assert(!writeQueue.empty()); 778 779 if (writeQueue.size() == 1) { 780 DPRINTF(DRAM, "Single write request, nothing to do\n"); 781 return; 782 } 783 784 if (memSchedPolicy == Enums::fcfs) { 785 // Do nothing, since the correct request is already head 786 } else if (memSchedPolicy == Enums::frfcfs) { 787 reorderQueue(writeQueue); 788 } else 789 panic("No scheduling policy chosen\n"); 790 791 DPRINTF(DRAM, "Selected next write request\n"); 792} 793 794bool 795DRAMCtrl::chooseNextRead() 796{ 797 // This method does the arbitration between read requests. The 798 // chosen packet is simply moved to the head of the queue. The 799 // other methods know that this is the place to look. For example, 800 // with FCFS, this method does nothing 801 if (readQueue.empty()) { 802 DPRINTF(DRAM, "No read request to select\n"); 803 return false; 804 } 805 806 // If there is only one request then there is nothing left to do 807 if (readQueue.size() == 1) 808 return true; 809 810 if (memSchedPolicy == Enums::fcfs) { 811 // Do nothing, since the request to serve is already the first 812 // one in the read queue 813 } else if (memSchedPolicy == Enums::frfcfs) { 814 reorderQueue(readQueue); 815 } else 816 panic("No scheduling policy chosen!\n"); 817 818 DPRINTF(DRAM, "Selected next read request\n"); 819 return true; 820} 821 822void 823DRAMCtrl::reorderQueue(std::deque<DRAMPacket*>& queue) 824{ 825 // Only determine this when needed 826 uint64_t earliest_banks = 0; 827 828 // Search for row hits first, if no row hit is found then schedule the 829 // packet to one of the earliest banks available 830 bool found_earliest_pkt = false; 831 auto selected_pkt_it = queue.begin(); 832 833 for (auto i = queue.begin(); i != queue.end() ; ++i) { 834 DRAMPacket* dram_pkt = *i; 835 const Bank& bank = dram_pkt->bankRef; 836 // Check if it is a row hit 837 if (bank.openRow == dram_pkt->row) { 838 DPRINTF(DRAM, "Row buffer hit\n"); 839 selected_pkt_it = i; 840 break; 841 } else if (!found_earliest_pkt) { 842 // No row hit, go for first ready 843 if (earliest_banks == 0) 844 earliest_banks = minBankFreeAt(queue); 845 846 // Bank is ready or is the first available bank 847 if (bank.freeAt <= curTick() || 848 bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) { 849 // Remember the packet to be scheduled to one of the earliest 850 // banks available 851 selected_pkt_it = i; 852 found_earliest_pkt = true; 853 } 854 } 855 } 856 857 DRAMPacket* selected_pkt = *selected_pkt_it; 858 queue.erase(selected_pkt_it); 859 queue.push_front(selected_pkt); 860} 861 862void 863DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency) 864{ 865 DPRINTF(DRAM, "Responding to Address %lld.. ",pkt->getAddr()); 866 867 bool needsResponse = pkt->needsResponse(); 868 // do the actual memory access which also turns the packet into a 869 // response 870 access(pkt); 871 872 // turn packet around to go back to requester if response expected 873 if (needsResponse) { 874 // access already turned the packet into a response 875 assert(pkt->isResponse()); 876 877 // @todo someone should pay for this 878 pkt->busFirstWordDelay = pkt->busLastWordDelay = 0; 879 880 // queue the packet in the response queue to be sent out after 881 // the static latency has passed 882 port.schedTimingResp(pkt, curTick() + static_latency); 883 } else { 884 // @todo the packet is going to be deleted, and the DRAMPacket 885 // is still having a pointer to it 886 pendingDelete.push_back(pkt); 887 } 888 889 DPRINTF(DRAM, "Done\n"); 890 891 return; 892} 893 894pair<Tick, Tick> 895DRAMCtrl::estimateLatency(DRAMPacket* dram_pkt, Tick inTime) 896{ 897 // If a request reaches a bank at tick 'inTime', how much time 898 // *after* that does it take to finish the request, depending 899 // on bank status and page open policy. Note that this method 900 // considers only the time taken for the actual read or write 901 // to complete, NOT any additional time thereafter for tRAS or 902 // tRP. 903 Tick accLat = 0; 904 Tick bankLat = 0; 905 rowHitFlag = false; 906 Tick potentialActTick; 907 908 const Bank& bank = dram_pkt->bankRef; 909 // open-page policy or close_adaptive policy 910 if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive || 911 pageMgmt == Enums::close_adaptive) { 912 if (bank.openRow == dram_pkt->row) { 913 // When we have a row-buffer hit, 914 // we don't care about tRAS having expired or not, 915 // but do care about bank being free for access 916 rowHitFlag = true; 917 918 // When a series of requests arrive to the same row, 919 // DDR systems are capable of streaming data continuously 920 // at maximum bandwidth (subject to tCCD). Here, we approximate 921 // this condition, and assume that if whenever a bank is already 922 // busy and a new request comes in, it can be completed with no 923 // penalty beyond waiting for the existing read to complete. 924 if (bank.freeAt > inTime) { 925 accLat += bank.freeAt - inTime; 926 bankLat += 0; 927 } else { 928 // CAS latency only 929 accLat += tCL; 930 bankLat += tCL; 931 } 932 933 } else { 934 // Row-buffer miss, need to close existing row 935 // once tRAS has expired, then open the new one, 936 // then add cas latency. 937 Tick freeTime = std::max(bank.tRASDoneAt, bank.freeAt); 938 939 if (freeTime > inTime) 940 accLat += freeTime - inTime; 941 942 // If the there is no open row (open adaptive), then there 943 // is no precharge delay, otherwise go with tRP 944 Tick precharge_delay = bank.openRow == -1 ? 0 : tRP; 945 946 //The bank is free, and you may be able to activate 947 potentialActTick = inTime + accLat + precharge_delay; 948 if (potentialActTick < bank.actAllowedAt) 949 accLat += bank.actAllowedAt - potentialActTick; 950 951 accLat += precharge_delay + tRCD + tCL; 952 bankLat += precharge_delay + tRCD + tCL; 953 } 954 } else if (pageMgmt == Enums::close) { 955 // With a close page policy, no notion of 956 // bank.tRASDoneAt 957 if (bank.freeAt > inTime) 958 accLat += bank.freeAt - inTime; 959 960 //The bank is free, and you may be able to activate 961 potentialActTick = inTime + accLat; 962 if (potentialActTick < bank.actAllowedAt) 963 accLat += bank.actAllowedAt - potentialActTick; 964 965 // page already closed, simply open the row, and 966 // add cas latency 967 accLat += tRCD + tCL; 968 bankLat += tRCD + tCL; 969 } else 970 panic("No page management policy chosen\n"); 971 972 DPRINTF(DRAM, "Returning < %lld, %lld > from estimateLatency()\n", 973 bankLat, accLat); 974 975 return make_pair(bankLat, accLat); 976} 977 978void 979DRAMCtrl::processNextReqEvent() 980{ 981 scheduleNextReq(); 982} 983 984void 985DRAMCtrl::recordActivate(Tick act_tick, uint8_t rank, uint8_t bank) 986{ 987 assert(0 <= rank && rank < ranksPerChannel); 988 assert(actTicks[rank].size() == activationLimit); 989 990 DPRINTF(DRAM, "Activate at tick %d\n", act_tick); 991 992 // Tracking accesses after all banks are precharged. 993 // startTickPrechargeAll: is the tick when all the banks were again 994 // precharged. The difference between act_tick and startTickPrechargeAll 995 // gives the time for which DRAM doesn't get any accesses after refreshing 996 // or after a page is closed in closed-page or open-adaptive-page policy. 997 if ((numBanksActive == 0) && (act_tick > startTickPrechargeAll)) { 998 prechargeAllTime += act_tick - startTickPrechargeAll; 999 } 1000 1001 // No need to update number of active banks for closed-page policy as only 1 1002 // bank will be activated at any given point, which will be instatntly 1003 // precharged 1004 if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive || 1005 pageMgmt == Enums::close_adaptive) 1006 ++numBanksActive; 1007 1008 // start by enforcing tRRD 1009 for(int i = 0; i < banksPerRank; i++) { 1010 // next activate must not happen before tRRD 1011 banks[rank][i].actAllowedAt = act_tick + tRRD; 1012 } 1013 // tRC should be added to activation tick of the bank currently accessed, 1014 // where tRC = tRAS + tRP, this is just for a check as actAllowedAt for same 1015 // bank is already captured by bank.freeAt and bank.tRASDoneAt 1016 banks[rank][bank].actAllowedAt = act_tick + tRAS + tRP; 1017 1018 // next, we deal with tXAW, if the activation limit is disabled 1019 // then we are done 1020 if (actTicks[rank].empty()) 1021 return; 1022 1023 // sanity check 1024 if (actTicks[rank].back() && (act_tick - actTicks[rank].back()) < tXAW) { 1025 // @todo For now, stick with a warning 1026 warn("Got %d activates in window %d (%d - %d) which is smaller " 1027 "than %d\n", activationLimit, act_tick - actTicks[rank].back(), 1028 act_tick, actTicks[rank].back(), tXAW); 1029 } 1030 1031 // shift the times used for the book keeping, the last element 1032 // (highest index) is the oldest one and hence the lowest value 1033 actTicks[rank].pop_back(); 1034 1035 // record an new activation (in the future) 1036 actTicks[rank].push_front(act_tick); 1037 1038 // cannot activate more than X times in time window tXAW, push the 1039 // next one (the X + 1'st activate) to be tXAW away from the 1040 // oldest in our window of X 1041 if (actTicks[rank].back() && (act_tick - actTicks[rank].back()) < tXAW) { 1042 DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate no earlier " 1043 "than %d\n", activationLimit, actTicks[rank].back() + tXAW); 1044 for(int j = 0; j < banksPerRank; j++) 1045 // next activate must not happen before end of window 1046 banks[rank][j].actAllowedAt = actTicks[rank].back() + tXAW; 1047 } 1048} 1049 1050void 1051DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt) 1052{ 1053 1054 DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n", 1055 dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row); 1056 1057 // estimate the bank and access latency 1058 pair<Tick, Tick> lat = estimateLatency(dram_pkt, curTick()); 1059 Tick bankLat = lat.first; 1060 Tick accessLat = lat.second; 1061 Tick actTick; 1062 1063 // This request was woken up at this time based on a prior call 1064 // to estimateLatency(). However, between then and now, both the 1065 // accessLatency and/or busBusyUntil may have changed. We need 1066 // to correct for that. 1067 1068 Tick addDelay = (curTick() + accessLat < busBusyUntil) ? 1069 busBusyUntil - (curTick() + accessLat) : 0; 1070 1071 Bank& bank = dram_pkt->bankRef; 1072 1073 // Update bank state 1074 if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive || 1075 pageMgmt == Enums::close_adaptive) { 1076 bank.freeAt = curTick() + addDelay + accessLat; 1077 1078 // If you activated a new row do to this access, the next access 1079 // will have to respect tRAS for this bank. 1080 if (!rowHitFlag) { 1081 // any waiting for banks account for in freeAt 1082 actTick = bank.freeAt - tCL - tRCD; 1083 bank.tRASDoneAt = actTick + tRAS; 1084 recordActivate(actTick, dram_pkt->rank, dram_pkt->bank); 1085 1086 // if we closed an open row as a result of this access, 1087 // then sample the number of bytes accessed before 1088 // resetting it 1089 if (bank.openRow != -1) 1090 bytesPerActivate.sample(bank.bytesAccessed); 1091 1092 // update the open row 1093 bank.openRow = dram_pkt->row; 1094 1095 // start counting anew, this covers both the case when we 1096 // auto-precharged, and when this access is forced to 1097 // precharge 1098 bank.bytesAccessed = 0; 1099 bank.rowAccesses = 0; 1100 } 1101 1102 // increment the bytes accessed and the accesses per row 1103 bank.bytesAccessed += burstSize; 1104 ++bank.rowAccesses; 1105 1106 // if we reached the max, then issue with an auto-precharge 1107 bool auto_precharge = bank.rowAccesses == maxAccessesPerRow; 1108 1109 // if we did not hit the limit, we might still want to 1110 // auto-precharge 1111 if (!auto_precharge && 1112 (pageMgmt == Enums::open_adaptive || 1113 pageMgmt == Enums::close_adaptive)) { 1114 // a twist on the open and close page policies: 1115 // 1) open_adaptive page policy does not blindly keep the 1116 // page open, but close it if there are no row hits, and there 1117 // are bank conflicts in the queue 1118 // 2) close_adaptive page policy does not blindly close the 1119 // page, but closes it only if there are no row hits in the queue. 1120 // In this case, only force an auto precharge when there 1121 // are no same page hits in the queue 1122 bool got_more_hits = false; 1123 bool got_bank_conflict = false; 1124 1125 // either look at the read queue or write queue 1126 const deque<DRAMPacket*>& queue = dram_pkt->isRead ? readQueue : 1127 writeQueue; 1128 auto p = queue.begin(); 1129 // make sure we are not considering the packet that we are 1130 // currently dealing with (which is the head of the queue) 1131 ++p; 1132 1133 // keep on looking until we have found required condition or 1134 // reached the end 1135 while (!(got_more_hits && 1136 (got_bank_conflict || pageMgmt == Enums::close_adaptive)) && 1137 p != queue.end()) { 1138 bool same_rank_bank = (dram_pkt->rank == (*p)->rank) && 1139 (dram_pkt->bank == (*p)->bank); 1140 bool same_row = dram_pkt->row == (*p)->row; 1141 got_more_hits |= same_rank_bank && same_row; 1142 got_bank_conflict |= same_rank_bank && !same_row; 1143 ++p; 1144 } 1145 1146 // auto pre-charge when either 1147 // 1) open_adaptive policy, we have not got any more hits, and 1148 // have a bank conflict 1149 // 2) close_adaptive policy and we have not got any more hits 1150 auto_precharge = !got_more_hits && 1151 (got_bank_conflict || pageMgmt == Enums::close_adaptive); 1152 } 1153 1154 // if this access should use auto-precharge, then we are 1155 // closing the row 1156 if (auto_precharge) { 1157 bank.openRow = -1; 1158 bank.freeAt = std::max(bank.freeAt, bank.tRASDoneAt) + tRP; 1159 --numBanksActive; 1160 if (numBanksActive == 0) { 1161 startTickPrechargeAll = std::max(startTickPrechargeAll, 1162 bank.freeAt); 1163 DPRINTF(DRAM, "All banks precharged at tick: %ld\n", 1164 startTickPrechargeAll); 1165 } 1166 1167 // sample the bytes per activate here since we are closing 1168 // the page 1169 bytesPerActivate.sample(bank.bytesAccessed); 1170 1171 DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId); 1172 } 1173 1174 DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt); 1175 } else if (pageMgmt == Enums::close) { 1176 actTick = curTick() + addDelay + accessLat - tRCD - tCL; 1177 recordActivate(actTick, dram_pkt->rank, dram_pkt->bank); 1178 1179 // If the DRAM has a very quick tRAS, bank can be made free 1180 // after consecutive tCL,tRCD,tRP times. In general, however, 1181 // an additional wait is required to respect tRAS. 1182 bank.freeAt = std::max(actTick + tRAS + tRP, 1183 actTick + tRCD + tCL + tRP); 1184 DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt); 1185 bytesPerActivate.sample(burstSize); 1186 startTickPrechargeAll = std::max(startTickPrechargeAll, bank.freeAt); 1187 } else 1188 panic("No page management policy chosen\n"); 1189 1190 // Update request parameters 1191 dram_pkt->readyTime = curTick() + addDelay + accessLat + tBURST; 1192 1193 1194 DPRINTF(DRAM, "Req %lld: curtick is %lld accessLat is %d " \ 1195 "readytime is %lld busbusyuntil is %lld. " \ 1196 "Scheduling at readyTime\n", dram_pkt->addr, 1197 curTick(), accessLat, dram_pkt->readyTime, busBusyUntil); 1198 1199 // Make sure requests are not overlapping on the databus 1200 assert (dram_pkt->readyTime - busBusyUntil >= tBURST); 1201 1202 // Update bus state 1203 busBusyUntil = dram_pkt->readyTime; 1204 1205 DPRINTF(DRAM,"Access time is %lld\n", 1206 dram_pkt->readyTime - dram_pkt->entryTime); 1207 1208 // Update the minimum timing between the requests 1209 newTime = (busBusyUntil > tRP + tRCD + tCL) ? 1210 std::max(busBusyUntil - (tRP + tRCD + tCL), curTick()) : curTick(); 1211 1212 // Update the access related stats 1213 if (dram_pkt->isRead) { 1214 ++readsThisTime; 1215 if (rowHitFlag) 1216 readRowHits++; 1217 bytesReadDRAM += burstSize; 1218 perBankRdBursts[dram_pkt->bankId]++; 1219 } else { 1220 ++writesThisTime; 1221 if (rowHitFlag) 1222 writeRowHits++; 1223 bytesWritten += burstSize; 1224 perBankWrBursts[dram_pkt->bankId]++; 1225 1226 // At this point, commonality between reads and writes ends. 1227 // For writes, we are done since we long ago responded to the 1228 // requestor. 1229 return; 1230 } 1231 1232 // Update latency stats 1233 totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime; 1234 totBankLat += bankLat; 1235 totBusLat += tBURST; 1236 totQLat += dram_pkt->readyTime - dram_pkt->entryTime - bankLat - tBURST; 1237 1238 1239 // At this point we're done dealing with the request 1240 // It will be moved to a separate response queue with a 1241 // correct readyTime, and eventually be sent back at that 1242 //time 1243 moveToRespQ(); 1244 1245 // Schedule the next read event 1246 if (!nextReqEvent.scheduled() && !stopReads) { 1247 schedule(nextReqEvent, newTime); 1248 } else { 1249 if (newTime < nextReqEvent.when()) 1250 reschedule(nextReqEvent, newTime); 1251 } 1252} 1253 1254void 1255DRAMCtrl::moveToRespQ() 1256{ 1257 // Remove from read queue 1258 DRAMPacket* dram_pkt = readQueue.front(); 1259 readQueue.pop_front(); 1260 1261 // sanity check 1262 assert(dram_pkt->size <= burstSize); 1263 1264 // Insert into response queue sorted by readyTime 1265 // It will be sent back to the requestor at its 1266 // readyTime 1267 if (respQueue.empty()) { 1268 respQueue.push_front(dram_pkt); 1269 assert(!respondEvent.scheduled()); 1270 assert(dram_pkt->readyTime >= curTick()); 1271 schedule(respondEvent, dram_pkt->readyTime); 1272 } else { 1273 bool done = false; 1274 auto i = respQueue.begin(); 1275 while (!done && i != respQueue.end()) { 1276 if ((*i)->readyTime > dram_pkt->readyTime) { 1277 respQueue.insert(i, dram_pkt); 1278 done = true; 1279 } 1280 ++i; 1281 } 1282 1283 if (!done) 1284 respQueue.push_back(dram_pkt); 1285 1286 assert(respondEvent.scheduled()); 1287 1288 if (respQueue.front()->readyTime < respondEvent.when()) { 1289 assert(respQueue.front()->readyTime >= curTick()); 1290 reschedule(respondEvent, respQueue.front()->readyTime); 1291 } 1292 } 1293} 1294 1295void 1296DRAMCtrl::scheduleNextReq() 1297{ 1298 DPRINTF(DRAM, "Reached scheduleNextReq()\n"); 1299 1300 // Figure out which read request goes next, and move it to the 1301 // front of the read queue 1302 if (!chooseNextRead()) { 1303 // In the case there is no read request to go next, trigger 1304 // writes if we have passed the low threshold (or if we are 1305 // draining) 1306 if (!writeQueue.empty() && !writeEvent.scheduled() && 1307 (writeQueue.size() > writeLowThreshold || drainManager)) 1308 triggerWrites(); 1309 } else { 1310 doDRAMAccess(readQueue.front()); 1311 } 1312} 1313 1314Tick 1315DRAMCtrl::maxBankFreeAt() const 1316{ 1317 Tick banksFree = 0; 1318 1319 for(int i = 0; i < ranksPerChannel; i++) 1320 for(int j = 0; j < banksPerRank; j++) 1321 banksFree = std::max(banks[i][j].freeAt, banksFree); 1322 1323 return banksFree; 1324} 1325 1326uint64_t 1327DRAMCtrl::minBankFreeAt(const deque<DRAMPacket*>& queue) const 1328{ 1329 uint64_t bank_mask = 0; 1330 Tick freeAt = MaxTick; 1331 1332 // detemrine if we have queued transactions targetting the 1333 // bank in question 1334 vector<bool> got_waiting(ranksPerChannel * banksPerRank, false); 1335 for (auto p = queue.begin(); p != queue.end(); ++p) { 1336 got_waiting[(*p)->bankId] = true; 1337 } 1338 1339 for (int i = 0; i < ranksPerChannel; i++) { 1340 for (int j = 0; j < banksPerRank; j++) { 1341 // if we have waiting requests for the bank, and it is 1342 // amongst the first available, update the mask 1343 if (got_waiting[i * banksPerRank + j] && 1344 banks[i][j].freeAt <= freeAt) { 1345 // reset bank mask if new minimum is found 1346 if (banks[i][j].freeAt < freeAt) 1347 bank_mask = 0; 1348 // set the bit corresponding to the available bank 1349 uint8_t bit_index = i * ranksPerChannel + j; 1350 replaceBits(bank_mask, bit_index, bit_index, 1); 1351 freeAt = banks[i][j].freeAt; 1352 } 1353 } 1354 } 1355 return bank_mask; 1356} 1357 1358void 1359DRAMCtrl::processRefreshEvent() 1360{ 1361 DPRINTF(DRAM, "Refreshing at tick %ld\n", curTick()); 1362 1363 Tick banksFree = std::max(curTick(), maxBankFreeAt()) + tRFC; 1364 1365 for(int i = 0; i < ranksPerChannel; i++) 1366 for(int j = 0; j < banksPerRank; j++) { 1367 banks[i][j].freeAt = banksFree; 1368 banks[i][j].openRow = -1; 1369 } 1370 1371 // updating startTickPrechargeAll, isprechargeAll 1372 numBanksActive = 0; 1373 startTickPrechargeAll = banksFree; 1374 1375 schedule(refreshEvent, curTick() + tREFI); 1376} 1377 1378void 1379DRAMCtrl::regStats() 1380{ 1381 using namespace Stats; 1382 1383 AbstractMemory::regStats(); 1384 1385 readReqs 1386 .name(name() + ".readReqs") 1387 .desc("Number of read requests accepted"); 1388 1389 writeReqs 1390 .name(name() + ".writeReqs") 1391 .desc("Number of write requests accepted"); 1392 1393 readBursts 1394 .name(name() + ".readBursts") 1395 .desc("Number of DRAM read bursts, " 1396 "including those serviced by the write queue"); 1397 1398 writeBursts 1399 .name(name() + ".writeBursts") 1400 .desc("Number of DRAM write bursts, " 1401 "including those merged in the write queue"); 1402 1403 servicedByWrQ 1404 .name(name() + ".servicedByWrQ") 1405 .desc("Number of DRAM read bursts serviced by the write queue"); 1406 1407 mergedWrBursts 1408 .name(name() + ".mergedWrBursts") 1409 .desc("Number of DRAM write bursts merged with an existing one"); 1410 1411 neitherReadNorWrite 1412 .name(name() + ".neitherReadNorWriteReqs") 1413 .desc("Number of requests that are neither read nor write"); 1414 1415 perBankRdBursts 1416 .init(banksPerRank * ranksPerChannel) 1417 .name(name() + ".perBankRdBursts") 1418 .desc("Per bank write bursts"); 1419 1420 perBankWrBursts 1421 .init(banksPerRank * ranksPerChannel) 1422 .name(name() + ".perBankWrBursts") 1423 .desc("Per bank write bursts"); 1424 1425 avgRdQLen 1426 .name(name() + ".avgRdQLen") 1427 .desc("Average read queue length when enqueuing") 1428 .precision(2); 1429 1430 avgWrQLen 1431 .name(name() + ".avgWrQLen") 1432 .desc("Average write queue length when enqueuing") 1433 .precision(2); 1434 1435 totQLat 1436 .name(name() + ".totQLat") 1437 .desc("Total ticks spent queuing"); 1438 1439 totBankLat 1440 .name(name() + ".totBankLat") 1441 .desc("Total ticks spent accessing banks"); 1442 1443 totBusLat 1444 .name(name() + ".totBusLat") 1445 .desc("Total ticks spent in databus transfers"); 1446 1447 totMemAccLat 1448 .name(name() + ".totMemAccLat") 1449 .desc("Total ticks spent from burst creation until serviced " 1450 "by the DRAM"); 1451 1452 avgQLat 1453 .name(name() + ".avgQLat") 1454 .desc("Average queueing delay per DRAM burst") 1455 .precision(2); 1456 1457 avgQLat = totQLat / (readBursts - servicedByWrQ); 1458 1459 avgBankLat 1460 .name(name() + ".avgBankLat") 1461 .desc("Average bank access latency per DRAM burst") 1462 .precision(2); 1463 1464 avgBankLat = totBankLat / (readBursts - servicedByWrQ); 1465 1466 avgBusLat 1467 .name(name() + ".avgBusLat") 1468 .desc("Average bus latency per DRAM burst") 1469 .precision(2); 1470 1471 avgBusLat = totBusLat / (readBursts - servicedByWrQ); 1472 1473 avgMemAccLat 1474 .name(name() + ".avgMemAccLat") 1475 .desc("Average memory access latency per DRAM burst") 1476 .precision(2); 1477 1478 avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ); 1479 1480 numRdRetry 1481 .name(name() + ".numRdRetry") 1482 .desc("Number of times read queue was full causing retry"); 1483 1484 numWrRetry 1485 .name(name() + ".numWrRetry") 1486 .desc("Number of times write queue was full causing retry"); 1487 1488 readRowHits 1489 .name(name() + ".readRowHits") 1490 .desc("Number of row buffer hits during reads"); 1491 1492 writeRowHits 1493 .name(name() + ".writeRowHits") 1494 .desc("Number of row buffer hits during writes"); 1495 1496 readRowHitRate 1497 .name(name() + ".readRowHitRate") 1498 .desc("Row buffer hit rate for reads") 1499 .precision(2); 1500 1501 readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100; 1502 1503 writeRowHitRate 1504 .name(name() + ".writeRowHitRate") 1505 .desc("Row buffer hit rate for writes") 1506 .precision(2); 1507 1508 writeRowHitRate = (writeRowHits / (writeBursts - mergedWrBursts)) * 100; 1509 1510 readPktSize 1511 .init(ceilLog2(burstSize) + 1) 1512 .name(name() + ".readPktSize") 1513 .desc("Read request sizes (log2)"); 1514 1515 writePktSize 1516 .init(ceilLog2(burstSize) + 1) 1517 .name(name() + ".writePktSize") 1518 .desc("Write request sizes (log2)"); 1519 1520 rdQLenPdf 1521 .init(readBufferSize) 1522 .name(name() + ".rdQLenPdf") 1523 .desc("What read queue length does an incoming req see"); 1524 1525 wrQLenPdf 1526 .init(writeBufferSize) 1527 .name(name() + ".wrQLenPdf") 1528 .desc("What write queue length does an incoming req see"); 1529 1530 bytesPerActivate 1531 .init(maxAccessesPerRow) 1532 .name(name() + ".bytesPerActivate") 1533 .desc("Bytes accessed per row activation") 1534 .flags(nozero); 1535 1536 rdPerTurnAround 1537 .init(readBufferSize) 1538 .name(name() + ".rdPerTurnAround") 1539 .desc("Reads before turning the bus around for writes") 1540 .flags(nozero); 1541 1542 wrPerTurnAround 1543 .init(writeBufferSize) 1544 .name(name() + ".wrPerTurnAround") 1545 .desc("Writes before turning the bus around for reads") 1546 .flags(nozero); 1547 1548 bytesReadDRAM 1549 .name(name() + ".bytesReadDRAM") 1550 .desc("Total number of bytes read from DRAM"); 1551 1552 bytesReadWrQ 1553 .name(name() + ".bytesReadWrQ") 1554 .desc("Total number of bytes read from write queue"); 1555 1556 bytesWritten 1557 .name(name() + ".bytesWritten") 1558 .desc("Total number of bytes written to DRAM"); 1559 1560 bytesReadSys 1561 .name(name() + ".bytesReadSys") 1562 .desc("Total read bytes from the system interface side"); 1563 1564 bytesWrittenSys 1565 .name(name() + ".bytesWrittenSys") 1566 .desc("Total written bytes from the system interface side"); 1567 1568 avgRdBW 1569 .name(name() + ".avgRdBW") 1570 .desc("Average DRAM read bandwidth in MiByte/s") 1571 .precision(2); 1572 1573 avgRdBW = (bytesReadDRAM / 1000000) / simSeconds; 1574 1575 avgWrBW 1576 .name(name() + ".avgWrBW") 1577 .desc("Average achieved write bandwidth in MiByte/s") 1578 .precision(2); 1579 1580 avgWrBW = (bytesWritten / 1000000) / simSeconds; 1581 1582 avgRdBWSys 1583 .name(name() + ".avgRdBWSys") 1584 .desc("Average system read bandwidth in MiByte/s") 1585 .precision(2); 1586 1587 avgRdBWSys = (bytesReadSys / 1000000) / simSeconds; 1588 1589 avgWrBWSys 1590 .name(name() + ".avgWrBWSys") 1591 .desc("Average system write bandwidth in MiByte/s") 1592 .precision(2); 1593 1594 avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds; 1595 1596 peakBW 1597 .name(name() + ".peakBW") 1598 .desc("Theoretical peak bandwidth in MiByte/s") 1599 .precision(2); 1600 1601 peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000; 1602 1603 busUtil 1604 .name(name() + ".busUtil") 1605 .desc("Data bus utilization in percentage") 1606 .precision(2); 1607 1608 busUtil = (avgRdBW + avgWrBW) / peakBW * 100; 1609 1610 totGap 1611 .name(name() + ".totGap") 1612 .desc("Total gap between requests"); 1613 1614 avgGap 1615 .name(name() + ".avgGap") 1616 .desc("Average gap between requests") 1617 .precision(2); 1618 1619 avgGap = totGap / (readReqs + writeReqs); 1620 1621 // Stats for DRAM Power calculation based on Micron datasheet 1622 busUtilRead 1623 .name(name() + ".busUtilRead") 1624 .desc("Data bus utilization in percentage for reads") 1625 .precision(2); 1626 1627 busUtilRead = avgRdBW / peakBW * 100; 1628 1629 busUtilWrite 1630 .name(name() + ".busUtilWrite") 1631 .desc("Data bus utilization in percentage for writes") 1632 .precision(2); 1633 1634 busUtilWrite = avgWrBW / peakBW * 100; 1635 1636 pageHitRate 1637 .name(name() + ".pageHitRate") 1638 .desc("Row buffer hit rate, read and write combined") 1639 .precision(2); 1640 1641 pageHitRate = (writeRowHits + readRowHits) / 1642 (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100; 1643 1644 prechargeAllPercent 1645 .name(name() + ".prechargeAllPercent") 1646 .desc("Percentage of time for which DRAM has all the banks in " 1647 "precharge state") 1648 .precision(2); 1649 1650 prechargeAllPercent = prechargeAllTime / simTicks * 100; 1651} 1652 1653void 1654DRAMCtrl::recvFunctional(PacketPtr pkt) 1655{ 1656 // rely on the abstract memory 1657 functionalAccess(pkt); 1658} 1659 1660BaseSlavePort& 1661DRAMCtrl::getSlavePort(const string &if_name, PortID idx) 1662{ 1663 if (if_name != "port") { 1664 return MemObject::getSlavePort(if_name, idx); 1665 } else { 1666 return port; 1667 } 1668} 1669 1670unsigned int 1671DRAMCtrl::drain(DrainManager *dm) 1672{ 1673 unsigned int count = port.drain(dm); 1674 1675 // if there is anything in any of our internal queues, keep track 1676 // of that as well 1677 if (!(writeQueue.empty() && readQueue.empty() && 1678 respQueue.empty())) { 1679 DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d," 1680 " resp: %d\n", writeQueue.size(), readQueue.size(), 1681 respQueue.size()); 1682 ++count; 1683 drainManager = dm; 1684 // the only part that is not drained automatically over time 1685 // is the write queue, thus trigger writes if there are any 1686 // waiting and no reads waiting, otherwise wait until the 1687 // reads are done 1688 if (readQueue.empty() && !writeQueue.empty() && 1689 !writeEvent.scheduled()) 1690 triggerWrites(); 1691 } 1692 1693 if (count) 1694 setDrainState(Drainable::Draining); 1695 else 1696 setDrainState(Drainable::Drained); 1697 return count; 1698} 1699 1700DRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _memory) 1701 : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this), 1702 memory(_memory) 1703{ } 1704 1705AddrRangeList 1706DRAMCtrl::MemoryPort::getAddrRanges() const 1707{ 1708 AddrRangeList ranges; 1709 ranges.push_back(memory.getAddrRange()); 1710 return ranges; 1711} 1712 1713void 1714DRAMCtrl::MemoryPort::recvFunctional(PacketPtr pkt) 1715{ 1716 pkt->pushLabel(memory.name()); 1717 1718 if (!queue.checkFunctional(pkt)) { 1719 // Default implementation of SimpleTimingPort::recvFunctional() 1720 // calls recvAtomic() and throws away the latency; we can save a 1721 // little here by just not calculating the latency. 1722 memory.recvFunctional(pkt); 1723 } 1724 1725 pkt->popLabel(); 1726} 1727 1728Tick 1729DRAMCtrl::MemoryPort::recvAtomic(PacketPtr pkt) 1730{ 1731 return memory.recvAtomic(pkt); 1732} 1733 1734bool 1735DRAMCtrl::MemoryPort::recvTimingReq(PacketPtr pkt) 1736{ 1737 // pass it to the memory controller 1738 return memory.recvTimingReq(pkt); 1739} 1740 1741DRAMCtrl* 1742DRAMCtrlParams::create() 1743{ 1744 return new DRAMCtrl(this); 1745} 1746