dram_ctrl.cc revision 10140
1/* 2 * Copyright (c) 2010-2013 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2013 Amin Farmahini-Farahani 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Andreas Hansson 41 * Ani Udipi 42 * Neha Agarwal 43 */ 44 45#include "base/trace.hh" 46#include "base/bitfield.hh" 47#include "debug/Drain.hh" 48#include "debug/DRAM.hh" 49#include "mem/simple_dram.hh" 50#include "sim/system.hh" 51 52using namespace std; 53 54SimpleDRAM::SimpleDRAM(const SimpleDRAMParams* p) : 55 AbstractMemory(p), 56 port(name() + ".port", *this), 57 retryRdReq(false), retryWrReq(false), 58 rowHitFlag(false), stopReads(false), 59 writeEvent(this), respondEvent(this), 60 refreshEvent(this), nextReqEvent(this), drainManager(NULL), 61 deviceBusWidth(p->device_bus_width), burstLength(p->burst_length), 62 deviceRowBufferSize(p->device_rowbuffer_size), 63 devicesPerRank(p->devices_per_rank), 64 burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8), 65 rowBufferSize(devicesPerRank * deviceRowBufferSize), 66 columnsPerRowBuffer(rowBufferSize / burstSize), 67 ranksPerChannel(p->ranks_per_channel), 68 banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0), 69 readBufferSize(p->read_buffer_size), 70 writeBufferSize(p->write_buffer_size), 71 writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0), 72 writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0), 73 minWritesPerSwitch(p->min_writes_per_switch), writesThisTime(0), 74 tWTR(p->tWTR), tBURST(p->tBURST), 75 tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS), 76 tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD), 77 tXAW(p->tXAW), activationLimit(p->activation_limit), 78 memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping), 79 pageMgmt(p->page_policy), 80 frontendLatency(p->static_frontend_latency), 81 backendLatency(p->static_backend_latency), 82 busBusyUntil(0), writeStartTime(0), 83 prevArrival(0), numReqs(0), 84 newTime(0), startTickPrechargeAll(0), numBanksActive(0) 85{ 86 // create the bank states based on the dimensions of the ranks and 87 // banks 88 banks.resize(ranksPerChannel); 89 actTicks.resize(ranksPerChannel); 90 for (size_t c = 0; c < ranksPerChannel; ++c) { 91 banks[c].resize(banksPerRank); 92 actTicks[c].resize(activationLimit, 0); 93 } 94 95 // perform a basic check of the write thresholds 96 if (p->write_low_thresh_perc >= p->write_high_thresh_perc) 97 fatal("Write buffer low threshold %d must be smaller than the " 98 "high threshold %d\n", p->write_low_thresh_perc, 99 p->write_high_thresh_perc); 100 101 // determine the rows per bank by looking at the total capacity 102 uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size()); 103 104 DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity, 105 AbstractMemory::size()); 106 107 DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n", 108 rowBufferSize, columnsPerRowBuffer); 109 110 rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel); 111 112 if (range.interleaved()) { 113 if (channels != range.stripes()) 114 panic("%s has %d interleaved address stripes but %d channel(s)\n", 115 name(), range.stripes(), channels); 116 117 if (addrMapping == Enums::RoRaBaChCo) { 118 if (rowBufferSize != range.granularity()) { 119 panic("Interleaving of %s doesn't match RoRaBaChCo " 120 "address map\n", name()); 121 } 122 } else if (addrMapping == Enums::RoRaBaCoCh) { 123 if (system()->cacheLineSize() != range.granularity()) { 124 panic("Interleaving of %s doesn't match RoRaBaCoCh " 125 "address map\n", name()); 126 } 127 } else if (addrMapping == Enums::RoCoRaBaCh) { 128 if (system()->cacheLineSize() != range.granularity()) 129 panic("Interleaving of %s doesn't match RoCoRaBaCh " 130 "address map\n", name()); 131 } 132 } 133} 134 135void 136SimpleDRAM::init() 137{ 138 if (!port.isConnected()) { 139 fatal("SimpleDRAM %s is unconnected!\n", name()); 140 } else { 141 port.sendRangeChange(); 142 } 143} 144 145void 146SimpleDRAM::startup() 147{ 148 // print the configuration of the controller 149 printParams(); 150 151 // kick off the refresh 152 schedule(refreshEvent, curTick() + tREFI); 153} 154 155Tick 156SimpleDRAM::recvAtomic(PacketPtr pkt) 157{ 158 DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr()); 159 160 // do the actual memory access and turn the packet into a response 161 access(pkt); 162 163 Tick latency = 0; 164 if (!pkt->memInhibitAsserted() && pkt->hasData()) { 165 // this value is not supposed to be accurate, just enough to 166 // keep things going, mimic a closed page 167 latency = tRP + tRCD + tCL; 168 } 169 return latency; 170} 171 172bool 173SimpleDRAM::readQueueFull(unsigned int neededEntries) const 174{ 175 DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n", 176 readBufferSize, readQueue.size() + respQueue.size(), 177 neededEntries); 178 179 return 180 (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize; 181} 182 183bool 184SimpleDRAM::writeQueueFull(unsigned int neededEntries) const 185{ 186 DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n", 187 writeBufferSize, writeQueue.size(), neededEntries); 188 return (writeQueue.size() + neededEntries) > writeBufferSize; 189} 190 191SimpleDRAM::DRAMPacket* 192SimpleDRAM::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size, bool isRead) 193{ 194 // decode the address based on the address mapping scheme, with 195 // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and 196 // channel, respectively 197 uint8_t rank; 198 uint8_t bank; 199 uint16_t row; 200 201 // truncate the address to the access granularity 202 Addr addr = dramPktAddr / burstSize; 203 204 // we have removed the lowest order address bits that denote the 205 // position within the column 206 if (addrMapping == Enums::RoRaBaChCo) { 207 // the lowest order bits denote the column to ensure that 208 // sequential cache lines occupy the same row 209 addr = addr / columnsPerRowBuffer; 210 211 // take out the channel part of the address 212 addr = addr / channels; 213 214 // after the channel bits, get the bank bits to interleave 215 // over the banks 216 bank = addr % banksPerRank; 217 addr = addr / banksPerRank; 218 219 // after the bank, we get the rank bits which thus interleaves 220 // over the ranks 221 rank = addr % ranksPerChannel; 222 addr = addr / ranksPerChannel; 223 224 // lastly, get the row bits 225 row = addr % rowsPerBank; 226 addr = addr / rowsPerBank; 227 } else if (addrMapping == Enums::RoRaBaCoCh) { 228 // take out the channel part of the address 229 addr = addr / channels; 230 231 // next, the column 232 addr = addr / columnsPerRowBuffer; 233 234 // after the column bits, we get the bank bits to interleave 235 // over the banks 236 bank = addr % banksPerRank; 237 addr = addr / banksPerRank; 238 239 // after the bank, we get the rank bits which thus interleaves 240 // over the ranks 241 rank = addr % ranksPerChannel; 242 addr = addr / ranksPerChannel; 243 244 // lastly, get the row bits 245 row = addr % rowsPerBank; 246 addr = addr / rowsPerBank; 247 } else if (addrMapping == Enums::RoCoRaBaCh) { 248 // optimise for closed page mode and utilise maximum 249 // parallelism of the DRAM (at the cost of power) 250 251 // take out the channel part of the address, not that this has 252 // to match with how accesses are interleaved between the 253 // controllers in the address mapping 254 addr = addr / channels; 255 256 // start with the bank bits, as this provides the maximum 257 // opportunity for parallelism between requests 258 bank = addr % banksPerRank; 259 addr = addr / banksPerRank; 260 261 // next get the rank bits 262 rank = addr % ranksPerChannel; 263 addr = addr / ranksPerChannel; 264 265 // next the column bits which we do not need to keep track of 266 // and simply skip past 267 addr = addr / columnsPerRowBuffer; 268 269 // lastly, get the row bits 270 row = addr % rowsPerBank; 271 addr = addr / rowsPerBank; 272 } else 273 panic("Unknown address mapping policy chosen!"); 274 275 assert(rank < ranksPerChannel); 276 assert(bank < banksPerRank); 277 assert(row < rowsPerBank); 278 279 DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n", 280 dramPktAddr, rank, bank, row); 281 282 // create the corresponding DRAM packet with the entry time and 283 // ready time set to the current tick, the latter will be updated 284 // later 285 uint16_t bank_id = banksPerRank * rank + bank; 286 return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr, 287 size, banks[rank][bank]); 288} 289 290void 291SimpleDRAM::addToReadQueue(PacketPtr pkt, unsigned int pktCount) 292{ 293 // only add to the read queue here. whenever the request is 294 // eventually done, set the readyTime, and call schedule() 295 assert(!pkt->isWrite()); 296 297 assert(pktCount != 0); 298 299 // if the request size is larger than burst size, the pkt is split into 300 // multiple DRAM packets 301 // Note if the pkt starting address is not aligened to burst size, the 302 // address of first DRAM packet is kept unaliged. Subsequent DRAM packets 303 // are aligned to burst size boundaries. This is to ensure we accurately 304 // check read packets against packets in write queue. 305 Addr addr = pkt->getAddr(); 306 unsigned pktsServicedByWrQ = 0; 307 BurstHelper* burst_helper = NULL; 308 for (int cnt = 0; cnt < pktCount; ++cnt) { 309 unsigned size = std::min((addr | (burstSize - 1)) + 1, 310 pkt->getAddr() + pkt->getSize()) - addr; 311 readPktSize[ceilLog2(size)]++; 312 readBursts++; 313 314 // First check write buffer to see if the data is already at 315 // the controller 316 bool foundInWrQ = false; 317 for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) { 318 // check if the read is subsumed in the write entry we are 319 // looking at 320 if ((*i)->addr <= addr && 321 (addr + size) <= ((*i)->addr + (*i)->size)) { 322 foundInWrQ = true; 323 servicedByWrQ++; 324 pktsServicedByWrQ++; 325 DPRINTF(DRAM, "Read to addr %lld with size %d serviced by " 326 "write queue\n", addr, size); 327 bytesReadWrQ += burstSize; 328 break; 329 } 330 } 331 332 // If not found in the write q, make a DRAM packet and 333 // push it onto the read queue 334 if (!foundInWrQ) { 335 336 // Make the burst helper for split packets 337 if (pktCount > 1 && burst_helper == NULL) { 338 DPRINTF(DRAM, "Read to addr %lld translates to %d " 339 "dram requests\n", pkt->getAddr(), pktCount); 340 burst_helper = new BurstHelper(pktCount); 341 } 342 343 DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true); 344 dram_pkt->burstHelper = burst_helper; 345 346 assert(!readQueueFull(1)); 347 rdQLenPdf[readQueue.size() + respQueue.size()]++; 348 349 DPRINTF(DRAM, "Adding to read queue\n"); 350 351 readQueue.push_back(dram_pkt); 352 353 // Update stats 354 avgRdQLen = readQueue.size() + respQueue.size(); 355 } 356 357 // Starting address of next dram pkt (aligend to burstSize boundary) 358 addr = (addr | (burstSize - 1)) + 1; 359 } 360 361 // If all packets are serviced by write queue, we send the repsonse back 362 if (pktsServicedByWrQ == pktCount) { 363 accessAndRespond(pkt, frontendLatency); 364 return; 365 } 366 367 // Update how many split packets are serviced by write queue 368 if (burst_helper != NULL) 369 burst_helper->burstsServiced = pktsServicedByWrQ; 370 371 // If we are not already scheduled to get the read request out of 372 // the queue, do so now 373 if (!nextReqEvent.scheduled() && !stopReads) { 374 DPRINTF(DRAM, "Request scheduled immediately\n"); 375 schedule(nextReqEvent, curTick()); 376 } 377} 378 379void 380SimpleDRAM::processWriteEvent() 381{ 382 assert(!writeQueue.empty()); 383 384 DPRINTF(DRAM, "Beginning DRAM Write\n"); 385 Tick temp1 M5_VAR_USED = std::max(curTick(), busBusyUntil); 386 Tick temp2 M5_VAR_USED = std::max(curTick(), maxBankFreeAt()); 387 388 chooseNextWrite(); 389 DRAMPacket* dram_pkt = writeQueue.front(); 390 // sanity check 391 assert(dram_pkt->size <= burstSize); 392 doDRAMAccess(dram_pkt); 393 394 writeQueue.pop_front(); 395 delete dram_pkt; 396 397 ++writesThisTime; 398 399 DPRINTF(DRAM, "Writing, bus busy for %lld ticks, banks busy " 400 "for %lld ticks\n", busBusyUntil - temp1, maxBankFreeAt() - temp2); 401 402 // Update stats 403 avgWrQLen = writeQueue.size(); 404 405 // If we emptied the write queue, or got below the threshold and 406 // are not draining, or we have reads waiting and have done enough 407 // writes, then switch to reads. The retry above could already 408 // have caused it to be scheduled, so first check 409 if (writeQueue.empty() || 410 (writeQueue.size() < writeLowThreshold && !drainManager) || 411 (!readQueue.empty() && writesThisTime >= minWritesPerSwitch)) { 412 // turn the bus back around for reads again 413 busBusyUntil += tWTR; 414 stopReads = false; 415 writesThisTime = 0; 416 417 if (!nextReqEvent.scheduled()) 418 schedule(nextReqEvent, busBusyUntil); 419 } else { 420 assert(!writeEvent.scheduled()); 421 DPRINTF(DRAM, "Next write scheduled at %lld\n", newTime); 422 schedule(writeEvent, newTime); 423 } 424 425 if (retryWrReq) { 426 retryWrReq = false; 427 port.sendRetry(); 428 } 429 430 // if there is nothing left in any queue, signal a drain 431 if (writeQueue.empty() && readQueue.empty() && 432 respQueue.empty () && drainManager) { 433 drainManager->signalDrainDone(); 434 drainManager = NULL; 435 } 436} 437 438 439void 440SimpleDRAM::triggerWrites() 441{ 442 DPRINTF(DRAM, "Writes triggered at %lld\n", curTick()); 443 // Flag variable to stop any more read scheduling 444 stopReads = true; 445 446 writeStartTime = std::max(busBusyUntil, curTick()) + tWTR; 447 448 DPRINTF(DRAM, "Writes scheduled at %lld\n", writeStartTime); 449 450 assert(writeStartTime >= curTick()); 451 assert(!writeEvent.scheduled()); 452 schedule(writeEvent, writeStartTime); 453} 454 455void 456SimpleDRAM::addToWriteQueue(PacketPtr pkt, unsigned int pktCount) 457{ 458 // only add to the write queue here. whenever the request is 459 // eventually done, set the readyTime, and call schedule() 460 assert(pkt->isWrite()); 461 462 // if the request size is larger than burst size, the pkt is split into 463 // multiple DRAM packets 464 Addr addr = pkt->getAddr(); 465 for (int cnt = 0; cnt < pktCount; ++cnt) { 466 unsigned size = std::min((addr | (burstSize - 1)) + 1, 467 pkt->getAddr() + pkt->getSize()) - addr; 468 writePktSize[ceilLog2(size)]++; 469 writeBursts++; 470 471 // see if we can merge with an existing item in the write 472 // queue and keep track of whether we have merged or not so we 473 // can stop at that point and also avoid enqueueing a new 474 // request 475 bool merged = false; 476 auto w = writeQueue.begin(); 477 478 while(!merged && w != writeQueue.end()) { 479 // either of the two could be first, if they are the same 480 // it does not matter which way we go 481 if ((*w)->addr >= addr) { 482 // the existing one starts after the new one, figure 483 // out where the new one ends with respect to the 484 // existing one 485 if ((addr + size) >= ((*w)->addr + (*w)->size)) { 486 // check if the existing one is completely 487 // subsumed in the new one 488 DPRINTF(DRAM, "Merging write covering existing burst\n"); 489 merged = true; 490 // update both the address and the size 491 (*w)->addr = addr; 492 (*w)->size = size; 493 } else if ((addr + size) >= (*w)->addr && 494 ((*w)->addr + (*w)->size - addr) <= burstSize) { 495 // the new one is just before or partially 496 // overlapping with the existing one, and together 497 // they fit within a burst 498 DPRINTF(DRAM, "Merging write before existing burst\n"); 499 merged = true; 500 // the existing queue item needs to be adjusted with 501 // respect to both address and size 502 (*w)->size = (*w)->addr + (*w)->size - addr; 503 (*w)->addr = addr; 504 } 505 } else { 506 // the new one starts after the current one, figure 507 // out where the existing one ends with respect to the 508 // new one 509 if (((*w)->addr + (*w)->size) >= (addr + size)) { 510 // check if the new one is completely subsumed in the 511 // existing one 512 DPRINTF(DRAM, "Merging write into existing burst\n"); 513 merged = true; 514 // no adjustments necessary 515 } else if (((*w)->addr + (*w)->size) >= addr && 516 (addr + size - (*w)->addr) <= burstSize) { 517 // the existing one is just before or partially 518 // overlapping with the new one, and together 519 // they fit within a burst 520 DPRINTF(DRAM, "Merging write after existing burst\n"); 521 merged = true; 522 // the address is right, and only the size has 523 // to be adjusted 524 (*w)->size = addr + size - (*w)->addr; 525 } 526 } 527 ++w; 528 } 529 530 // if the item was not merged we need to create a new write 531 // and enqueue it 532 if (!merged) { 533 DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false); 534 535 assert(writeQueue.size() < writeBufferSize); 536 wrQLenPdf[writeQueue.size()]++; 537 538 DPRINTF(DRAM, "Adding to write queue\n"); 539 540 writeQueue.push_back(dram_pkt); 541 542 // Update stats 543 avgWrQLen = writeQueue.size(); 544 } else { 545 // keep track of the fact that this burst effectively 546 // disappeared as it was merged with an existing one 547 mergedWrBursts++; 548 } 549 550 // Starting address of next dram pkt (aligend to burstSize boundary) 551 addr = (addr | (burstSize - 1)) + 1; 552 } 553 554 // we do not wait for the writes to be send to the actual memory, 555 // but instead take responsibility for the consistency here and 556 // snoop the write queue for any upcoming reads 557 // @todo, if a pkt size is larger than burst size, we might need a 558 // different front end latency 559 accessAndRespond(pkt, frontendLatency); 560 561 // If your write buffer is starting to fill up, drain it! 562 if (writeQueue.size() >= writeHighThreshold && !stopReads){ 563 triggerWrites(); 564 } 565} 566 567void 568SimpleDRAM::printParams() const 569{ 570 // Sanity check print of important parameters 571 DPRINTF(DRAM, 572 "Memory controller %s physical organization\n" \ 573 "Number of devices per rank %d\n" \ 574 "Device bus width (in bits) %d\n" \ 575 "DRAM data bus burst %d\n" \ 576 "Row buffer size %d\n" \ 577 "Columns per row buffer %d\n" \ 578 "Rows per bank %d\n" \ 579 "Banks per rank %d\n" \ 580 "Ranks per channel %d\n" \ 581 "Total mem capacity %u\n", 582 name(), devicesPerRank, deviceBusWidth, burstSize, rowBufferSize, 583 columnsPerRowBuffer, rowsPerBank, banksPerRank, ranksPerChannel, 584 rowBufferSize * rowsPerBank * banksPerRank * ranksPerChannel); 585 586 string scheduler = memSchedPolicy == Enums::fcfs ? "FCFS" : "FR-FCFS"; 587 string address_mapping = addrMapping == Enums::RoRaBaChCo ? "RoRaBaChCo" : 588 (addrMapping == Enums::RoRaBaCoCh ? "RoRaBaCoCh" : "RoCoRaBaCh"); 589 string page_policy = pageMgmt == Enums::open ? "OPEN" : 590 (pageMgmt == Enums::open_adaptive ? "OPEN (adaptive)" : "CLOSE"); 591 592 DPRINTF(DRAM, 593 "Memory controller %s characteristics\n" \ 594 "Read buffer size %d\n" \ 595 "Write buffer size %d\n" \ 596 "Write high thresh %d\n" \ 597 "Write low thresh %d\n" \ 598 "Scheduler %s\n" \ 599 "Address mapping %s\n" \ 600 "Page policy %s\n", 601 name(), readBufferSize, writeBufferSize, writeHighThreshold, 602 writeLowThreshold, scheduler, address_mapping, page_policy); 603 604 DPRINTF(DRAM, "Memory controller %s timing specs\n" \ 605 "tRCD %d ticks\n" \ 606 "tCL %d ticks\n" \ 607 "tRP %d ticks\n" \ 608 "tBURST %d ticks\n" \ 609 "tRFC %d ticks\n" \ 610 "tREFI %d ticks\n" \ 611 "tWTR %d ticks\n" \ 612 "tXAW (%d) %d ticks\n", 613 name(), tRCD, tCL, tRP, tBURST, tRFC, tREFI, tWTR, 614 activationLimit, tXAW); 615} 616 617void 618SimpleDRAM::printQs() const { 619 DPRINTF(DRAM, "===READ QUEUE===\n\n"); 620 for (auto i = readQueue.begin() ; i != readQueue.end() ; ++i) { 621 DPRINTF(DRAM, "Read %lu\n", (*i)->addr); 622 } 623 DPRINTF(DRAM, "\n===RESP QUEUE===\n\n"); 624 for (auto i = respQueue.begin() ; i != respQueue.end() ; ++i) { 625 DPRINTF(DRAM, "Response %lu\n", (*i)->addr); 626 } 627 DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n"); 628 for (auto i = writeQueue.begin() ; i != writeQueue.end() ; ++i) { 629 DPRINTF(DRAM, "Write %lu\n", (*i)->addr); 630 } 631} 632 633bool 634SimpleDRAM::recvTimingReq(PacketPtr pkt) 635{ 636 /// @todo temporary hack to deal with memory corruption issues until 637 /// 4-phase transactions are complete 638 for (int x = 0; x < pendingDelete.size(); x++) 639 delete pendingDelete[x]; 640 pendingDelete.clear(); 641 642 // This is where we enter from the outside world 643 DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n", 644 pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 645 646 // simply drop inhibited packets for now 647 if (pkt->memInhibitAsserted()) { 648 DPRINTF(DRAM,"Inhibited packet -- Dropping it now\n"); 649 pendingDelete.push_back(pkt); 650 return true; 651 } 652 653 // Every million accesses, print the state of the queues 654 if (numReqs % 1000000 == 0) 655 printQs(); 656 657 // Calc avg gap between requests 658 if (prevArrival != 0) { 659 totGap += curTick() - prevArrival; 660 } 661 prevArrival = curTick(); 662 663 664 // Find out how many dram packets a pkt translates to 665 // If the burst size is equal or larger than the pkt size, then a pkt 666 // translates to only one dram packet. Otherwise, a pkt translates to 667 // multiple dram packets 668 unsigned size = pkt->getSize(); 669 unsigned offset = pkt->getAddr() & (burstSize - 1); 670 unsigned int dram_pkt_count = divCeil(offset + size, burstSize); 671 672 // check local buffers and do not accept if full 673 if (pkt->isRead()) { 674 assert(size != 0); 675 if (readQueueFull(dram_pkt_count)) { 676 DPRINTF(DRAM, "Read queue full, not accepting\n"); 677 // remember that we have to retry this port 678 retryRdReq = true; 679 numRdRetry++; 680 return false; 681 } else { 682 addToReadQueue(pkt, dram_pkt_count); 683 readReqs++; 684 numReqs++; 685 bytesReadSys += size; 686 } 687 } else if (pkt->isWrite()) { 688 assert(size != 0); 689 if (writeQueueFull(dram_pkt_count)) { 690 DPRINTF(DRAM, "Write queue full, not accepting\n"); 691 // remember that we have to retry this port 692 retryWrReq = true; 693 numWrRetry++; 694 return false; 695 } else { 696 addToWriteQueue(pkt, dram_pkt_count); 697 writeReqs++; 698 numReqs++; 699 bytesWrittenSys += size; 700 } 701 } else { 702 DPRINTF(DRAM,"Neither read nor write, ignore timing\n"); 703 neitherReadNorWrite++; 704 accessAndRespond(pkt, 1); 705 } 706 707 retryRdReq = false; 708 retryWrReq = false; 709 return true; 710} 711 712void 713SimpleDRAM::processRespondEvent() 714{ 715 DPRINTF(DRAM, 716 "processRespondEvent(): Some req has reached its readyTime\n"); 717 718 DRAMPacket* dram_pkt = respQueue.front(); 719 720 if (dram_pkt->burstHelper) { 721 // it is a split packet 722 dram_pkt->burstHelper->burstsServiced++; 723 if (dram_pkt->burstHelper->burstsServiced == 724 dram_pkt->burstHelper->burstCount) { 725 // we have now serviced all children packets of a system packet 726 // so we can now respond to the requester 727 // @todo we probably want to have a different front end and back 728 // end latency for split packets 729 accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency); 730 delete dram_pkt->burstHelper; 731 dram_pkt->burstHelper = NULL; 732 } 733 } else { 734 // it is not a split packet 735 accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency); 736 } 737 738 delete respQueue.front(); 739 respQueue.pop_front(); 740 741 // Update stats 742 avgRdQLen = readQueue.size() + respQueue.size(); 743 744 if (!respQueue.empty()) { 745 assert(respQueue.front()->readyTime >= curTick()); 746 assert(!respondEvent.scheduled()); 747 schedule(respondEvent, respQueue.front()->readyTime); 748 } else { 749 // if there is nothing left in any queue, signal a drain 750 if (writeQueue.empty() && readQueue.empty() && 751 drainManager) { 752 drainManager->signalDrainDone(); 753 drainManager = NULL; 754 } 755 } 756 757 // We have made a location in the queue available at this point, 758 // so if there is a read that was forced to wait, retry now 759 if (retryRdReq) { 760 retryRdReq = false; 761 port.sendRetry(); 762 } 763} 764 765void 766SimpleDRAM::chooseNextWrite() 767{ 768 // This method does the arbitration between write requests. The 769 // chosen packet is simply moved to the head of the write 770 // queue. The other methods know that this is the place to 771 // look. For example, with FCFS, this method does nothing 772 assert(!writeQueue.empty()); 773 774 if (writeQueue.size() == 1) { 775 DPRINTF(DRAM, "Single write request, nothing to do\n"); 776 return; 777 } 778 779 if (memSchedPolicy == Enums::fcfs) { 780 // Do nothing, since the correct request is already head 781 } else if (memSchedPolicy == Enums::frfcfs) { 782 reorderQueue(writeQueue); 783 } else 784 panic("No scheduling policy chosen\n"); 785 786 DPRINTF(DRAM, "Selected next write request\n"); 787} 788 789bool 790SimpleDRAM::chooseNextRead() 791{ 792 // This method does the arbitration between read requests. The 793 // chosen packet is simply moved to the head of the queue. The 794 // other methods know that this is the place to look. For example, 795 // with FCFS, this method does nothing 796 if (readQueue.empty()) { 797 DPRINTF(DRAM, "No read request to select\n"); 798 return false; 799 } 800 801 // If there is only one request then there is nothing left to do 802 if (readQueue.size() == 1) 803 return true; 804 805 if (memSchedPolicy == Enums::fcfs) { 806 // Do nothing, since the request to serve is already the first 807 // one in the read queue 808 } else if (memSchedPolicy == Enums::frfcfs) { 809 reorderQueue(readQueue); 810 } else 811 panic("No scheduling policy chosen!\n"); 812 813 DPRINTF(DRAM, "Selected next read request\n"); 814 return true; 815} 816 817void 818SimpleDRAM::reorderQueue(std::deque<DRAMPacket*>& queue) 819{ 820 // Only determine this when needed 821 uint64_t earliest_banks = 0; 822 823 // Search for row hits first, if no row hit is found then schedule the 824 // packet to one of the earliest banks available 825 bool found_earliest_pkt = false; 826 auto selected_pkt_it = queue.begin(); 827 828 for (auto i = queue.begin(); i != queue.end() ; ++i) { 829 DRAMPacket* dram_pkt = *i; 830 const Bank& bank = dram_pkt->bankRef; 831 // Check if it is a row hit 832 if (bank.openRow == dram_pkt->row) { 833 DPRINTF(DRAM, "Row buffer hit\n"); 834 selected_pkt_it = i; 835 break; 836 } else if (!found_earliest_pkt) { 837 // No row hit, go for first ready 838 if (earliest_banks == 0) 839 earliest_banks = minBankFreeAt(queue); 840 841 // Bank is ready or is the first available bank 842 if (bank.freeAt <= curTick() || 843 bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) { 844 // Remember the packet to be scheduled to one of the earliest 845 // banks available 846 selected_pkt_it = i; 847 found_earliest_pkt = true; 848 } 849 } 850 } 851 852 DRAMPacket* selected_pkt = *selected_pkt_it; 853 queue.erase(selected_pkt_it); 854 queue.push_front(selected_pkt); 855} 856 857void 858SimpleDRAM::accessAndRespond(PacketPtr pkt, Tick static_latency) 859{ 860 DPRINTF(DRAM, "Responding to Address %lld.. ",pkt->getAddr()); 861 862 bool needsResponse = pkt->needsResponse(); 863 // do the actual memory access which also turns the packet into a 864 // response 865 access(pkt); 866 867 // turn packet around to go back to requester if response expected 868 if (needsResponse) { 869 // access already turned the packet into a response 870 assert(pkt->isResponse()); 871 872 // @todo someone should pay for this 873 pkt->busFirstWordDelay = pkt->busLastWordDelay = 0; 874 875 // queue the packet in the response queue to be sent out after 876 // the static latency has passed 877 port.schedTimingResp(pkt, curTick() + static_latency); 878 } else { 879 // @todo the packet is going to be deleted, and the DRAMPacket 880 // is still having a pointer to it 881 pendingDelete.push_back(pkt); 882 } 883 884 DPRINTF(DRAM, "Done\n"); 885 886 return; 887} 888 889pair<Tick, Tick> 890SimpleDRAM::estimateLatency(DRAMPacket* dram_pkt, Tick inTime) 891{ 892 // If a request reaches a bank at tick 'inTime', how much time 893 // *after* that does it take to finish the request, depending 894 // on bank status and page open policy. Note that this method 895 // considers only the time taken for the actual read or write 896 // to complete, NOT any additional time thereafter for tRAS or 897 // tRP. 898 Tick accLat = 0; 899 Tick bankLat = 0; 900 rowHitFlag = false; 901 Tick potentialActTick; 902 903 const Bank& bank = dram_pkt->bankRef; 904 // open-page policy 905 if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive) { 906 if (bank.openRow == dram_pkt->row) { 907 // When we have a row-buffer hit, 908 // we don't care about tRAS having expired or not, 909 // but do care about bank being free for access 910 rowHitFlag = true; 911 912 // When a series of requests arrive to the same row, 913 // DDR systems are capable of streaming data continuously 914 // at maximum bandwidth (subject to tCCD). Here, we approximate 915 // this condition, and assume that if whenever a bank is already 916 // busy and a new request comes in, it can be completed with no 917 // penalty beyond waiting for the existing read to complete. 918 if (bank.freeAt > inTime) { 919 accLat += bank.freeAt - inTime; 920 bankLat += 0; 921 } else { 922 // CAS latency only 923 accLat += tCL; 924 bankLat += tCL; 925 } 926 927 } else { 928 // Row-buffer miss, need to close existing row 929 // once tRAS has expired, then open the new one, 930 // then add cas latency. 931 Tick freeTime = std::max(bank.tRASDoneAt, bank.freeAt); 932 933 if (freeTime > inTime) 934 accLat += freeTime - inTime; 935 936 // If the there is no open row (open adaptive), then there 937 // is no precharge delay, otherwise go with tRP 938 Tick precharge_delay = bank.openRow == -1 ? 0 : tRP; 939 940 //The bank is free, and you may be able to activate 941 potentialActTick = inTime + accLat + precharge_delay; 942 if (potentialActTick < bank.actAllowedAt) 943 accLat += bank.actAllowedAt - potentialActTick; 944 945 accLat += precharge_delay + tRCD + tCL; 946 bankLat += precharge_delay + tRCD + tCL; 947 } 948 } else if (pageMgmt == Enums::close) { 949 // With a close page policy, no notion of 950 // bank.tRASDoneAt 951 if (bank.freeAt > inTime) 952 accLat += bank.freeAt - inTime; 953 954 //The bank is free, and you may be able to activate 955 potentialActTick = inTime + accLat; 956 if (potentialActTick < bank.actAllowedAt) 957 accLat += bank.actAllowedAt - potentialActTick; 958 959 // page already closed, simply open the row, and 960 // add cas latency 961 accLat += tRCD + tCL; 962 bankLat += tRCD + tCL; 963 } else 964 panic("No page management policy chosen\n"); 965 966 DPRINTF(DRAM, "Returning < %lld, %lld > from estimateLatency()\n", 967 bankLat, accLat); 968 969 return make_pair(bankLat, accLat); 970} 971 972void 973SimpleDRAM::processNextReqEvent() 974{ 975 scheduleNextReq(); 976} 977 978void 979SimpleDRAM::recordActivate(Tick act_tick, uint8_t rank, uint8_t bank) 980{ 981 assert(0 <= rank && rank < ranksPerChannel); 982 assert(actTicks[rank].size() == activationLimit); 983 984 DPRINTF(DRAM, "Activate at tick %d\n", act_tick); 985 986 // Tracking accesses after all banks are precharged. 987 // startTickPrechargeAll: is the tick when all the banks were again 988 // precharged. The difference between act_tick and startTickPrechargeAll 989 // gives the time for which DRAM doesn't get any accesses after refreshing 990 // or after a page is closed in closed-page or open-adaptive-page policy. 991 if ((numBanksActive == 0) && (act_tick > startTickPrechargeAll)) { 992 prechargeAllTime += act_tick - startTickPrechargeAll; 993 } 994 995 // No need to update number of active banks for closed-page policy as only 1 996 // bank will be activated at any given point, which will be instatntly 997 // precharged 998 if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive) 999 ++numBanksActive; 1000 1001 // start by enforcing tRRD 1002 for(int i = 0; i < banksPerRank; i++) { 1003 // next activate must not happen before tRRD 1004 banks[rank][i].actAllowedAt = act_tick + tRRD; 1005 } 1006 // tRC should be added to activation tick of the bank currently accessed, 1007 // where tRC = tRAS + tRP, this is just for a check as actAllowedAt for same 1008 // bank is already captured by bank.freeAt and bank.tRASDoneAt 1009 banks[rank][bank].actAllowedAt = act_tick + tRAS + tRP; 1010 1011 // next, we deal with tXAW, if the activation limit is disabled 1012 // then we are done 1013 if (actTicks[rank].empty()) 1014 return; 1015 1016 // sanity check 1017 if (actTicks[rank].back() && (act_tick - actTicks[rank].back()) < tXAW) { 1018 // @todo For now, stick with a warning 1019 warn("Got %d activates in window %d (%d - %d) which is smaller " 1020 "than %d\n", activationLimit, act_tick - actTicks[rank].back(), 1021 act_tick, actTicks[rank].back(), tXAW); 1022 } 1023 1024 // shift the times used for the book keeping, the last element 1025 // (highest index) is the oldest one and hence the lowest value 1026 actTicks[rank].pop_back(); 1027 1028 // record an new activation (in the future) 1029 actTicks[rank].push_front(act_tick); 1030 1031 // cannot activate more than X times in time window tXAW, push the 1032 // next one (the X + 1'st activate) to be tXAW away from the 1033 // oldest in our window of X 1034 if (actTicks[rank].back() && (act_tick - actTicks[rank].back()) < tXAW) { 1035 DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate no earlier " 1036 "than %d\n", activationLimit, actTicks[rank].back() + tXAW); 1037 for(int j = 0; j < banksPerRank; j++) 1038 // next activate must not happen before end of window 1039 banks[rank][j].actAllowedAt = actTicks[rank].back() + tXAW; 1040 } 1041} 1042 1043void 1044SimpleDRAM::doDRAMAccess(DRAMPacket* dram_pkt) 1045{ 1046 1047 DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n", 1048 dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row); 1049 1050 // estimate the bank and access latency 1051 pair<Tick, Tick> lat = estimateLatency(dram_pkt, curTick()); 1052 Tick bankLat = lat.first; 1053 Tick accessLat = lat.second; 1054 Tick actTick; 1055 1056 // This request was woken up at this time based on a prior call 1057 // to estimateLatency(). However, between then and now, both the 1058 // accessLatency and/or busBusyUntil may have changed. We need 1059 // to correct for that. 1060 1061 Tick addDelay = (curTick() + accessLat < busBusyUntil) ? 1062 busBusyUntil - (curTick() + accessLat) : 0; 1063 1064 Bank& bank = dram_pkt->bankRef; 1065 1066 // Update bank state 1067 if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive) { 1068 bank.openRow = dram_pkt->row; 1069 bank.freeAt = curTick() + addDelay + accessLat; 1070 bank.bytesAccessed += burstSize; 1071 1072 // If you activated a new row do to this access, the next access 1073 // will have to respect tRAS for this bank. 1074 if (!rowHitFlag) { 1075 // any waiting for banks account for in freeAt 1076 actTick = bank.freeAt - tCL - tRCD; 1077 bank.tRASDoneAt = actTick + tRAS; 1078 recordActivate(actTick, dram_pkt->rank, dram_pkt->bank); 1079 1080 // sample the number of bytes accessed and reset it as 1081 // we are now closing this row 1082 bytesPerActivate.sample(bank.bytesAccessed); 1083 bank.bytesAccessed = 0; 1084 } 1085 1086 if (pageMgmt == Enums::open_adaptive) { 1087 // a twist on the open page policy is to not blindly keep the 1088 // page open, but close it if there are no row hits, and there 1089 // are bank conflicts in the queue 1090 bool got_more_hits = false; 1091 bool got_bank_conflict = false; 1092 1093 // either look at the read queue or write queue 1094 const deque<DRAMPacket*>& queue = dram_pkt->isRead ? readQueue : 1095 writeQueue; 1096 auto p = queue.begin(); 1097 // make sure we are not considering the packet that we are 1098 // currently dealing with (which is the head of the queue) 1099 ++p; 1100 1101 // keep on looking until we have found both or reached 1102 // the end 1103 while (!(got_more_hits && got_bank_conflict) && 1104 p != queue.end()) { 1105 bool same_rank_bank = (dram_pkt->rank == (*p)->rank) && 1106 (dram_pkt->bank == (*p)->bank); 1107 bool same_row = dram_pkt->row == (*p)->row; 1108 got_more_hits |= same_rank_bank && same_row; 1109 got_bank_conflict |= same_rank_bank && !same_row; 1110 ++p; 1111 } 1112 1113 // auto pre-charge 1114 if (!got_more_hits && got_bank_conflict) { 1115 bank.openRow = -1; 1116 bank.freeAt = std::max(bank.freeAt, bank.tRASDoneAt) + tRP; 1117 --numBanksActive; 1118 if (numBanksActive == 0) { 1119 startTickPrechargeAll = std::max(startTickPrechargeAll, 1120 bank.freeAt); 1121 DPRINTF(DRAM, "All banks precharged at tick: %ld\n", 1122 startTickPrechargeAll); 1123 } 1124 DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId); 1125 } 1126 } 1127 1128 DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt); 1129 } else if (pageMgmt == Enums::close) { 1130 actTick = curTick() + addDelay + accessLat - tRCD - tCL; 1131 recordActivate(actTick, dram_pkt->rank, dram_pkt->bank); 1132 1133 // If the DRAM has a very quick tRAS, bank can be made free 1134 // after consecutive tCL,tRCD,tRP times. In general, however, 1135 // an additional wait is required to respect tRAS. 1136 bank.freeAt = std::max(actTick + tRAS + tRP, 1137 actTick + tRCD + tCL + tRP); 1138 DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt); 1139 bytesPerActivate.sample(burstSize); 1140 startTickPrechargeAll = std::max(startTickPrechargeAll, bank.freeAt); 1141 } else 1142 panic("No page management policy chosen\n"); 1143 1144 // Update request parameters 1145 dram_pkt->readyTime = curTick() + addDelay + accessLat + tBURST; 1146 1147 1148 DPRINTF(DRAM, "Req %lld: curtick is %lld accessLat is %d " \ 1149 "readytime is %lld busbusyuntil is %lld. " \ 1150 "Scheduling at readyTime\n", dram_pkt->addr, 1151 curTick(), accessLat, dram_pkt->readyTime, busBusyUntil); 1152 1153 // Make sure requests are not overlapping on the databus 1154 assert (dram_pkt->readyTime - busBusyUntil >= tBURST); 1155 1156 // Update bus state 1157 busBusyUntil = dram_pkt->readyTime; 1158 1159 DPRINTF(DRAM,"Access time is %lld\n", 1160 dram_pkt->readyTime - dram_pkt->entryTime); 1161 1162 // Update the minimum timing between the requests 1163 newTime = (busBusyUntil > tRP + tRCD + tCL) ? 1164 std::max(busBusyUntil - (tRP + tRCD + tCL), curTick()) : curTick(); 1165 1166 // Update the access related stats 1167 if (dram_pkt->isRead) { 1168 if (rowHitFlag) 1169 readRowHits++; 1170 bytesReadDRAM += burstSize; 1171 perBankRdBursts[dram_pkt->bankId]++; 1172 } else { 1173 if (rowHitFlag) 1174 writeRowHits++; 1175 bytesWritten += burstSize; 1176 perBankWrBursts[dram_pkt->bankId]++; 1177 1178 // At this point, commonality between reads and writes ends. 1179 // For writes, we are done since we long ago responded to the 1180 // requestor. 1181 return; 1182 } 1183 1184 // Update latency stats 1185 totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime; 1186 totBankLat += bankLat; 1187 totBusLat += tBURST; 1188 totQLat += dram_pkt->readyTime - dram_pkt->entryTime - bankLat - tBURST; 1189 1190 1191 // At this point we're done dealing with the request 1192 // It will be moved to a separate response queue with a 1193 // correct readyTime, and eventually be sent back at that 1194 //time 1195 moveToRespQ(); 1196 1197 // Schedule the next read event 1198 if (!nextReqEvent.scheduled() && !stopReads){ 1199 schedule(nextReqEvent, newTime); 1200 } else { 1201 if (newTime < nextReqEvent.when()) 1202 reschedule(nextReqEvent, newTime); 1203 } 1204} 1205 1206void 1207SimpleDRAM::moveToRespQ() 1208{ 1209 // Remove from read queue 1210 DRAMPacket* dram_pkt = readQueue.front(); 1211 readQueue.pop_front(); 1212 1213 // sanity check 1214 assert(dram_pkt->size <= burstSize); 1215 1216 // Insert into response queue sorted by readyTime 1217 // It will be sent back to the requestor at its 1218 // readyTime 1219 if (respQueue.empty()) { 1220 respQueue.push_front(dram_pkt); 1221 assert(!respondEvent.scheduled()); 1222 assert(dram_pkt->readyTime >= curTick()); 1223 schedule(respondEvent, dram_pkt->readyTime); 1224 } else { 1225 bool done = false; 1226 auto i = respQueue.begin(); 1227 while (!done && i != respQueue.end()) { 1228 if ((*i)->readyTime > dram_pkt->readyTime) { 1229 respQueue.insert(i, dram_pkt); 1230 done = true; 1231 } 1232 ++i; 1233 } 1234 1235 if (!done) 1236 respQueue.push_back(dram_pkt); 1237 1238 assert(respondEvent.scheduled()); 1239 1240 if (respQueue.front()->readyTime < respondEvent.when()) { 1241 assert(respQueue.front()->readyTime >= curTick()); 1242 reschedule(respondEvent, respQueue.front()->readyTime); 1243 } 1244 } 1245} 1246 1247void 1248SimpleDRAM::scheduleNextReq() 1249{ 1250 DPRINTF(DRAM, "Reached scheduleNextReq()\n"); 1251 1252 // Figure out which read request goes next, and move it to the 1253 // front of the read queue 1254 if (!chooseNextRead()) { 1255 // In the case there is no read request to go next, trigger 1256 // writes if we have passed the low threshold (or if we are 1257 // draining) 1258 if (!writeQueue.empty() && !writeEvent.scheduled() && 1259 (writeQueue.size() > writeLowThreshold || drainManager)) 1260 triggerWrites(); 1261 } else { 1262 doDRAMAccess(readQueue.front()); 1263 } 1264} 1265 1266Tick 1267SimpleDRAM::maxBankFreeAt() const 1268{ 1269 Tick banksFree = 0; 1270 1271 for(int i = 0; i < ranksPerChannel; i++) 1272 for(int j = 0; j < banksPerRank; j++) 1273 banksFree = std::max(banks[i][j].freeAt, banksFree); 1274 1275 return banksFree; 1276} 1277 1278uint64_t 1279SimpleDRAM::minBankFreeAt(const deque<DRAMPacket*>& queue) const 1280{ 1281 uint64_t bank_mask = 0; 1282 Tick freeAt = MaxTick; 1283 1284 // detemrine if we have queued transactions targetting the 1285 // bank in question 1286 vector<bool> got_waiting(ranksPerChannel * banksPerRank, false); 1287 for (auto p = queue.begin(); p != queue.end(); ++p) { 1288 got_waiting[(*p)->bankId] = true; 1289 } 1290 1291 for (int i = 0; i < ranksPerChannel; i++) { 1292 for (int j = 0; j < banksPerRank; j++) { 1293 // if we have waiting requests for the bank, and it is 1294 // amongst the first available, update the mask 1295 if (got_waiting[i * banksPerRank + j] && 1296 banks[i][j].freeAt <= freeAt) { 1297 // reset bank mask if new minimum is found 1298 if (banks[i][j].freeAt < freeAt) 1299 bank_mask = 0; 1300 // set the bit corresponding to the available bank 1301 uint8_t bit_index = i * ranksPerChannel + j; 1302 replaceBits(bank_mask, bit_index, bit_index, 1); 1303 freeAt = banks[i][j].freeAt; 1304 } 1305 } 1306 } 1307 return bank_mask; 1308} 1309 1310void 1311SimpleDRAM::processRefreshEvent() 1312{ 1313 DPRINTF(DRAM, "Refreshing at tick %ld\n", curTick()); 1314 1315 Tick banksFree = std::max(curTick(), maxBankFreeAt()) + tRFC; 1316 1317 for(int i = 0; i < ranksPerChannel; i++) 1318 for(int j = 0; j < banksPerRank; j++) { 1319 banks[i][j].freeAt = banksFree; 1320 banks[i][j].openRow = -1; 1321 } 1322 1323 // updating startTickPrechargeAll, isprechargeAll 1324 numBanksActive = 0; 1325 startTickPrechargeAll = banksFree; 1326 1327 schedule(refreshEvent, curTick() + tREFI); 1328} 1329 1330void 1331SimpleDRAM::regStats() 1332{ 1333 using namespace Stats; 1334 1335 AbstractMemory::regStats(); 1336 1337 readReqs 1338 .name(name() + ".readReqs") 1339 .desc("Number of read requests accepted"); 1340 1341 writeReqs 1342 .name(name() + ".writeReqs") 1343 .desc("Number of write requests accepted"); 1344 1345 readBursts 1346 .name(name() + ".readBursts") 1347 .desc("Number of DRAM read bursts, " 1348 "including those serviced by the write queue"); 1349 1350 writeBursts 1351 .name(name() + ".writeBursts") 1352 .desc("Number of DRAM write bursts, " 1353 "including those merged in the write queue"); 1354 1355 servicedByWrQ 1356 .name(name() + ".servicedByWrQ") 1357 .desc("Number of DRAM read bursts serviced by the write queue"); 1358 1359 mergedWrBursts 1360 .name(name() + ".mergedWrBursts") 1361 .desc("Number of DRAM write bursts merged with an existing one"); 1362 1363 neitherReadNorWrite 1364 .name(name() + ".neitherReadNorWriteReqs") 1365 .desc("Number of requests that are neither read nor write"); 1366 1367 perBankRdBursts 1368 .init(banksPerRank * ranksPerChannel) 1369 .name(name() + ".perBankRdBursts") 1370 .desc("Per bank write bursts"); 1371 1372 perBankWrBursts 1373 .init(banksPerRank * ranksPerChannel) 1374 .name(name() + ".perBankWrBursts") 1375 .desc("Per bank write bursts"); 1376 1377 avgRdQLen 1378 .name(name() + ".avgRdQLen") 1379 .desc("Average read queue length when enqueuing") 1380 .precision(2); 1381 1382 avgWrQLen 1383 .name(name() + ".avgWrQLen") 1384 .desc("Average write queue length when enqueuing") 1385 .precision(2); 1386 1387 totQLat 1388 .name(name() + ".totQLat") 1389 .desc("Total ticks spent queuing"); 1390 1391 totBankLat 1392 .name(name() + ".totBankLat") 1393 .desc("Total ticks spent accessing banks"); 1394 1395 totBusLat 1396 .name(name() + ".totBusLat") 1397 .desc("Total ticks spent in databus transfers"); 1398 1399 totMemAccLat 1400 .name(name() + ".totMemAccLat") 1401 .desc("Total ticks spent from burst creation until serviced " 1402 "by the DRAM"); 1403 1404 avgQLat 1405 .name(name() + ".avgQLat") 1406 .desc("Average queueing delay per DRAM burst") 1407 .precision(2); 1408 1409 avgQLat = totQLat / (readBursts - servicedByWrQ); 1410 1411 avgBankLat 1412 .name(name() + ".avgBankLat") 1413 .desc("Average bank access latency per DRAM burst") 1414 .precision(2); 1415 1416 avgBankLat = totBankLat / (readBursts - servicedByWrQ); 1417 1418 avgBusLat 1419 .name(name() + ".avgBusLat") 1420 .desc("Average bus latency per DRAM burst") 1421 .precision(2); 1422 1423 avgBusLat = totBusLat / (readBursts - servicedByWrQ); 1424 1425 avgMemAccLat 1426 .name(name() + ".avgMemAccLat") 1427 .desc("Average memory access latency per DRAM burst") 1428 .precision(2); 1429 1430 avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ); 1431 1432 numRdRetry 1433 .name(name() + ".numRdRetry") 1434 .desc("Number of times read queue was full causing retry"); 1435 1436 numWrRetry 1437 .name(name() + ".numWrRetry") 1438 .desc("Number of times write queue was full causing retry"); 1439 1440 readRowHits 1441 .name(name() + ".readRowHits") 1442 .desc("Number of row buffer hits during reads"); 1443 1444 writeRowHits 1445 .name(name() + ".writeRowHits") 1446 .desc("Number of row buffer hits during writes"); 1447 1448 readRowHitRate 1449 .name(name() + ".readRowHitRate") 1450 .desc("Row buffer hit rate for reads") 1451 .precision(2); 1452 1453 readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100; 1454 1455 writeRowHitRate 1456 .name(name() + ".writeRowHitRate") 1457 .desc("Row buffer hit rate for writes") 1458 .precision(2); 1459 1460 writeRowHitRate = (writeRowHits / (writeBursts - mergedWrBursts)) * 100; 1461 1462 readPktSize 1463 .init(ceilLog2(burstSize) + 1) 1464 .name(name() + ".readPktSize") 1465 .desc("Read request sizes (log2)"); 1466 1467 writePktSize 1468 .init(ceilLog2(burstSize) + 1) 1469 .name(name() + ".writePktSize") 1470 .desc("Write request sizes (log2)"); 1471 1472 rdQLenPdf 1473 .init(readBufferSize) 1474 .name(name() + ".rdQLenPdf") 1475 .desc("What read queue length does an incoming req see"); 1476 1477 wrQLenPdf 1478 .init(writeBufferSize) 1479 .name(name() + ".wrQLenPdf") 1480 .desc("What write queue length does an incoming req see"); 1481 1482 bytesPerActivate 1483 .init(rowBufferSize) 1484 .name(name() + ".bytesPerActivate") 1485 .desc("Bytes accessed per row activation") 1486 .flags(nozero); 1487 1488 bytesReadDRAM 1489 .name(name() + ".bytesReadDRAM") 1490 .desc("Total number of bytes read from DRAM"); 1491 1492 bytesReadWrQ 1493 .name(name() + ".bytesReadWrQ") 1494 .desc("Total number of bytes read from write queue"); 1495 1496 bytesWritten 1497 .name(name() + ".bytesWritten") 1498 .desc("Total number of bytes written to DRAM"); 1499 1500 bytesReadSys 1501 .name(name() + ".bytesReadSys") 1502 .desc("Total read bytes from the system interface side"); 1503 1504 bytesWrittenSys 1505 .name(name() + ".bytesWrittenSys") 1506 .desc("Total written bytes from the system interface side"); 1507 1508 avgRdBW 1509 .name(name() + ".avgRdBW") 1510 .desc("Average DRAM read bandwidth in MiByte/s") 1511 .precision(2); 1512 1513 avgRdBW = (bytesReadDRAM / 1000000) / simSeconds; 1514 1515 avgWrBW 1516 .name(name() + ".avgWrBW") 1517 .desc("Average achieved write bandwidth in MiByte/s") 1518 .precision(2); 1519 1520 avgWrBW = (bytesWritten / 1000000) / simSeconds; 1521 1522 avgRdBWSys 1523 .name(name() + ".avgRdBWSys") 1524 .desc("Average system read bandwidth in MiByte/s") 1525 .precision(2); 1526 1527 avgRdBWSys = (bytesReadSys / 1000000) / simSeconds; 1528 1529 avgWrBWSys 1530 .name(name() + ".avgWrBWSys") 1531 .desc("Average system write bandwidth in MiByte/s") 1532 .precision(2); 1533 1534 avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds; 1535 1536 peakBW 1537 .name(name() + ".peakBW") 1538 .desc("Theoretical peak bandwidth in MiByte/s") 1539 .precision(2); 1540 1541 peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000; 1542 1543 busUtil 1544 .name(name() + ".busUtil") 1545 .desc("Data bus utilization in percentage") 1546 .precision(2); 1547 1548 busUtil = (avgRdBW + avgWrBW) / peakBW * 100; 1549 1550 totGap 1551 .name(name() + ".totGap") 1552 .desc("Total gap between requests"); 1553 1554 avgGap 1555 .name(name() + ".avgGap") 1556 .desc("Average gap between requests") 1557 .precision(2); 1558 1559 avgGap = totGap / (readReqs + writeReqs); 1560 1561 // Stats for DRAM Power calculation based on Micron datasheet 1562 busUtilRead 1563 .name(name() + ".busUtilRead") 1564 .desc("Data bus utilization in percentage for reads") 1565 .precision(2); 1566 1567 busUtilRead = avgRdBW / peakBW * 100; 1568 1569 busUtilWrite 1570 .name(name() + ".busUtilWrite") 1571 .desc("Data bus utilization in percentage for writes") 1572 .precision(2); 1573 1574 busUtilWrite = avgWrBW / peakBW * 100; 1575 1576 pageHitRate 1577 .name(name() + ".pageHitRate") 1578 .desc("Row buffer hit rate, read and write combined") 1579 .precision(2); 1580 1581 pageHitRate = (writeRowHits + readRowHits) / 1582 (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100; 1583 1584 prechargeAllPercent 1585 .name(name() + ".prechargeAllPercent") 1586 .desc("Percentage of time for which DRAM has all the banks in " 1587 "precharge state") 1588 .precision(2); 1589 1590 prechargeAllPercent = prechargeAllTime / simTicks * 100; 1591} 1592 1593void 1594SimpleDRAM::recvFunctional(PacketPtr pkt) 1595{ 1596 // rely on the abstract memory 1597 functionalAccess(pkt); 1598} 1599 1600BaseSlavePort& 1601SimpleDRAM::getSlavePort(const string &if_name, PortID idx) 1602{ 1603 if (if_name != "port") { 1604 return MemObject::getSlavePort(if_name, idx); 1605 } else { 1606 return port; 1607 } 1608} 1609 1610unsigned int 1611SimpleDRAM::drain(DrainManager *dm) 1612{ 1613 unsigned int count = port.drain(dm); 1614 1615 // if there is anything in any of our internal queues, keep track 1616 // of that as well 1617 if (!(writeQueue.empty() && readQueue.empty() && 1618 respQueue.empty())) { 1619 DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d," 1620 " resp: %d\n", writeQueue.size(), readQueue.size(), 1621 respQueue.size()); 1622 ++count; 1623 drainManager = dm; 1624 // the only part that is not drained automatically over time 1625 // is the write queue, thus trigger writes if there are any 1626 // waiting and no reads waiting, otherwise wait until the 1627 // reads are done 1628 if (readQueue.empty() && !writeQueue.empty() && 1629 !writeEvent.scheduled()) 1630 triggerWrites(); 1631 } 1632 1633 if (count) 1634 setDrainState(Drainable::Draining); 1635 else 1636 setDrainState(Drainable::Drained); 1637 return count; 1638} 1639 1640SimpleDRAM::MemoryPort::MemoryPort(const std::string& name, SimpleDRAM& _memory) 1641 : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this), 1642 memory(_memory) 1643{ } 1644 1645AddrRangeList 1646SimpleDRAM::MemoryPort::getAddrRanges() const 1647{ 1648 AddrRangeList ranges; 1649 ranges.push_back(memory.getAddrRange()); 1650 return ranges; 1651} 1652 1653void 1654SimpleDRAM::MemoryPort::recvFunctional(PacketPtr pkt) 1655{ 1656 pkt->pushLabel(memory.name()); 1657 1658 if (!queue.checkFunctional(pkt)) { 1659 // Default implementation of SimpleTimingPort::recvFunctional() 1660 // calls recvAtomic() and throws away the latency; we can save a 1661 // little here by just not calculating the latency. 1662 memory.recvFunctional(pkt); 1663 } 1664 1665 pkt->popLabel(); 1666} 1667 1668Tick 1669SimpleDRAM::MemoryPort::recvAtomic(PacketPtr pkt) 1670{ 1671 return memory.recvAtomic(pkt); 1672} 1673 1674bool 1675SimpleDRAM::MemoryPort::recvTimingReq(PacketPtr pkt) 1676{ 1677 // pass it to the memory controller 1678 return memory.recvTimingReq(pkt); 1679} 1680 1681SimpleDRAM* 1682SimpleDRAMParams::create() 1683{ 1684 return new SimpleDRAM(this); 1685} 1686