dram_ctrl.cc revision 9977
1/* 2 * Copyright (c) 2010-2013 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2013 Amin Farmahini-Farahani 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Andreas Hansson 41 * Ani Udipi 42 * Neha Agarwal 43 */ 44 45#include "base/trace.hh" 46#include "base/bitfield.hh" 47#include "debug/Drain.hh" 48#include "debug/DRAM.hh" 49#include "mem/simple_dram.hh" 50#include "sim/system.hh" 51 52using namespace std; 53 54SimpleDRAM::SimpleDRAM(const SimpleDRAMParams* p) : 55 AbstractMemory(p), 56 port(name() + ".port", *this), 57 retryRdReq(false), retryWrReq(false), 58 rowHitFlag(false), stopReads(false), 59 writeEvent(this), respondEvent(this), 60 refreshEvent(this), nextReqEvent(this), drainManager(NULL), 61 deviceBusWidth(p->device_bus_width), burstLength(p->burst_length), 62 deviceRowBufferSize(p->device_rowbuffer_size), 63 devicesPerRank(p->devices_per_rank), 64 burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8), 65 rowBufferSize(devicesPerRank * deviceRowBufferSize), 66 ranksPerChannel(p->ranks_per_channel), 67 banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0), 68 readBufferSize(p->read_buffer_size), 69 writeBufferSize(p->write_buffer_size), 70 writeHighThresholdPerc(p->write_high_thresh_perc), 71 writeLowThresholdPerc(p->write_low_thresh_perc), 72 tWTR(p->tWTR), tBURST(p->tBURST), 73 tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS), 74 tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD), 75 tXAW(p->tXAW), activationLimit(p->activation_limit), 76 memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping), 77 pageMgmt(p->page_policy), 78 frontendLatency(p->static_frontend_latency), 79 backendLatency(p->static_backend_latency), 80 busBusyUntil(0), writeStartTime(0), 81 prevArrival(0), numReqs(0), 82 numWritesThisTime(0), newTime(0), 83 startTickPrechargeAll(0), numBanksActive(0) 84{ 85 // create the bank states based on the dimensions of the ranks and 86 // banks 87 banks.resize(ranksPerChannel); 88 actTicks.resize(ranksPerChannel); 89 for (size_t c = 0; c < ranksPerChannel; ++c) { 90 banks[c].resize(banksPerRank); 91 actTicks[c].resize(activationLimit, 0); 92 } 93 94 // round the write thresholds percent to a whole number of entries 95 // in the buffer. 96 writeHighThreshold = writeBufferSize * writeHighThresholdPerc / 100.0; 97 writeLowThreshold = writeBufferSize * writeLowThresholdPerc / 100.0; 98} 99 100void 101SimpleDRAM::init() 102{ 103 if (!port.isConnected()) { 104 fatal("SimpleDRAM %s is unconnected!\n", name()); 105 } else { 106 port.sendRangeChange(); 107 } 108 109 // we could deal with plenty options here, but for now do a quick 110 // sanity check 111 DPRINTF(DRAM, "Burst size %d bytes\n", burstSize); 112 113 // determine the rows per bank by looking at the total capacity 114 uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size()); 115 116 DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity, 117 AbstractMemory::size()); 118 119 columnsPerRowBuffer = rowBufferSize / burstSize; 120 121 DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n", 122 rowBufferSize, columnsPerRowBuffer); 123 124 rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel); 125 126 if (range.interleaved()) { 127 if (channels != range.stripes()) 128 panic("%s has %d interleaved address stripes but %d channel(s)\n", 129 name(), range.stripes(), channels); 130 131 if (addrMapping == Enums::RaBaChCo) { 132 if (rowBufferSize != range.granularity()) { 133 panic("Interleaving of %s doesn't match RaBaChCo address map\n", 134 name()); 135 } 136 } else if (addrMapping == Enums::RaBaCoCh) { 137 if (burstSize != range.granularity()) { 138 panic("Interleaving of %s doesn't match RaBaCoCh address map\n", 139 name()); 140 } 141 } else if (addrMapping == Enums::CoRaBaCh) { 142 if (burstSize != range.granularity()) 143 panic("Interleaving of %s doesn't match CoRaBaCh address map\n", 144 name()); 145 } 146 } 147} 148 149void 150SimpleDRAM::startup() 151{ 152 // print the configuration of the controller 153 printParams(); 154 155 // kick off the refresh 156 schedule(refreshEvent, curTick() + tREFI); 157} 158 159Tick 160SimpleDRAM::recvAtomic(PacketPtr pkt) 161{ 162 DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr()); 163 164 // do the actual memory access and turn the packet into a response 165 access(pkt); 166 167 Tick latency = 0; 168 if (!pkt->memInhibitAsserted() && pkt->hasData()) { 169 // this value is not supposed to be accurate, just enough to 170 // keep things going, mimic a closed page 171 latency = tRP + tRCD + tCL; 172 } 173 return latency; 174} 175 176bool 177SimpleDRAM::readQueueFull(unsigned int neededEntries) const 178{ 179 DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n", 180 readBufferSize, readQueue.size() + respQueue.size(), 181 neededEntries); 182 183 return 184 (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize; 185} 186 187bool 188SimpleDRAM::writeQueueFull(unsigned int neededEntries) const 189{ 190 DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n", 191 writeBufferSize, writeQueue.size(), neededEntries); 192 return (writeQueue.size() + neededEntries) > writeBufferSize; 193} 194 195SimpleDRAM::DRAMPacket* 196SimpleDRAM::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size, bool isRead) 197{ 198 // decode the address based on the address mapping scheme, with 199 // Ra, Co, Ba and Ch denoting rank, column, bank and channel, 200 // respectively 201 uint8_t rank; 202 uint8_t bank; 203 uint16_t row; 204 205 // truncate the address to the access granularity 206 Addr addr = dramPktAddr / burstSize; 207 208 // we have removed the lowest order address bits that denote the 209 // position within the column 210 if (addrMapping == Enums::RaBaChCo) { 211 // the lowest order bits denote the column to ensure that 212 // sequential cache lines occupy the same row 213 addr = addr / columnsPerRowBuffer; 214 215 // take out the channel part of the address 216 addr = addr / channels; 217 218 // after the channel bits, get the bank bits to interleave 219 // over the banks 220 bank = addr % banksPerRank; 221 addr = addr / banksPerRank; 222 223 // after the bank, we get the rank bits which thus interleaves 224 // over the ranks 225 rank = addr % ranksPerChannel; 226 addr = addr / ranksPerChannel; 227 228 // lastly, get the row bits 229 row = addr % rowsPerBank; 230 addr = addr / rowsPerBank; 231 } else if (addrMapping == Enums::RaBaCoCh) { 232 // take out the channel part of the address 233 addr = addr / channels; 234 235 // next, the column 236 addr = addr / columnsPerRowBuffer; 237 238 // after the column bits, we get the bank bits to interleave 239 // over the banks 240 bank = addr % banksPerRank; 241 addr = addr / banksPerRank; 242 243 // after the bank, we get the rank bits which thus interleaves 244 // over the ranks 245 rank = addr % ranksPerChannel; 246 addr = addr / ranksPerChannel; 247 248 // lastly, get the row bits 249 row = addr % rowsPerBank; 250 addr = addr / rowsPerBank; 251 } else if (addrMapping == Enums::CoRaBaCh) { 252 // optimise for closed page mode and utilise maximum 253 // parallelism of the DRAM (at the cost of power) 254 255 // take out the channel part of the address, not that this has 256 // to match with how accesses are interleaved between the 257 // controllers in the address mapping 258 addr = addr / channels; 259 260 // start with the bank bits, as this provides the maximum 261 // opportunity for parallelism between requests 262 bank = addr % banksPerRank; 263 addr = addr / banksPerRank; 264 265 // next get the rank bits 266 rank = addr % ranksPerChannel; 267 addr = addr / ranksPerChannel; 268 269 // next the column bits which we do not need to keep track of 270 // and simply skip past 271 addr = addr / columnsPerRowBuffer; 272 273 // lastly, get the row bits 274 row = addr % rowsPerBank; 275 addr = addr / rowsPerBank; 276 } else 277 panic("Unknown address mapping policy chosen!"); 278 279 assert(rank < ranksPerChannel); 280 assert(bank < banksPerRank); 281 assert(row < rowsPerBank); 282 283 DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n", 284 dramPktAddr, rank, bank, row); 285 286 // create the corresponding DRAM packet with the entry time and 287 // ready time set to the current tick, the latter will be updated 288 // later 289 uint16_t bank_id = banksPerRank * rank + bank; 290 return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr, 291 size, banks[rank][bank]); 292} 293 294void 295SimpleDRAM::addToReadQueue(PacketPtr pkt, unsigned int pktCount) 296{ 297 // only add to the read queue here. whenever the request is 298 // eventually done, set the readyTime, and call schedule() 299 assert(!pkt->isWrite()); 300 301 assert(pktCount != 0); 302 303 // if the request size is larger than burst size, the pkt is split into 304 // multiple DRAM packets 305 // Note if the pkt starting address is not aligened to burst size, the 306 // address of first DRAM packet is kept unaliged. Subsequent DRAM packets 307 // are aligned to burst size boundaries. This is to ensure we accurately 308 // check read packets against packets in write queue. 309 Addr addr = pkt->getAddr(); 310 unsigned pktsServicedByWrQ = 0; 311 BurstHelper* burst_helper = NULL; 312 for (int cnt = 0; cnt < pktCount; ++cnt) { 313 unsigned size = std::min((addr | (burstSize - 1)) + 1, 314 pkt->getAddr() + pkt->getSize()) - addr; 315 readPktSize[ceilLog2(size)]++; 316 readBursts++; 317 318 // First check write buffer to see if the data is already at 319 // the controller 320 bool foundInWrQ = false; 321 for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) { 322 // check if the read is subsumed in the write entry we are 323 // looking at 324 if ((*i)->addr <= addr && 325 (addr + size) <= ((*i)->addr + (*i)->size)) { 326 foundInWrQ = true; 327 servicedByWrQ++; 328 pktsServicedByWrQ++; 329 DPRINTF(DRAM, "Read to addr %lld with size %d serviced by " 330 "write queue\n", addr, size); 331 bytesReadWrQ += burstSize; 332 break; 333 } 334 } 335 336 // If not found in the write q, make a DRAM packet and 337 // push it onto the read queue 338 if (!foundInWrQ) { 339 340 // Make the burst helper for split packets 341 if (pktCount > 1 && burst_helper == NULL) { 342 DPRINTF(DRAM, "Read to addr %lld translates to %d " 343 "dram requests\n", pkt->getAddr(), pktCount); 344 burst_helper = new BurstHelper(pktCount); 345 } 346 347 DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true); 348 dram_pkt->burstHelper = burst_helper; 349 350 assert(!readQueueFull(1)); 351 rdQLenPdf[readQueue.size() + respQueue.size()]++; 352 353 DPRINTF(DRAM, "Adding to read queue\n"); 354 355 readQueue.push_back(dram_pkt); 356 357 // Update stats 358 avgRdQLen = readQueue.size() + respQueue.size(); 359 } 360 361 // Starting address of next dram pkt (aligend to burstSize boundary) 362 addr = (addr | (burstSize - 1)) + 1; 363 } 364 365 // If all packets are serviced by write queue, we send the repsonse back 366 if (pktsServicedByWrQ == pktCount) { 367 accessAndRespond(pkt, frontendLatency); 368 return; 369 } 370 371 // Update how many split packets are serviced by write queue 372 if (burst_helper != NULL) 373 burst_helper->burstsServiced = pktsServicedByWrQ; 374 375 // If we are not already scheduled to get the read request out of 376 // the queue, do so now 377 if (!nextReqEvent.scheduled() && !stopReads) { 378 DPRINTF(DRAM, "Request scheduled immediately\n"); 379 schedule(nextReqEvent, curTick()); 380 } 381} 382 383void 384SimpleDRAM::processWriteEvent() 385{ 386 assert(!writeQueue.empty()); 387 388 DPRINTF(DRAM, "Beginning DRAM Write\n"); 389 Tick temp1 M5_VAR_USED = std::max(curTick(), busBusyUntil); 390 Tick temp2 M5_VAR_USED = std::max(curTick(), maxBankFreeAt()); 391 392 chooseNextWrite(); 393 DRAMPacket* dram_pkt = writeQueue.front(); 394 // sanity check 395 assert(dram_pkt->size <= burstSize); 396 doDRAMAccess(dram_pkt); 397 398 writeQueue.pop_front(); 399 delete dram_pkt; 400 numWritesThisTime++; 401 402 DPRINTF(DRAM, "Completed %d writes, bus busy for %lld ticks,"\ 403 "banks busy for %lld ticks\n", numWritesThisTime, 404 busBusyUntil - temp1, maxBankFreeAt() - temp2); 405 406 // Update stats 407 avgWrQLen = writeQueue.size(); 408 409 if (numWritesThisTime >= writeHighThreshold) { 410 DPRINTF(DRAM, "Hit write threshold %d\n", writeHighThreshold); 411 } 412 413 // If number of writes in the queue fall below the low thresholds and 414 // read queue is not empty then schedule a request event else continue 415 // with writes. The retry above could already have caused it to be 416 // scheduled, so first check 417 if (((writeQueue.size() <= writeLowThreshold) && !readQueue.empty()) || 418 writeQueue.empty()) { 419 numWritesThisTime = 0; 420 // turn the bus back around for reads again 421 busBusyUntil += tWTR; 422 stopReads = false; 423 424 if (!nextReqEvent.scheduled()) 425 schedule(nextReqEvent, busBusyUntil); 426 } else { 427 assert(!writeEvent.scheduled()); 428 DPRINTF(DRAM, "Next write scheduled at %lld\n", newTime); 429 schedule(writeEvent, newTime); 430 } 431 432 if (retryWrReq) { 433 retryWrReq = false; 434 port.sendRetry(); 435 } 436 437 // if there is nothing left in any queue, signal a drain 438 if (writeQueue.empty() && readQueue.empty() && 439 respQueue.empty () && drainManager) { 440 drainManager->signalDrainDone(); 441 drainManager = NULL; 442 } 443} 444 445 446void 447SimpleDRAM::triggerWrites() 448{ 449 DPRINTF(DRAM, "Writes triggered at %lld\n", curTick()); 450 // Flag variable to stop any more read scheduling 451 stopReads = true; 452 453 writeStartTime = std::max(busBusyUntil, curTick()) + tWTR; 454 455 DPRINTF(DRAM, "Writes scheduled at %lld\n", writeStartTime); 456 457 assert(writeStartTime >= curTick()); 458 assert(!writeEvent.scheduled()); 459 schedule(writeEvent, writeStartTime); 460} 461 462void 463SimpleDRAM::addToWriteQueue(PacketPtr pkt, unsigned int pktCount) 464{ 465 // only add to the write queue here. whenever the request is 466 // eventually done, set the readyTime, and call schedule() 467 assert(pkt->isWrite()); 468 469 // if the request size is larger than burst size, the pkt is split into 470 // multiple DRAM packets 471 Addr addr = pkt->getAddr(); 472 for (int cnt = 0; cnt < pktCount; ++cnt) { 473 unsigned size = std::min((addr | (burstSize - 1)) + 1, 474 pkt->getAddr() + pkt->getSize()) - addr; 475 writePktSize[ceilLog2(size)]++; 476 writeBursts++; 477 478 // see if we can merge with an existing item in the write 479 // queue and keep track of whether we have merged or not so we 480 // can stop at that point and also avoid enqueueing a new 481 // request 482 bool merged = false; 483 auto w = writeQueue.begin(); 484 485 while(!merged && w != writeQueue.end()) { 486 // either of the two could be first, if they are the same 487 // it does not matter which way we go 488 if ((*w)->addr >= addr) { 489 // the existing one starts after the new one, figure 490 // out where the new one ends with respect to the 491 // existing one 492 if ((addr + size) >= ((*w)->addr + (*w)->size)) { 493 // check if the existing one is completely 494 // subsumed in the new one 495 DPRINTF(DRAM, "Merging write covering existing burst\n"); 496 merged = true; 497 // update both the address and the size 498 (*w)->addr = addr; 499 (*w)->size = size; 500 } else if ((addr + size) >= (*w)->addr && 501 ((*w)->addr + (*w)->size - addr) <= burstSize) { 502 // the new one is just before or partially 503 // overlapping with the existing one, and together 504 // they fit within a burst 505 DPRINTF(DRAM, "Merging write before existing burst\n"); 506 merged = true; 507 // the existing queue item needs to be adjusted with 508 // respect to both address and size 509 (*w)->addr = addr; 510 (*w)->size = (*w)->addr + (*w)->size - addr; 511 } 512 } else { 513 // the new one starts after the current one, figure 514 // out where the existing one ends with respect to the 515 // new one 516 if (((*w)->addr + (*w)->size) >= (addr + size)) { 517 // check if the new one is completely subsumed in the 518 // existing one 519 DPRINTF(DRAM, "Merging write into existing burst\n"); 520 merged = true; 521 // no adjustments necessary 522 } else if (((*w)->addr + (*w)->size) >= addr && 523 (addr + size - (*w)->addr) <= burstSize) { 524 // the existing one is just before or partially 525 // overlapping with the new one, and together 526 // they fit within a burst 527 DPRINTF(DRAM, "Merging write after existing burst\n"); 528 merged = true; 529 // the address is right, and only the size has 530 // to be adjusted 531 (*w)->size = addr + size - (*w)->addr; 532 } 533 } 534 ++w; 535 } 536 537 // if the item was not merged we need to create a new write 538 // and enqueue it 539 if (!merged) { 540 DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false); 541 542 assert(writeQueue.size() < writeBufferSize); 543 wrQLenPdf[writeQueue.size()]++; 544 545 DPRINTF(DRAM, "Adding to write queue\n"); 546 547 writeQueue.push_back(dram_pkt); 548 549 // Update stats 550 avgWrQLen = writeQueue.size(); 551 } else { 552 // keep track of the fact that this burst effectively 553 // disappeared as it was merged with an existing one 554 mergedWrBursts++; 555 } 556 557 // Starting address of next dram pkt (aligend to burstSize boundary) 558 addr = (addr | (burstSize - 1)) + 1; 559 } 560 561 // we do not wait for the writes to be send to the actual memory, 562 // but instead take responsibility for the consistency here and 563 // snoop the write queue for any upcoming reads 564 // @todo, if a pkt size is larger than burst size, we might need a 565 // different front end latency 566 accessAndRespond(pkt, frontendLatency); 567 568 // If your write buffer is starting to fill up, drain it! 569 if (writeQueue.size() >= writeHighThreshold && !stopReads){ 570 triggerWrites(); 571 } 572} 573 574void 575SimpleDRAM::printParams() const 576{ 577 // Sanity check print of important parameters 578 DPRINTF(DRAM, 579 "Memory controller %s physical organization\n" \ 580 "Number of devices per rank %d\n" \ 581 "Device bus width (in bits) %d\n" \ 582 "DRAM data bus burst %d\n" \ 583 "Row buffer size %d\n" \ 584 "Columns per row buffer %d\n" \ 585 "Rows per bank %d\n" \ 586 "Banks per rank %d\n" \ 587 "Ranks per channel %d\n" \ 588 "Total mem capacity %u\n", 589 name(), devicesPerRank, deviceBusWidth, burstSize, rowBufferSize, 590 columnsPerRowBuffer, rowsPerBank, banksPerRank, ranksPerChannel, 591 rowBufferSize * rowsPerBank * banksPerRank * ranksPerChannel); 592 593 string scheduler = memSchedPolicy == Enums::fcfs ? "FCFS" : "FR-FCFS"; 594 string address_mapping = addrMapping == Enums::RaBaChCo ? "RaBaChCo" : 595 (addrMapping == Enums::RaBaCoCh ? "RaBaCoCh" : "CoRaBaCh"); 596 string page_policy = pageMgmt == Enums::open ? "OPEN" : 597 (pageMgmt == Enums::open_adaptive ? "OPEN (adaptive)" : "CLOSE"); 598 599 DPRINTF(DRAM, 600 "Memory controller %s characteristics\n" \ 601 "Read buffer size %d\n" \ 602 "Write buffer size %d\n" \ 603 "Write buffer thresh %d\n" \ 604 "Scheduler %s\n" \ 605 "Address mapping %s\n" \ 606 "Page policy %s\n", 607 name(), readBufferSize, writeBufferSize, writeHighThreshold, 608 scheduler, address_mapping, page_policy); 609 610 DPRINTF(DRAM, "Memory controller %s timing specs\n" \ 611 "tRCD %d ticks\n" \ 612 "tCL %d ticks\n" \ 613 "tRP %d ticks\n" \ 614 "tBURST %d ticks\n" \ 615 "tRFC %d ticks\n" \ 616 "tREFI %d ticks\n" \ 617 "tWTR %d ticks\n" \ 618 "tXAW (%d) %d ticks\n", 619 name(), tRCD, tCL, tRP, tBURST, tRFC, tREFI, tWTR, 620 activationLimit, tXAW); 621} 622 623void 624SimpleDRAM::printQs() const { 625 DPRINTF(DRAM, "===READ QUEUE===\n\n"); 626 for (auto i = readQueue.begin() ; i != readQueue.end() ; ++i) { 627 DPRINTF(DRAM, "Read %lu\n", (*i)->addr); 628 } 629 DPRINTF(DRAM, "\n===RESP QUEUE===\n\n"); 630 for (auto i = respQueue.begin() ; i != respQueue.end() ; ++i) { 631 DPRINTF(DRAM, "Response %lu\n", (*i)->addr); 632 } 633 DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n"); 634 for (auto i = writeQueue.begin() ; i != writeQueue.end() ; ++i) { 635 DPRINTF(DRAM, "Write %lu\n", (*i)->addr); 636 } 637} 638 639bool 640SimpleDRAM::recvTimingReq(PacketPtr pkt) 641{ 642 /// @todo temporary hack to deal with memory corruption issues until 643 /// 4-phase transactions are complete 644 for (int x = 0; x < pendingDelete.size(); x++) 645 delete pendingDelete[x]; 646 pendingDelete.clear(); 647 648 // This is where we enter from the outside world 649 DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n", 650 pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 651 652 // simply drop inhibited packets for now 653 if (pkt->memInhibitAsserted()) { 654 DPRINTF(DRAM,"Inhibited packet -- Dropping it now\n"); 655 pendingDelete.push_back(pkt); 656 return true; 657 } 658 659 // Every million accesses, print the state of the queues 660 if (numReqs % 1000000 == 0) 661 printQs(); 662 663 // Calc avg gap between requests 664 if (prevArrival != 0) { 665 totGap += curTick() - prevArrival; 666 } 667 prevArrival = curTick(); 668 669 670 // Find out how many dram packets a pkt translates to 671 // If the burst size is equal or larger than the pkt size, then a pkt 672 // translates to only one dram packet. Otherwise, a pkt translates to 673 // multiple dram packets 674 unsigned size = pkt->getSize(); 675 unsigned offset = pkt->getAddr() & (burstSize - 1); 676 unsigned int dram_pkt_count = divCeil(offset + size, burstSize); 677 678 // check local buffers and do not accept if full 679 if (pkt->isRead()) { 680 assert(size != 0); 681 if (readQueueFull(dram_pkt_count)) { 682 DPRINTF(DRAM, "Read queue full, not accepting\n"); 683 // remember that we have to retry this port 684 retryRdReq = true; 685 numRdRetry++; 686 return false; 687 } else { 688 addToReadQueue(pkt, dram_pkt_count); 689 readReqs++; 690 numReqs++; 691 bytesReadSys += size; 692 } 693 } else if (pkt->isWrite()) { 694 assert(size != 0); 695 if (writeQueueFull(dram_pkt_count)) { 696 DPRINTF(DRAM, "Write queue full, not accepting\n"); 697 // remember that we have to retry this port 698 retryWrReq = true; 699 numWrRetry++; 700 return false; 701 } else { 702 addToWriteQueue(pkt, dram_pkt_count); 703 writeReqs++; 704 numReqs++; 705 bytesWrittenSys += size; 706 } 707 } else { 708 DPRINTF(DRAM,"Neither read nor write, ignore timing\n"); 709 neitherReadNorWrite++; 710 accessAndRespond(pkt, 1); 711 } 712 713 retryRdReq = false; 714 retryWrReq = false; 715 return true; 716} 717 718void 719SimpleDRAM::processRespondEvent() 720{ 721 DPRINTF(DRAM, 722 "processRespondEvent(): Some req has reached its readyTime\n"); 723 724 DRAMPacket* dram_pkt = respQueue.front(); 725 726 if (dram_pkt->burstHelper) { 727 // it is a split packet 728 dram_pkt->burstHelper->burstsServiced++; 729 if (dram_pkt->burstHelper->burstsServiced == 730 dram_pkt->burstHelper->burstCount) { 731 // we have now serviced all children packets of a system packet 732 // so we can now respond to the requester 733 // @todo we probably want to have a different front end and back 734 // end latency for split packets 735 accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency); 736 delete dram_pkt->burstHelper; 737 dram_pkt->burstHelper = NULL; 738 } 739 } else { 740 // it is not a split packet 741 accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency); 742 } 743 744 delete respQueue.front(); 745 respQueue.pop_front(); 746 747 // Update stats 748 avgRdQLen = readQueue.size() + respQueue.size(); 749 750 if (!respQueue.empty()) { 751 assert(respQueue.front()->readyTime >= curTick()); 752 assert(!respondEvent.scheduled()); 753 schedule(respondEvent, respQueue.front()->readyTime); 754 } else { 755 // if there is nothing left in any queue, signal a drain 756 if (writeQueue.empty() && readQueue.empty() && 757 drainManager) { 758 drainManager->signalDrainDone(); 759 drainManager = NULL; 760 } 761 } 762 763 // We have made a location in the queue available at this point, 764 // so if there is a read that was forced to wait, retry now 765 if (retryRdReq) { 766 retryRdReq = false; 767 port.sendRetry(); 768 } 769} 770 771void 772SimpleDRAM::chooseNextWrite() 773{ 774 // This method does the arbitration between write requests. The 775 // chosen packet is simply moved to the head of the write 776 // queue. The other methods know that this is the place to 777 // look. For example, with FCFS, this method does nothing 778 assert(!writeQueue.empty()); 779 780 if (writeQueue.size() == 1) { 781 DPRINTF(DRAM, "Single write request, nothing to do\n"); 782 return; 783 } 784 785 if (memSchedPolicy == Enums::fcfs) { 786 // Do nothing, since the correct request is already head 787 } else if (memSchedPolicy == Enums::frfcfs) { 788 reorderQueue(writeQueue); 789 } else 790 panic("No scheduling policy chosen\n"); 791 792 DPRINTF(DRAM, "Selected next write request\n"); 793} 794 795bool 796SimpleDRAM::chooseNextRead() 797{ 798 // This method does the arbitration between read requests. The 799 // chosen packet is simply moved to the head of the queue. The 800 // other methods know that this is the place to look. For example, 801 // with FCFS, this method does nothing 802 if (readQueue.empty()) { 803 DPRINTF(DRAM, "No read request to select\n"); 804 return false; 805 } 806 807 // If there is only one request then there is nothing left to do 808 if (readQueue.size() == 1) 809 return true; 810 811 if (memSchedPolicy == Enums::fcfs) { 812 // Do nothing, since the request to serve is already the first 813 // one in the read queue 814 } else if (memSchedPolicy == Enums::frfcfs) { 815 reorderQueue(readQueue); 816 } else 817 panic("No scheduling policy chosen!\n"); 818 819 DPRINTF(DRAM, "Selected next read request\n"); 820 return true; 821} 822 823void 824SimpleDRAM::reorderQueue(std::deque<DRAMPacket*>& queue) 825{ 826 // Only determine this when needed 827 uint64_t earliest_banks = 0; 828 829 // Search for row hits first, if no row hit is found then schedule the 830 // packet to one of the earliest banks available 831 bool found_earliest_pkt = false; 832 auto selected_pkt_it = queue.begin(); 833 834 for (auto i = queue.begin(); i != queue.end() ; ++i) { 835 DRAMPacket* dram_pkt = *i; 836 const Bank& bank = dram_pkt->bankRef; 837 // Check if it is a row hit 838 if (bank.openRow == dram_pkt->row) { 839 DPRINTF(DRAM, "Row buffer hit\n"); 840 selected_pkt_it = i; 841 break; 842 } else if (!found_earliest_pkt) { 843 // No row hit, go for first ready 844 if (earliest_banks == 0) 845 earliest_banks = minBankFreeAt(queue); 846 847 // Bank is ready or is the first available bank 848 if (bank.freeAt <= curTick() || 849 bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) { 850 // Remember the packet to be scheduled to one of the earliest 851 // banks available 852 selected_pkt_it = i; 853 found_earliest_pkt = true; 854 } 855 } 856 } 857 858 DRAMPacket* selected_pkt = *selected_pkt_it; 859 queue.erase(selected_pkt_it); 860 queue.push_front(selected_pkt); 861} 862 863void 864SimpleDRAM::accessAndRespond(PacketPtr pkt, Tick static_latency) 865{ 866 DPRINTF(DRAM, "Responding to Address %lld.. ",pkt->getAddr()); 867 868 bool needsResponse = pkt->needsResponse(); 869 // do the actual memory access which also turns the packet into a 870 // response 871 access(pkt); 872 873 // turn packet around to go back to requester if response expected 874 if (needsResponse) { 875 // access already turned the packet into a response 876 assert(pkt->isResponse()); 877 878 // @todo someone should pay for this 879 pkt->busFirstWordDelay = pkt->busLastWordDelay = 0; 880 881 // queue the packet in the response queue to be sent out after 882 // the static latency has passed 883 port.schedTimingResp(pkt, curTick() + static_latency); 884 } else { 885 // @todo the packet is going to be deleted, and the DRAMPacket 886 // is still having a pointer to it 887 pendingDelete.push_back(pkt); 888 } 889 890 DPRINTF(DRAM, "Done\n"); 891 892 return; 893} 894 895pair<Tick, Tick> 896SimpleDRAM::estimateLatency(DRAMPacket* dram_pkt, Tick inTime) 897{ 898 // If a request reaches a bank at tick 'inTime', how much time 899 // *after* that does it take to finish the request, depending 900 // on bank status and page open policy. Note that this method 901 // considers only the time taken for the actual read or write 902 // to complete, NOT any additional time thereafter for tRAS or 903 // tRP. 904 Tick accLat = 0; 905 Tick bankLat = 0; 906 rowHitFlag = false; 907 Tick potentialActTick; 908 909 const Bank& bank = dram_pkt->bankRef; 910 // open-page policy 911 if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive) { 912 if (bank.openRow == dram_pkt->row) { 913 // When we have a row-buffer hit, 914 // we don't care about tRAS having expired or not, 915 // but do care about bank being free for access 916 rowHitFlag = true; 917 918 // When a series of requests arrive to the same row, 919 // DDR systems are capable of streaming data continuously 920 // at maximum bandwidth (subject to tCCD). Here, we approximate 921 // this condition, and assume that if whenever a bank is already 922 // busy and a new request comes in, it can be completed with no 923 // penalty beyond waiting for the existing read to complete. 924 if (bank.freeAt > inTime) { 925 accLat += bank.freeAt - inTime; 926 bankLat += 0; 927 } else { 928 // CAS latency only 929 accLat += tCL; 930 bankLat += tCL; 931 } 932 933 } else { 934 // Row-buffer miss, need to close existing row 935 // once tRAS has expired, then open the new one, 936 // then add cas latency. 937 Tick freeTime = std::max(bank.tRASDoneAt, bank.freeAt); 938 939 if (freeTime > inTime) 940 accLat += freeTime - inTime; 941 942 // If the there is no open row (open adaptive), then there 943 // is no precharge delay, otherwise go with tRP 944 Tick precharge_delay = bank.openRow == -1 ? 0 : tRP; 945 946 //The bank is free, and you may be able to activate 947 potentialActTick = inTime + accLat + precharge_delay; 948 if (potentialActTick < bank.actAllowedAt) 949 accLat += bank.actAllowedAt - potentialActTick; 950 951 accLat += precharge_delay + tRCD + tCL; 952 bankLat += precharge_delay + tRCD + tCL; 953 } 954 } else if (pageMgmt == Enums::close) { 955 // With a close page policy, no notion of 956 // bank.tRASDoneAt 957 if (bank.freeAt > inTime) 958 accLat += bank.freeAt - inTime; 959 960 //The bank is free, and you may be able to activate 961 potentialActTick = inTime + accLat; 962 if (potentialActTick < bank.actAllowedAt) 963 accLat += bank.actAllowedAt - potentialActTick; 964 965 // page already closed, simply open the row, and 966 // add cas latency 967 accLat += tRCD + tCL; 968 bankLat += tRCD + tCL; 969 } else 970 panic("No page management policy chosen\n"); 971 972 DPRINTF(DRAM, "Returning < %lld, %lld > from estimateLatency()\n", 973 bankLat, accLat); 974 975 return make_pair(bankLat, accLat); 976} 977 978void 979SimpleDRAM::processNextReqEvent() 980{ 981 scheduleNextReq(); 982} 983 984void 985SimpleDRAM::recordActivate(Tick act_tick, uint8_t rank, uint8_t bank) 986{ 987 assert(0 <= rank && rank < ranksPerChannel); 988 assert(actTicks[rank].size() == activationLimit); 989 990 DPRINTF(DRAM, "Activate at tick %d\n", act_tick); 991 992 // Tracking accesses after all banks are precharged. 993 // startTickPrechargeAll: is the tick when all the banks were again 994 // precharged. The difference between act_tick and startTickPrechargeAll 995 // gives the time for which DRAM doesn't get any accesses after refreshing 996 // or after a page is closed in closed-page or open-adaptive-page policy. 997 if ((numBanksActive == 0) && (act_tick > startTickPrechargeAll)) { 998 prechargeAllTime += act_tick - startTickPrechargeAll; 999 } 1000 1001 // No need to update number of active banks for closed-page policy as only 1 1002 // bank will be activated at any given point, which will be instatntly 1003 // precharged 1004 if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive) 1005 ++numBanksActive; 1006 1007 // start by enforcing tRRD 1008 for(int i = 0; i < banksPerRank; i++) { 1009 // next activate must not happen before tRRD 1010 banks[rank][i].actAllowedAt = act_tick + tRRD; 1011 } 1012 // tRC should be added to activation tick of the bank currently accessed, 1013 // where tRC = tRAS + tRP, this is just for a check as actAllowedAt for same 1014 // bank is already captured by bank.freeAt and bank.tRASDoneAt 1015 banks[rank][bank].actAllowedAt = act_tick + tRAS + tRP; 1016 1017 // next, we deal with tXAW, if the activation limit is disabled 1018 // then we are done 1019 if (actTicks[rank].empty()) 1020 return; 1021 1022 // sanity check 1023 if (actTicks[rank].back() && (act_tick - actTicks[rank].back()) < tXAW) { 1024 // @todo For now, stick with a warning 1025 warn("Got %d activates in window %d (%d - %d) which is smaller " 1026 "than %d\n", activationLimit, act_tick - actTicks[rank].back(), 1027 act_tick, actTicks[rank].back(), tXAW); 1028 } 1029 1030 // shift the times used for the book keeping, the last element 1031 // (highest index) is the oldest one and hence the lowest value 1032 actTicks[rank].pop_back(); 1033 1034 // record an new activation (in the future) 1035 actTicks[rank].push_front(act_tick); 1036 1037 // cannot activate more than X times in time window tXAW, push the 1038 // next one (the X + 1'st activate) to be tXAW away from the 1039 // oldest in our window of X 1040 if (actTicks[rank].back() && (act_tick - actTicks[rank].back()) < tXAW) { 1041 DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate no earlier " 1042 "than %d\n", activationLimit, actTicks[rank].back() + tXAW); 1043 for(int j = 0; j < banksPerRank; j++) 1044 // next activate must not happen before end of window 1045 banks[rank][j].actAllowedAt = actTicks[rank].back() + tXAW; 1046 } 1047} 1048 1049void 1050SimpleDRAM::doDRAMAccess(DRAMPacket* dram_pkt) 1051{ 1052 1053 DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n", 1054 dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row); 1055 1056 // estimate the bank and access latency 1057 pair<Tick, Tick> lat = estimateLatency(dram_pkt, curTick()); 1058 Tick bankLat = lat.first; 1059 Tick accessLat = lat.second; 1060 Tick actTick; 1061 1062 // This request was woken up at this time based on a prior call 1063 // to estimateLatency(). However, between then and now, both the 1064 // accessLatency and/or busBusyUntil may have changed. We need 1065 // to correct for that. 1066 1067 Tick addDelay = (curTick() + accessLat < busBusyUntil) ? 1068 busBusyUntil - (curTick() + accessLat) : 0; 1069 1070 Bank& bank = dram_pkt->bankRef; 1071 1072 // Update bank state 1073 if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive) { 1074 bank.openRow = dram_pkt->row; 1075 bank.freeAt = curTick() + addDelay + accessLat; 1076 bank.bytesAccessed += burstSize; 1077 1078 // If you activated a new row do to this access, the next access 1079 // will have to respect tRAS for this bank. 1080 if (!rowHitFlag) { 1081 // any waiting for banks account for in freeAt 1082 actTick = bank.freeAt - tCL - tRCD; 1083 bank.tRASDoneAt = actTick + tRAS; 1084 recordActivate(actTick, dram_pkt->rank, dram_pkt->bank); 1085 1086 // sample the number of bytes accessed and reset it as 1087 // we are now closing this row 1088 bytesPerActivate.sample(bank.bytesAccessed); 1089 bank.bytesAccessed = 0; 1090 } 1091 1092 if (pageMgmt == Enums::open_adaptive) { 1093 // a twist on the open page policy is to not blindly keep the 1094 // page open, but close it if there are no row hits, and there 1095 // are bank conflicts in the queue 1096 bool got_more_hits = false; 1097 bool got_bank_conflict = false; 1098 1099 // either look at the read queue or write queue 1100 const deque<DRAMPacket*>& queue = dram_pkt->isRead ? readQueue : 1101 writeQueue; 1102 auto p = queue.begin(); 1103 // make sure we are not considering the packet that we are 1104 // currently dealing with (which is the head of the queue) 1105 ++p; 1106 1107 // keep on looking until we have found both or reached 1108 // the end 1109 while (!(got_more_hits && got_bank_conflict) && 1110 p != queue.end()) { 1111 bool same_rank_bank = (dram_pkt->rank == (*p)->rank) && 1112 (dram_pkt->bank == (*p)->bank); 1113 bool same_row = dram_pkt->row == (*p)->row; 1114 got_more_hits |= same_rank_bank && same_row; 1115 got_bank_conflict |= same_rank_bank && !same_row; 1116 ++p; 1117 } 1118 1119 // auto pre-charge 1120 if (!got_more_hits && got_bank_conflict) { 1121 bank.openRow = -1; 1122 bank.freeAt = std::max(bank.freeAt, bank.tRASDoneAt) + tRP; 1123 --numBanksActive; 1124 if (numBanksActive == 0) { 1125 startTickPrechargeAll = std::max(startTickPrechargeAll, 1126 bank.freeAt); 1127 DPRINTF(DRAM, "All banks precharged at tick: %ld\n", 1128 startTickPrechargeAll); 1129 } 1130 DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId); 1131 } 1132 } 1133 1134 DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt); 1135 } else if (pageMgmt == Enums::close) { 1136 actTick = curTick() + addDelay + accessLat - tRCD - tCL; 1137 recordActivate(actTick, dram_pkt->rank, dram_pkt->bank); 1138 1139 // If the DRAM has a very quick tRAS, bank can be made free 1140 // after consecutive tCL,tRCD,tRP times. In general, however, 1141 // an additional wait is required to respect tRAS. 1142 bank.freeAt = std::max(actTick + tRAS + tRP, 1143 actTick + tRCD + tCL + tRP); 1144 DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt); 1145 bytesPerActivate.sample(burstSize); 1146 startTickPrechargeAll = std::max(startTickPrechargeAll, bank.freeAt); 1147 } else 1148 panic("No page management policy chosen\n"); 1149 1150 // Update request parameters 1151 dram_pkt->readyTime = curTick() + addDelay + accessLat + tBURST; 1152 1153 1154 DPRINTF(DRAM, "Req %lld: curtick is %lld accessLat is %d " \ 1155 "readytime is %lld busbusyuntil is %lld. " \ 1156 "Scheduling at readyTime\n", dram_pkt->addr, 1157 curTick(), accessLat, dram_pkt->readyTime, busBusyUntil); 1158 1159 // Make sure requests are not overlapping on the databus 1160 assert (dram_pkt->readyTime - busBusyUntil >= tBURST); 1161 1162 // Update bus state 1163 busBusyUntil = dram_pkt->readyTime; 1164 1165 DPRINTF(DRAM,"Access time is %lld\n", 1166 dram_pkt->readyTime - dram_pkt->entryTime); 1167 1168 // Update the minimum timing between the requests 1169 newTime = (busBusyUntil > tRP + tRCD + tCL) ? 1170 std::max(busBusyUntil - (tRP + tRCD + tCL), curTick()) : curTick(); 1171 1172 // Update the access related stats 1173 if (dram_pkt->isRead) { 1174 if (rowHitFlag) 1175 readRowHits++; 1176 bytesReadDRAM += burstSize; 1177 perBankRdBursts[dram_pkt->bankId]++; 1178 } else { 1179 if (rowHitFlag) 1180 writeRowHits++; 1181 bytesWritten += burstSize; 1182 perBankWrBursts[dram_pkt->bankId]++; 1183 1184 // At this point, commonality between reads and writes ends. 1185 // For writes, we are done since we long ago responded to the 1186 // requestor. 1187 return; 1188 } 1189 1190 // Update latency stats 1191 totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime; 1192 totBankLat += bankLat; 1193 totBusLat += tBURST; 1194 totQLat += dram_pkt->readyTime - dram_pkt->entryTime - bankLat - tBURST; 1195 1196 1197 // At this point we're done dealing with the request 1198 // It will be moved to a separate response queue with a 1199 // correct readyTime, and eventually be sent back at that 1200 //time 1201 moveToRespQ(); 1202 1203 // Schedule the next read event 1204 if (!nextReqEvent.scheduled() && !stopReads){ 1205 schedule(nextReqEvent, newTime); 1206 } else { 1207 if (newTime < nextReqEvent.when()) 1208 reschedule(nextReqEvent, newTime); 1209 } 1210} 1211 1212void 1213SimpleDRAM::moveToRespQ() 1214{ 1215 // Remove from read queue 1216 DRAMPacket* dram_pkt = readQueue.front(); 1217 readQueue.pop_front(); 1218 1219 // sanity check 1220 assert(dram_pkt->size <= burstSize); 1221 1222 // Insert into response queue sorted by readyTime 1223 // It will be sent back to the requestor at its 1224 // readyTime 1225 if (respQueue.empty()) { 1226 respQueue.push_front(dram_pkt); 1227 assert(!respondEvent.scheduled()); 1228 assert(dram_pkt->readyTime >= curTick()); 1229 schedule(respondEvent, dram_pkt->readyTime); 1230 } else { 1231 bool done = false; 1232 auto i = respQueue.begin(); 1233 while (!done && i != respQueue.end()) { 1234 if ((*i)->readyTime > dram_pkt->readyTime) { 1235 respQueue.insert(i, dram_pkt); 1236 done = true; 1237 } 1238 ++i; 1239 } 1240 1241 if (!done) 1242 respQueue.push_back(dram_pkt); 1243 1244 assert(respondEvent.scheduled()); 1245 1246 if (respQueue.front()->readyTime < respondEvent.when()) { 1247 assert(respQueue.front()->readyTime >= curTick()); 1248 reschedule(respondEvent, respQueue.front()->readyTime); 1249 } 1250 } 1251} 1252 1253void 1254SimpleDRAM::scheduleNextReq() 1255{ 1256 DPRINTF(DRAM, "Reached scheduleNextReq()\n"); 1257 1258 // Figure out which read request goes next, and move it to the 1259 // front of the read queue 1260 if (!chooseNextRead()) { 1261 // In the case there is no read request to go next, see if we 1262 // are asked to drain, and if so trigger writes, this also 1263 // ensures that if we hit the write limit we will do this 1264 // multiple times until we are completely drained 1265 if (drainManager && !writeQueue.empty() && !writeEvent.scheduled()) 1266 triggerWrites(); 1267 } else { 1268 doDRAMAccess(readQueue.front()); 1269 } 1270} 1271 1272Tick 1273SimpleDRAM::maxBankFreeAt() const 1274{ 1275 Tick banksFree = 0; 1276 1277 for(int i = 0; i < ranksPerChannel; i++) 1278 for(int j = 0; j < banksPerRank; j++) 1279 banksFree = std::max(banks[i][j].freeAt, banksFree); 1280 1281 return banksFree; 1282} 1283 1284uint64_t 1285SimpleDRAM::minBankFreeAt(const deque<DRAMPacket*>& queue) const 1286{ 1287 uint64_t bank_mask = 0; 1288 Tick freeAt = MaxTick; 1289 1290 // detemrine if we have queued transactions targetting the 1291 // bank in question 1292 vector<bool> got_waiting(ranksPerChannel * banksPerRank, false); 1293 for (auto p = queue.begin(); p != queue.end(); ++p) { 1294 got_waiting[(*p)->bankId] = true; 1295 } 1296 1297 for (int i = 0; i < ranksPerChannel; i++) { 1298 for (int j = 0; j < banksPerRank; j++) { 1299 // if we have waiting requests for the bank, and it is 1300 // amongst the first available, update the mask 1301 if (got_waiting[i * banksPerRank + j] && 1302 banks[i][j].freeAt <= freeAt) { 1303 // reset bank mask if new minimum is found 1304 if (banks[i][j].freeAt < freeAt) 1305 bank_mask = 0; 1306 // set the bit corresponding to the available bank 1307 uint8_t bit_index = i * ranksPerChannel + j; 1308 replaceBits(bank_mask, bit_index, bit_index, 1); 1309 freeAt = banks[i][j].freeAt; 1310 } 1311 } 1312 } 1313 return bank_mask; 1314} 1315 1316void 1317SimpleDRAM::processRefreshEvent() 1318{ 1319 DPRINTF(DRAM, "Refreshing at tick %ld\n", curTick()); 1320 1321 Tick banksFree = std::max(curTick(), maxBankFreeAt()) + tRFC; 1322 1323 for(int i = 0; i < ranksPerChannel; i++) 1324 for(int j = 0; j < banksPerRank; j++) { 1325 banks[i][j].freeAt = banksFree; 1326 banks[i][j].openRow = -1; 1327 } 1328 1329 // updating startTickPrechargeAll, isprechargeAll 1330 numBanksActive = 0; 1331 startTickPrechargeAll = banksFree; 1332 1333 schedule(refreshEvent, curTick() + tREFI); 1334} 1335 1336void 1337SimpleDRAM::regStats() 1338{ 1339 using namespace Stats; 1340 1341 AbstractMemory::regStats(); 1342 1343 readReqs 1344 .name(name() + ".readReqs") 1345 .desc("Number of read requests accepted"); 1346 1347 writeReqs 1348 .name(name() + ".writeReqs") 1349 .desc("Number of write requests accepted"); 1350 1351 readBursts 1352 .name(name() + ".readBursts") 1353 .desc("Number of DRAM read bursts, " 1354 "including those serviced by the write queue"); 1355 1356 writeBursts 1357 .name(name() + ".writeBursts") 1358 .desc("Number of DRAM write bursts, " 1359 "including those merged in the write queue"); 1360 1361 servicedByWrQ 1362 .name(name() + ".servicedByWrQ") 1363 .desc("Number of DRAM read bursts serviced by the write queue"); 1364 1365 mergedWrBursts 1366 .name(name() + ".mergedWrBursts") 1367 .desc("Number of DRAM write bursts merged with an existing one"); 1368 1369 neitherReadNorWrite 1370 .name(name() + ".neitherReadNorWriteReqs") 1371 .desc("Number of requests that are neither read nor write"); 1372 1373 perBankRdBursts 1374 .init(banksPerRank * ranksPerChannel) 1375 .name(name() + ".perBankRdBursts") 1376 .desc("Per bank write bursts"); 1377 1378 perBankWrBursts 1379 .init(banksPerRank * ranksPerChannel) 1380 .name(name() + ".perBankWrBursts") 1381 .desc("Per bank write bursts"); 1382 1383 avgRdQLen 1384 .name(name() + ".avgRdQLen") 1385 .desc("Average read queue length when enqueuing") 1386 .precision(2); 1387 1388 avgWrQLen 1389 .name(name() + ".avgWrQLen") 1390 .desc("Average write queue length when enqueuing") 1391 .precision(2); 1392 1393 totQLat 1394 .name(name() + ".totQLat") 1395 .desc("Total ticks spent queuing"); 1396 1397 totBankLat 1398 .name(name() + ".totBankLat") 1399 .desc("Total ticks spent accessing banks"); 1400 1401 totBusLat 1402 .name(name() + ".totBusLat") 1403 .desc("Total ticks spent in databus transfers"); 1404 1405 totMemAccLat 1406 .name(name() + ".totMemAccLat") 1407 .desc("Total ticks spent from burst creation until serviced " 1408 "by the DRAM"); 1409 1410 avgQLat 1411 .name(name() + ".avgQLat") 1412 .desc("Average queueing delay per DRAM burst") 1413 .precision(2); 1414 1415 avgQLat = totQLat / (readBursts - servicedByWrQ); 1416 1417 avgBankLat 1418 .name(name() + ".avgBankLat") 1419 .desc("Average bank access latency per DRAM burst") 1420 .precision(2); 1421 1422 avgBankLat = totBankLat / (readBursts - servicedByWrQ); 1423 1424 avgBusLat 1425 .name(name() + ".avgBusLat") 1426 .desc("Average bus latency per DRAM burst") 1427 .precision(2); 1428 1429 avgBusLat = totBusLat / (readBursts - servicedByWrQ); 1430 1431 avgMemAccLat 1432 .name(name() + ".avgMemAccLat") 1433 .desc("Average memory access latency per DRAM burst") 1434 .precision(2); 1435 1436 avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ); 1437 1438 numRdRetry 1439 .name(name() + ".numRdRetry") 1440 .desc("Number of times read queue was full causing retry"); 1441 1442 numWrRetry 1443 .name(name() + ".numWrRetry") 1444 .desc("Number of times write queue was full causing retry"); 1445 1446 readRowHits 1447 .name(name() + ".readRowHits") 1448 .desc("Number of row buffer hits during reads"); 1449 1450 writeRowHits 1451 .name(name() + ".writeRowHits") 1452 .desc("Number of row buffer hits during writes"); 1453 1454 readRowHitRate 1455 .name(name() + ".readRowHitRate") 1456 .desc("Row buffer hit rate for reads") 1457 .precision(2); 1458 1459 readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100; 1460 1461 writeRowHitRate 1462 .name(name() + ".writeRowHitRate") 1463 .desc("Row buffer hit rate for writes") 1464 .precision(2); 1465 1466 writeRowHitRate = (writeRowHits / (writeBursts - mergedWrBursts)) * 100; 1467 1468 readPktSize 1469 .init(ceilLog2(burstSize) + 1) 1470 .name(name() + ".readPktSize") 1471 .desc("Read request sizes (log2)"); 1472 1473 writePktSize 1474 .init(ceilLog2(burstSize) + 1) 1475 .name(name() + ".writePktSize") 1476 .desc("Write request sizes (log2)"); 1477 1478 rdQLenPdf 1479 .init(readBufferSize) 1480 .name(name() + ".rdQLenPdf") 1481 .desc("What read queue length does an incoming req see"); 1482 1483 wrQLenPdf 1484 .init(writeBufferSize) 1485 .name(name() + ".wrQLenPdf") 1486 .desc("What write queue length does an incoming req see"); 1487 1488 bytesPerActivate 1489 .init(rowBufferSize) 1490 .name(name() + ".bytesPerActivate") 1491 .desc("Bytes accessed per row activation") 1492 .flags(nozero); 1493 1494 bytesReadDRAM 1495 .name(name() + ".bytesReadDRAM") 1496 .desc("Total number of bytes read from DRAM"); 1497 1498 bytesReadWrQ 1499 .name(name() + ".bytesReadWrQ") 1500 .desc("Total number of bytes read from write queue"); 1501 1502 bytesWritten 1503 .name(name() + ".bytesWritten") 1504 .desc("Total number of bytes written to DRAM"); 1505 1506 bytesReadSys 1507 .name(name() + ".bytesReadSys") 1508 .desc("Total read bytes from the system interface side"); 1509 1510 bytesWrittenSys 1511 .name(name() + ".bytesWrittenSys") 1512 .desc("Total written bytes from the system interface side"); 1513 1514 avgRdBW 1515 .name(name() + ".avgRdBW") 1516 .desc("Average DRAM read bandwidth in MiByte/s") 1517 .precision(2); 1518 1519 avgRdBW = (bytesReadDRAM / 1000000) / simSeconds; 1520 1521 avgWrBW 1522 .name(name() + ".avgWrBW") 1523 .desc("Average achieved write bandwidth in MiByte/s") 1524 .precision(2); 1525 1526 avgWrBW = (bytesWritten / 1000000) / simSeconds; 1527 1528 avgRdBWSys 1529 .name(name() + ".avgRdBWSys") 1530 .desc("Average system read bandwidth in MiByte/s") 1531 .precision(2); 1532 1533 avgRdBWSys = (bytesReadSys / 1000000) / simSeconds; 1534 1535 avgWrBWSys 1536 .name(name() + ".avgWrBWSys") 1537 .desc("Average system write bandwidth in MiByte/s") 1538 .precision(2); 1539 1540 avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds; 1541 1542 peakBW 1543 .name(name() + ".peakBW") 1544 .desc("Theoretical peak bandwidth in MiByte/s") 1545 .precision(2); 1546 1547 peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000; 1548 1549 busUtil 1550 .name(name() + ".busUtil") 1551 .desc("Data bus utilization in percentage") 1552 .precision(2); 1553 1554 busUtil = (avgRdBW + avgWrBW) / peakBW * 100; 1555 1556 totGap 1557 .name(name() + ".totGap") 1558 .desc("Total gap between requests"); 1559 1560 avgGap 1561 .name(name() + ".avgGap") 1562 .desc("Average gap between requests") 1563 .precision(2); 1564 1565 avgGap = totGap / (readReqs + writeReqs); 1566 1567 // Stats for DRAM Power calculation based on Micron datasheet 1568 busUtilRead 1569 .name(name() + ".busUtilRead") 1570 .desc("Data bus utilization in percentage for reads") 1571 .precision(2); 1572 1573 busUtilRead = avgRdBW / peakBW * 100; 1574 1575 busUtilWrite 1576 .name(name() + ".busUtilWrite") 1577 .desc("Data bus utilization in percentage for writes") 1578 .precision(2); 1579 1580 busUtilWrite = avgWrBW / peakBW * 100; 1581 1582 pageHitRate 1583 .name(name() + ".pageHitRate") 1584 .desc("Row buffer hit rate, read and write combined") 1585 .precision(2); 1586 1587 pageHitRate = (writeRowHits + readRowHits) / 1588 (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100; 1589 1590 prechargeAllPercent 1591 .name(name() + ".prechargeAllPercent") 1592 .desc("Percentage of time for which DRAM has all the banks in " 1593 "precharge state") 1594 .precision(2); 1595 1596 prechargeAllPercent = prechargeAllTime / simTicks * 100; 1597} 1598 1599void 1600SimpleDRAM::recvFunctional(PacketPtr pkt) 1601{ 1602 // rely on the abstract memory 1603 functionalAccess(pkt); 1604} 1605 1606BaseSlavePort& 1607SimpleDRAM::getSlavePort(const string &if_name, PortID idx) 1608{ 1609 if (if_name != "port") { 1610 return MemObject::getSlavePort(if_name, idx); 1611 } else { 1612 return port; 1613 } 1614} 1615 1616unsigned int 1617SimpleDRAM::drain(DrainManager *dm) 1618{ 1619 unsigned int count = port.drain(dm); 1620 1621 // if there is anything in any of our internal queues, keep track 1622 // of that as well 1623 if (!(writeQueue.empty() && readQueue.empty() && 1624 respQueue.empty())) { 1625 DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d," 1626 " resp: %d\n", writeQueue.size(), readQueue.size(), 1627 respQueue.size()); 1628 ++count; 1629 drainManager = dm; 1630 // the only part that is not drained automatically over time 1631 // is the write queue, thus trigger writes if there are any 1632 // waiting and no reads waiting, otherwise wait until the 1633 // reads are done 1634 if (readQueue.empty() && !writeQueue.empty() && 1635 !writeEvent.scheduled()) 1636 triggerWrites(); 1637 } 1638 1639 if (count) 1640 setDrainState(Drainable::Draining); 1641 else 1642 setDrainState(Drainable::Drained); 1643 return count; 1644} 1645 1646SimpleDRAM::MemoryPort::MemoryPort(const std::string& name, SimpleDRAM& _memory) 1647 : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this), 1648 memory(_memory) 1649{ } 1650 1651AddrRangeList 1652SimpleDRAM::MemoryPort::getAddrRanges() const 1653{ 1654 AddrRangeList ranges; 1655 ranges.push_back(memory.getAddrRange()); 1656 return ranges; 1657} 1658 1659void 1660SimpleDRAM::MemoryPort::recvFunctional(PacketPtr pkt) 1661{ 1662 pkt->pushLabel(memory.name()); 1663 1664 if (!queue.checkFunctional(pkt)) { 1665 // Default implementation of SimpleTimingPort::recvFunctional() 1666 // calls recvAtomic() and throws away the latency; we can save a 1667 // little here by just not calculating the latency. 1668 memory.recvFunctional(pkt); 1669 } 1670 1671 pkt->popLabel(); 1672} 1673 1674Tick 1675SimpleDRAM::MemoryPort::recvAtomic(PacketPtr pkt) 1676{ 1677 return memory.recvAtomic(pkt); 1678} 1679 1680bool 1681SimpleDRAM::MemoryPort::recvTimingReq(PacketPtr pkt) 1682{ 1683 // pass it to the memory controller 1684 return memory.recvTimingReq(pkt); 1685} 1686 1687SimpleDRAM* 1688SimpleDRAMParams::create() 1689{ 1690 return new SimpleDRAM(this); 1691} 1692