dram_ctrl.cc revision 9969
/*
 * Copyright (c) 2010-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 *          Ani Udipi
 *          Neha Agarwal
 */

#include "base/trace.hh"
#include "base/bitfield.hh"
#include "debug/Drain.hh"
#include "debug/DRAM.hh"
#include "mem/simple_dram.hh"
#include "sim/system.hh"

using namespace std;

SimpleDRAM::SimpleDRAM(const SimpleDRAMParams* p) :
    AbstractMemory(p),
    port(name() + ".port", *this),
    retryRdReq(false), retryWrReq(false),
    rowHitFlag(false), stopReads(false),
    writeEvent(this), respondEvent(this),
    refreshEvent(this), nextReqEvent(this), drainManager(NULL),
    deviceBusWidth(p->device_bus_width), burstLength(p->burst_length),
    deviceRowBufferSize(p->device_rowbuffer_size),
    devicesPerRank(p->devices_per_rank),
    burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8),
    rowBufferSize(devicesPerRank * deviceRowBufferSize),
    ranksPerChannel(p->ranks_per_channel),
    banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
    readBufferSize(p->read_buffer_size),
    writeBufferSize(p->write_buffer_size),
    writeThresholdPerc(p->write_thresh_perc),
    tWTR(p->tWTR), tBURST(p->tBURST),
    tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS),
    tRFC(p->tRFC), tREFI(p->tREFI),
    tXAW(p->tXAW), activationLimit(p->activation_limit),
    memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
    pageMgmt(p->page_policy),
    frontendLatency(p->static_frontend_latency),
    backendLatency(p->static_backend_latency),
    busBusyUntil(0), writeStartTime(0),
    prevArrival(0), numReqs(0)
{
    // create the bank states based on the dimensions of the ranks and
    // banks
    banks.resize(ranksPerChannel);
    actTicks.resize(ranksPerChannel);
    for (size_t c = 0; c < ranksPerChannel; ++c) {
        banks[c].resize(banksPerRank);
        actTicks[c].resize(activationLimit, 0);
    }

    // round the write threshold percent to a whole number of entries
    // in the buffer
    writeThreshold = writeBufferSize * writeThresholdPerc / 100.0;
}

void
SimpleDRAM::init()
{
    if (!port.isConnected()) {
        fatal("SimpleDRAM %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }

    // we could deal with plenty of options here, but for now do a
    // quick sanity check
    DPRINTF(DRAM, "Burst size %d bytes\n", burstSize);

    // determine the rows per bank by looking at the total capacity
    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());

    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
            AbstractMemory::size());

    columnsPerRowBuffer = rowBufferSize / burstSize;

    DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n",
            rowBufferSize, columnsPerRowBuffer);

    rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel);
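
    // Illustrative example (assumed numbers, not a requirement): a
    // 64-byte burst and a 2kB row buffer give 32 columns per row
    // buffer, and with 8 banks per rank, 2 ranks and 1GB of capacity
    // this works out to 1GB / (2kB * 8 * 2) = 32768 rows per bank.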

    if (range.interleaved()) {
        if (channels != range.stripes())
            panic("%s has %d interleaved address stripes but %d channel(s)\n",
                  name(), range.stripes(), channels);

        if (addrMapping == Enums::RaBaChCo) {
            if (rowBufferSize != range.granularity()) {
                panic("Interleaving of %s doesn't match RaBaChCo address map\n",
                      name());
            }
        } else if (addrMapping == Enums::RaBaCoCh) {
            if (burstSize != range.granularity()) {
                panic("Interleaving of %s doesn't match RaBaCoCh address map\n",
                      name());
            }
        } else if (addrMapping == Enums::CoRaBaCh) {
            if (burstSize != range.granularity())
                panic("Interleaving of %s doesn't match CoRaBaCh address map\n",
                      name());
        }
    }
}

void
SimpleDRAM::startup()
{
    // print the configuration of the controller
    printParams();

    // kick off the refresh
    schedule(refreshEvent, curTick() + tREFI);
}

Tick
SimpleDRAM::recvAtomic(PacketPtr pkt)
{
    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());

    // do the actual memory access and turn the packet into a response
    access(pkt);

    Tick latency = 0;
    if (!pkt->memInhibitAsserted() && pkt->hasData()) {
        // this value is not supposed to be accurate, just enough to
        // keep things going, mimic a closed page
        latency = tRP + tRCD + tCL;
    }
    return latency;
}

bool
SimpleDRAM::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n",
            readBufferSize, readQueue.size() + respQueue.size(),
            neededEntries);

    return
        (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize;
}

bool
SimpleDRAM::writeQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, writeQueue.size(), neededEntries);
    return (writeQueue.size() + neededEntries) > writeBufferSize;
}

SimpleDRAM::DRAMPacket*
SimpleDRAM::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size, bool isRead)
{
    // decode the address based on the address mapping scheme, with
    // Ra, Co, Ba and Ch denoting rank, column, bank and channel,
    // respectively
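    //
    // Worked example with assumed parameters (purely illustrative):
    // with 64-byte bursts, 32 columns per row buffer, 1 channel,
    // 8 banks per rank and 2 ranks, RaBaChCo decodes byte address
    // 0x12345 as burst address 74565 / 64 = 1165, strips the column
    // and channel bits (1165 / 32 = 36), and yields bank 36 % 8 = 4,
    // rank (36 / 8) % 2 = 0 and row (36 / 16) % rowsPerBank = 2.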
    uint8_t rank;
    uint8_t bank;
    uint16_t row;

    // truncate the address to the access granularity
    Addr addr = dramPktAddr / burstSize;

    // we have removed the lowest order address bits that denote the
    // position within the column
    if (addrMapping == Enums::RaBaChCo) {
        // the lowest order bits denote the column to ensure that
        // sequential cache lines occupy the same row
        addr = addr / columnsPerRowBuffer;

        // take out the channel part of the address
        addr = addr / channels;

        // after the channel bits, get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::RaBaCoCh) {
        // take out the channel part of the address
        addr = addr / channels;

        // next, the column
        addr = addr / columnsPerRowBuffer;

        // after the column bits, we get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::CoRaBaCh) {
        // optimise for closed page mode and utilise maximum
        // parallelism of the DRAM (at the cost of power)

        // take out the channel part of the address, note that this has
        // to match with how accesses are interleaved between the
        // controllers in the address mapping
        addr = addr / channels;

        // start with the bank bits, as this provides the maximum
        // opportunity for parallelism between requests
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // next get the rank bits
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // next the column bits which we do not need to keep track of
        // and simply skip past
        addr = addr / columnsPerRowBuffer;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else
        panic("Unknown address mapping policy chosen!");

    assert(rank < ranksPerChannel);
    assert(bank < banksPerRank);
    assert(row < rowsPerBank);

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            dramPktAddr, rank, bank, row);

    // create the corresponding DRAM packet with the entry time and
    // ready time set to the current tick, the latter will be updated
    // later
    uint16_t bank_id = banksPerRank * rank + bank;
    return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
                          size, banks[rank][bank]);
}

void
SimpleDRAM::addToReadQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pktCount != 0);

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    // Note if the pkt starting address is not aligned to burst size, the
    // address of the first DRAM packet is kept unaligned. Subsequent DRAM
    // packets are aligned to burst size boundaries. This is to ensure we
    // accurately check read packets against packets in the write queue.
    Addr addr = pkt->getAddr();
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
        readPktSize[ceilLog2(size)]++;
        readBursts++;

        // First check write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) {
            // check if the read is subsumed in the write entry we are
            // looking at
            if ((*i)->addr <= addr &&
                (addr + size) <= ((*i)->addr + (*i)->size)) {
                foundInWrQ = true;
                servicedByWrQ++;
                pktsServicedByWrQ++;
                DPRINTF(DRAM, "Read to addr %lld with size %d serviced by "
                        "write queue\n", addr, size);
                bytesRead += burstSize;
                bytesConsumedRd += size;
                break;
            }
        }

        // If not found in the write q, make a DRAM packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pktCount > 1 && burst_helper == NULL) {
                DPRINTF(DRAM, "Read to addr %lld translates to %d "
                        "dram requests\n", pkt->getAddr(), pktCount);
                burst_helper = new BurstHelper(pktCount);
            }

            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true);
            dram_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            rdQLenPdf[readQueue.size() + respQueue.size()]++;

            DPRINTF(DRAM, "Adding to read queue\n");

            readQueue.push_back(dram_pkt);

            // Update stats
            assert(dram_pkt->bankId < ranksPerChannel * banksPerRank);
            perBankRdReqs[dram_pkt->bankId]++;

            avgRdQLen = readQueue.size() + respQueue.size();
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
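        // For example (illustrative arithmetic only): with a 64-byte
        // burst, an unaligned addr of 0x1234 advances to the next
        // boundary 0x1240, since (0x1234 | 0x3f) + 1 == 0x1240.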
        addr = (addr | (burstSize - 1)) + 1;
    }

    // If all packets are serviced by write queue, we send the response back
    if (pktsServicedByWrQ == pktCount) {
        accessAndRespond(pkt, frontendLatency);
        return;
    }

    // Update how many split packets are serviced by write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // If we are not already scheduled to get the read request out of
    // the queue, do so now
    if (!nextReqEvent.scheduled() && !stopReads) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
SimpleDRAM::processWriteEvent()
{
    assert(!writeQueue.empty());
    uint32_t numWritesThisTime = 0;

    DPRINTF(DRAM, "Beginning DRAM Writes\n");
    Tick temp1 M5_VAR_USED = std::max(curTick(), busBusyUntil);
    Tick temp2 M5_VAR_USED = std::max(curTick(), maxBankFreeAt());

    // @todo: are there any dangers with the untimed while loop?
    while (!writeQueue.empty()) {
        if (numWritesThisTime >= writeThreshold) {
            DPRINTF(DRAM, "Hit write threshold %d\n", writeThreshold);
            break;
        }

        chooseNextWrite();
        DRAMPacket* dram_pkt = writeQueue.front();
        // sanity check
        assert(dram_pkt->size <= burstSize);
        doDRAMAccess(dram_pkt);

        writeQueue.pop_front();
        delete dram_pkt;
        numWritesThisTime++;
    }

    DPRINTF(DRAM, "Completed %d writes, bus busy for %lld ticks, "
            "banks busy for %lld ticks\n", numWritesThisTime,
            busBusyUntil - temp1, maxBankFreeAt() - temp2);

    // Update stats
    avgWrQLen = writeQueue.size();

    // turn the bus back around for reads again
    busBusyUntil += tWTR;
    stopReads = false;

    if (retryWrReq) {
        retryWrReq = false;
        port.sendRetry();
    }

    // if there is nothing left in any queue, signal a drain
    if (writeQueue.empty() && readQueue.empty() &&
        respQueue.empty() && drainManager) {
        drainManager->signalDrainDone();
        drainManager = NULL;
    }

    // Once you're done emptying the write queue, check if there's
    // anything in the read queue, and call schedule if required. The
    // retry above could already have caused it to be scheduled, so
    // first check
    if (!nextReqEvent.scheduled())
        schedule(nextReqEvent, busBusyUntil);
}

void
SimpleDRAM::triggerWrites()
{
    DPRINTF(DRAM, "Writes triggered at %lld\n", curTick());
    // Flag variable to stop any more read scheduling
    stopReads = true;

    writeStartTime = std::max(busBusyUntil, curTick()) + tWTR;

    DPRINTF(DRAM, "Writes scheduled at %lld\n", writeStartTime);

    assert(writeStartTime >= curTick());
    assert(!writeEvent.scheduled());
    schedule(writeEvent, writeStartTime);
}

void
SimpleDRAM::addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    Addr addr = pkt->getAddr();
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
        writePktSize[ceilLog2(size)]++;
        writeBursts++;

        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not so we
        // can stop at that point and also avoid enqueueing a new
        // request
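        //
        // In outline (summary added for clarity): the new burst may
        // completely cover an existing entry, extend an entry at the
        // front, be completely covered by an entry, or extend an entry
        // at the back; if none of these apply, a new entry is created
        // further below.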
        bool merged = false;
        auto w = writeQueue.begin();

        while(!merged && w != writeQueue.end()) {
            // either of the two could be first, if they are the same
            // it does not matter which way we go
            if ((*w)->addr >= addr) {
                // the existing one starts after the new one, figure
                // out where the new one ends with respect to the
                // existing one
                if ((addr + size) >= ((*w)->addr + (*w)->size)) {
                    // check if the existing one is completely
                    // subsumed in the new one
                    DPRINTF(DRAM, "Merging write covering existing burst\n");
                    merged = true;
                    // update both the address and the size
                    (*w)->addr = addr;
                    (*w)->size = size;
                } else if ((addr + size) >= (*w)->addr &&
                           ((*w)->addr + (*w)->size - addr) <= burstSize) {
                    // the new one is just before or partially
                    // overlapping with the existing one, and together
                    // they fit within a burst
                    DPRINTF(DRAM, "Merging write before existing burst\n");
                    merged = true;
                    // the existing queue item needs to be adjusted with
                    // respect to both address and size; compute the new
                    // size before overwriting the address it depends on
                    (*w)->size = (*w)->addr + (*w)->size - addr;
                    (*w)->addr = addr;
                }
            } else {
                // the new one starts after the current one, figure
                // out where the existing one ends with respect to the
                // new one
                if (((*w)->addr + (*w)->size) >= (addr + size)) {
                    // check if the new one is completely subsumed in the
                    // existing one
                    DPRINTF(DRAM, "Merging write into existing burst\n");
                    merged = true;
                    // no adjustments necessary
                } else if (((*w)->addr + (*w)->size) >= addr &&
                           (addr + size - (*w)->addr) <= burstSize) {
                    // the existing one is just before or partially
                    // overlapping with the new one, and together
                    // they fit within a burst
                    DPRINTF(DRAM, "Merging write after existing burst\n");
                    merged = true;
                    // the address is right, and only the size has
                    // to be adjusted
                    (*w)->size = addr + size - (*w)->addr;
                }
            }
            ++w;
        }

        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false);

            assert(writeQueue.size() < writeBufferSize);
            wrQLenPdf[writeQueue.size()]++;

            DPRINTF(DRAM, "Adding to write queue\n");

            writeQueue.push_back(dram_pkt);

            // Update stats
            assert(dram_pkt->bankId < ranksPerChannel * banksPerRank);
            perBankWrReqs[dram_pkt->bankId]++;

            avgWrQLen = writeQueue.size();
        }

        bytesConsumedWr += size;
        bytesWritten += burstSize;

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency);

    // If your write buffer is starting to fill up, drain it!
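    // For instance (illustrative numbers): with a 32-entry write
    // buffer and write_thresh_perc of 50, writes are triggered once
    // 16 bursts are queued.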
    if (writeQueue.size() >= writeThreshold && !stopReads){
        triggerWrites();
    }
}

void
SimpleDRAM::printParams() const
{
    // Sanity check print of important parameters
    DPRINTF(DRAM,
            "Memory controller %s physical organization\n" \
            "Number of devices per rank %d\n" \
            "Device bus width (in bits) %d\n" \
            "DRAM data bus burst %d\n" \
            "Row buffer size %d\n" \
            "Columns per row buffer %d\n" \
            "Rows per bank %d\n" \
            "Banks per rank %d\n" \
            "Ranks per channel %d\n" \
            "Total mem capacity %u\n",
            name(), devicesPerRank, deviceBusWidth, burstSize, rowBufferSize,
            columnsPerRowBuffer, rowsPerBank, banksPerRank, ranksPerChannel,
            rowBufferSize * rowsPerBank * banksPerRank * ranksPerChannel);

    string scheduler = memSchedPolicy == Enums::fcfs ? "FCFS" : "FR-FCFS";
    string address_mapping = addrMapping == Enums::RaBaChCo ? "RaBaChCo" :
        (addrMapping == Enums::RaBaCoCh ? "RaBaCoCh" : "CoRaBaCh");
    string page_policy = pageMgmt == Enums::open ? "OPEN" : "CLOSE";

    DPRINTF(DRAM,
            "Memory controller %s characteristics\n" \
            "Read buffer size %d\n" \
            "Write buffer size %d\n" \
            "Write buffer thresh %d\n" \
            "Scheduler %s\n" \
            "Address mapping %s\n" \
            "Page policy %s\n",
            name(), readBufferSize, writeBufferSize, writeThreshold,
            scheduler, address_mapping, page_policy);

    DPRINTF(DRAM, "Memory controller %s timing specs\n" \
            "tRCD %d ticks\n" \
            "tCL %d ticks\n" \
            "tRP %d ticks\n" \
            "tBURST %d ticks\n" \
            "tRFC %d ticks\n" \
            "tREFI %d ticks\n" \
            "tWTR %d ticks\n" \
            "tXAW (%d) %d ticks\n",
            name(), tRCD, tCL, tRP, tBURST, tRFC, tREFI, tWTR,
            activationLimit, tXAW);
}

void
SimpleDRAM::printQs() const {
    DPRINTF(DRAM, "===READ QUEUE===\n\n");
    for (auto i = readQueue.begin(); i != readQueue.end(); ++i) {
        DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
    for (auto i = respQueue.begin(); i != respQueue.end(); ++i) {
        DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
    for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) {
        DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
    }
}

bool
SimpleDRAM::recvTimingReq(PacketPtr pkt)
{
    /// @todo temporary hack to deal with memory corruption issues until
    /// 4-phase transactions are complete
    for (int x = 0; x < pendingDelete.size(); x++)
        delete pendingDelete[x];
    pendingDelete.clear();

    // This is where we enter from the outside world
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    // simply drop inhibited packets for now
    if (pkt->memInhibitAsserted()) {
        DPRINTF(DRAM, "Inhibited packet -- Dropping it now\n");
        pendingDelete.push_back(pkt);
        return true;
    }

    // Every million accesses, print the state of the queues
    if (numReqs % 1000000 == 0)
        printQs();

    // Calc avg gap between requests
    if (prevArrival != 0) {
        totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();

    // Find out how many dram packets a pkt translates to
    // If the burst size is equal to or larger than the pkt size, then a pkt
    // translates to only one dram packet. Otherwise, a pkt translates to
    // multiple dram packets
    unsigned size = pkt->getSize();
    unsigned offset = pkt->getAddr() & (burstSize - 1);
    unsigned int dram_pkt_count = divCeil(offset + size, burstSize);
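    // For example (illustrative only): a 128-byte request starting 16
    // bytes into a 64-byte burst needs divCeil(16 + 128, 64) = 3 DRAM
    // packets.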

    // check local buffers and do not accept if full
    if (pkt->isRead()) {
        assert(size != 0);
        if (readQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            numRdRetry++;
            return false;
        } else {
            addToReadQueue(pkt, dram_pkt_count);
            readReqs++;
            numReqs++;
        }
    } else if (pkt->isWrite()) {
        assert(size != 0);
        if (writeQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt, dram_pkt_count);
            writeReqs++;
            numReqs++;
        }
    } else {
        DPRINTF(DRAM, "Neither read nor write, ignore timing\n");
        neitherReadNorWrite++;
        accessAndRespond(pkt, 1);
    }

    retryRdReq = false;
    retryWrReq = false;
    return true;
}

void
SimpleDRAM::processRespondEvent()
{
    DPRINTF(DRAM,
            "processRespondEvent(): Some req has reached its readyTime\n");

    DRAMPacket* dram_pkt = respQueue.front();

    // Actually responds to the requestor
    bytesConsumedRd += dram_pkt->size;
    bytesRead += burstSize;
    if (dram_pkt->burstHelper) {
        // it is a split packet
        dram_pkt->burstHelper->burstsServiced++;
        if (dram_pkt->burstHelper->burstsServiced ==
            dram_pkt->burstHelper->burstCount) {
            // we have now serviced all children packets of a system packet
            // so we can now respond to the requester
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
            delete dram_pkt->burstHelper;
            dram_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
    }

    delete respQueue.front();
    respQueue.pop_front();

    // Update stats
    avgRdQLen = readQueue.size() + respQueue.size();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (writeQueue.empty() && readQueue.empty() &&
            drainManager) {
            drainManager->signalDrainDone();
            drainManager = NULL;
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetry();
    }
}

void
SimpleDRAM::chooseNextWrite()
{
    // This method does the arbitration between write requests. The
    // chosen packet is simply moved to the head of the write
    // queue. The other methods know that this is the place to
    // look. For example, with FCFS, this method does nothing
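    //
    // Under FR-FCFS (summary added for clarity) the scan below first
    // prefers a request that hits the currently open row of its bank,
    // and otherwise falls back to a request whose bank is free or
    // amongst the earliest to become free.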
    assert(!writeQueue.empty());

    if (writeQueue.size() == 1) {
        DPRINTF(DRAM, "Single write request, nothing to do\n");
        return;
    }

    if (memSchedPolicy == Enums::fcfs) {
        // Do nothing, since the correct request is already head
    } else if (memSchedPolicy == Enums::frfcfs) {
        // Only determine bank availability when needed
        uint64_t earliest_banks = 0;

        auto i = writeQueue.begin();
        bool foundRowHit = false;
        while (!foundRowHit && i != writeQueue.end()) {
            DRAMPacket* dram_pkt = *i;
            const Bank& bank = dram_pkt->bankRef;
            if (bank.openRow == dram_pkt->row) {
                DPRINTF(DRAM, "Write row buffer hit\n");
                writeQueue.erase(i);
                writeQueue.push_front(dram_pkt);
                foundRowHit = true;
            } else {
                // No row hit, go for first ready
                if (earliest_banks == 0)
                    earliest_banks = minBankFreeAt(writeQueue);

                // Bank is ready or is one of the first available banks
                if (bank.freeAt <= curTick() ||
                    bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) {
                    writeQueue.erase(i);
                    writeQueue.push_front(dram_pkt);
                    break;
                }
            }
            ++i;
        }
    } else
        panic("No scheduling policy chosen\n");

    DPRINTF(DRAM, "Selected next write request\n");
}

bool
SimpleDRAM::chooseNextRead()
{
    // This method does the arbitration between read requests. The
    // chosen packet is simply moved to the head of the queue. The
    // other methods know that this is the place to look. For example,
    // with FCFS, this method does nothing
    if (readQueue.empty()) {
        DPRINTF(DRAM, "No read request to select\n");
        return false;
    }

    // If there is only one request then there is nothing left to do
    if (readQueue.size() == 1)
        return true;

    if (memSchedPolicy == Enums::fcfs) {
        // Do nothing, since the request to serve is already the first
        // one in the read queue
    } else if (memSchedPolicy == Enums::frfcfs) {
        // Only determine this when needed
        uint64_t earliest_banks = 0;

        for (auto i = readQueue.begin(); i != readQueue.end(); ++i) {
            DRAMPacket* dram_pkt = *i;
            const Bank& bank = dram_pkt->bankRef;
            // Check if it is a row hit
            if (bank.openRow == dram_pkt->row) {
                DPRINTF(DRAM, "Row buffer hit\n");
                readQueue.erase(i);
                readQueue.push_front(dram_pkt);
                break;
            } else {
                // No row hit, go for first ready
                if (earliest_banks == 0)
                    earliest_banks = minBankFreeAt(readQueue);

                // Bank is ready or is the first available bank
                if (bank.freeAt <= curTick() ||
                    bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) {
                    readQueue.erase(i);
                    readQueue.push_front(dram_pkt);
                    break;
                }
            }
        }
    } else
        panic("No scheduling policy chosen!\n");

    DPRINTF(DRAM, "Selected next read request\n");
    return true;
}

void
SimpleDRAM::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
    DPRINTF(DRAM, "Responding to Address %lld.. ", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    access(pkt);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());

        // @todo someone should pay for this
        pkt->busFirstWordDelay = pkt->busLastWordDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, curTick() + static_latency);
    } else {
        // @todo the packet is going to be deleted, and the DRAMPacket
        // still has a pointer to it
        pendingDelete.push_back(pkt);
    }

    DPRINTF(DRAM, "Done\n");

    return;
}

pair<Tick, Tick>
SimpleDRAM::estimateLatency(DRAMPacket* dram_pkt, Tick inTime)
{
    // If a request reaches a bank at tick 'inTime', how much time
    // *after* that does it take to finish the request, depending
    // on bank status and page open policy. Note that this method
    // considers only the time taken for the actual read or write
    // to complete, NOT any additional time thereafter for tRAS or
    // tRP.
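    //
    // In short (summary added for clarity): an open-page row hit costs
    // tCL, an open-page row miss costs tRP + tRCD + tCL, and under the
    // closed-page policy every access costs tRCD + tCL, in each case
    // on top of any time spent waiting for the bank to become free.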
    Tick accLat = 0;
    Tick bankLat = 0;
    rowHitFlag = false;
    Tick potentialActTick;

    const Bank& bank = dram_pkt->bankRef;
    if (pageMgmt == Enums::open) { // open-page policy
        if (bank.openRow == dram_pkt->row) {
            // When we have a row-buffer hit,
            // we don't care about tRAS having expired or not,
            // but do care about bank being free for access
            rowHitFlag = true;

            // When a series of requests arrive to the same row,
            // DDR systems are capable of streaming data continuously
            // at maximum bandwidth (subject to tCCD). Here, we approximate
            // this condition, and assume that whenever a bank is already
            // busy and a new request comes in, it can be completed with no
            // penalty beyond waiting for the existing read to complete.
            if (bank.freeAt > inTime) {
                accLat += bank.freeAt - inTime;
                bankLat += 0;
            } else {
                // CAS latency only
                accLat += tCL;
                bankLat += tCL;
            }

        } else {
            // Row-buffer miss, need to close existing row
            // once tRAS has expired, then open the new one,
            // then add cas latency.
            Tick freeTime = std::max(bank.tRASDoneAt, bank.freeAt);

            if (freeTime > inTime)
                accLat += freeTime - inTime;

            // The bank is free, and you may be able to activate
            potentialActTick = inTime + accLat + tRP;
            if (potentialActTick < bank.actAllowedAt)
                accLat += bank.actAllowedAt - potentialActTick;

            accLat += tRP + tRCD + tCL;
            bankLat += tRP + tRCD + tCL;
        }
    } else if (pageMgmt == Enums::close) {
        // With a close page policy, no notion of
        // bank.tRASDoneAt
        if (bank.freeAt > inTime)
            accLat += bank.freeAt - inTime;

        // The bank is free, and you may be able to activate
        potentialActTick = inTime + accLat;
        if (potentialActTick < bank.actAllowedAt)
            accLat += bank.actAllowedAt - potentialActTick;

        // page already closed, simply open the row, and
        // add cas latency
        accLat += tRCD + tCL;
        bankLat += tRCD + tCL;
    } else
        panic("No page management policy chosen\n");

    DPRINTF(DRAM, "Returning < %lld, %lld > from estimateLatency()\n",
            bankLat, accLat);

    return make_pair(bankLat, accLat);
}

void
SimpleDRAM::processNextReqEvent()
{
    scheduleNextReq();
}

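// Explanatory note: the activation window below generalises the DDR
// four-activate window; with an activation limit of 4, tXAW plays the
// role of tFAW, limiting how many rows may be activated in a rank
// within the window.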
void
SimpleDRAM::recordActivate(Tick act_tick, uint8_t rank)
{
    assert(0 <= rank && rank < ranksPerChannel);
    assert(actTicks[rank].size() == activationLimit);

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    // if the activation limit is disabled then we are done
    if (actTicks[rank].empty())
        return;

    // sanity check
    if (actTicks[rank].back() && (act_tick - actTicks[rank].back()) < tXAW) {
        // @todo For now, stick with a warning
        warn("Got %d activates in window %d (%d - %d) which is smaller "
             "than %d\n", activationLimit, act_tick - actTicks[rank].back(),
             act_tick, actTicks[rank].back(), tXAW);
    }

    // shift the times used for the book keeping, the last element
    // (highest index) is the oldest one and hence the lowest value
    actTicks[rank].pop_back();

    // record a new activation (in the future)
    actTicks[rank].push_front(act_tick);

    // cannot activate more than X times in time window tXAW, push the
    // next one (the X + 1'st activate) to be tXAW away from the
    // oldest in our window of X
    if (actTicks[rank].back() && (act_tick - actTicks[rank].back()) < tXAW) {
        DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate no earlier "
                "than %d\n", activationLimit, actTicks[rank].back() + tXAW);
        for(int j = 0; j < banksPerRank; j++)
            // next activate must not happen before end of window
            banks[rank][j].actAllowedAt = actTicks[rank].back() + tXAW;
    }
}

void
SimpleDRAM::doDRAMAccess(DRAMPacket* dram_pkt)
{

    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    // estimate the bank and access latency
    pair<Tick, Tick> lat = estimateLatency(dram_pkt, curTick());
    Tick bankLat = lat.first;
    Tick accessLat = lat.second;
    Tick actTick;

    // This request was woken up at this time based on a prior call
    // to estimateLatency(). However, between then and now, both the
    // accessLatency and/or busBusyUntil may have changed. We need
    // to correct for that.

    Tick addDelay = (curTick() + accessLat < busBusyUntil) ?
        busBusyUntil - (curTick() + accessLat) : 0;

    Bank& bank = dram_pkt->bankRef;

    // Update bank state
    if (pageMgmt == Enums::open) {
        bank.openRow = dram_pkt->row;
        bank.freeAt = curTick() + addDelay + accessLat;
        bank.bytesAccessed += burstSize;

        // If you activated a new row due to this access, the next access
        // will have to respect tRAS for this bank.
        if (!rowHitFlag) {
            // any waiting for banks is accounted for in freeAt
            actTick = bank.freeAt - tCL - tRCD;
            bank.tRASDoneAt = actTick + tRAS;
            recordActivate(actTick, dram_pkt->rank);

            // sample the number of bytes accessed and reset it as
            // we are now closing this row
            bytesPerActivate.sample(bank.bytesAccessed);
            bank.bytesAccessed = 0;
        }
    } else if (pageMgmt == Enums::close) {
        actTick = curTick() + addDelay + accessLat - tRCD - tCL;
        recordActivate(actTick, dram_pkt->rank);

        // If the DRAM has a very quick tRAS, bank can be made free
        // after consecutive tCL,tRCD,tRP times. In general, however,
        // an additional wait is required to respect tRAS.
        bank.freeAt = std::max(actTick + tRAS + tRP,
                               actTick + tRCD + tCL + tRP);

        DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt);
        bytesPerActivate.sample(burstSize);
    } else
        panic("No page management policy chosen\n");

    // Update request parameters
    dram_pkt->readyTime = curTick() + addDelay + accessLat + tBURST;

    DPRINTF(DRAM, "Req %lld: curtick is %lld accessLat is %d "
            "readytime is %lld busbusyuntil is %lld. "
            "Scheduling at readyTime\n", dram_pkt->addr,
            curTick(), accessLat, dram_pkt->readyTime, busBusyUntil);

    // Make sure requests are not overlapping on the databus
    assert(dram_pkt->readyTime - busBusyUntil >= tBURST);

    // Update bus state
    busBusyUntil = dram_pkt->readyTime;

    DPRINTF(DRAM, "Access time is %lld\n",
            dram_pkt->readyTime - dram_pkt->entryTime);

    if (rowHitFlag) {
        if (dram_pkt->isRead)
            readRowHits++;
        else
            writeRowHits++;
    }

    // At this point, commonality between reads and writes ends.
    // For writes, we are done since we long ago responded to the
    // requestor. We also don't care about stats for writes. For
    // reads, we still need to figure out responding to the requestor,
    // and capture stats.

    if (!dram_pkt->isRead) {
        return;
    }

    // Update stats
    totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
    totBankLat += bankLat;
    totBusLat += tBURST;
    totQLat += dram_pkt->readyTime - dram_pkt->entryTime - bankLat - tBURST;

    // At this point we're done dealing with the request
    // It will be moved to a separate response queue with a
    // correct readyTime, and eventually be sent back at that
    // time
    moveToRespQ();

    // The absolute soonest you have to start thinking about the
    // next request is the longest access time that can occur before
    // busBusyUntil. Assuming you need to precharge,
    // open a new row, and access, it is tRP + tRCD + tCL

    Tick newTime = (busBusyUntil > tRP + tRCD + tCL) ?
        std::max(busBusyUntil - (tRP + tRCD + tCL), curTick()) :
        curTick();

    if (!nextReqEvent.scheduled() && !stopReads) {
        schedule(nextReqEvent, newTime);
    } else {
        if (newTime < nextReqEvent.when())
            reschedule(nextReqEvent, newTime);
    }
}

void
SimpleDRAM::moveToRespQ()
{
    // Remove from read queue
    DRAMPacket* dram_pkt = readQueue.front();
    readQueue.pop_front();

    // sanity check
    assert(dram_pkt->size <= burstSize);

    // Insert into response queue sorted by readyTime
    // It will be sent back to the requestor at its
    // readyTime
    if (respQueue.empty()) {
        respQueue.push_front(dram_pkt);
        assert(!respondEvent.scheduled());
        assert(dram_pkt->readyTime >= curTick());
        schedule(respondEvent, dram_pkt->readyTime);
    } else {
        bool done = false;
        auto i = respQueue.begin();
        while (!done && i != respQueue.end()) {
            if ((*i)->readyTime > dram_pkt->readyTime) {
                respQueue.insert(i, dram_pkt);
                done = true;
            }
            ++i;
        }

        if (!done)
            respQueue.push_back(dram_pkt);

        assert(respondEvent.scheduled());

        if (respQueue.front()->readyTime < respondEvent.when()) {
            assert(respQueue.front()->readyTime >= curTick());
            reschedule(respondEvent, respQueue.front()->readyTime);
        }
    }
}

void
SimpleDRAM::scheduleNextReq()
{
    DPRINTF(DRAM, "Reached scheduleNextReq()\n");

    // Figure out which read request goes next, and move it to the
    // front of the read queue
    if (!chooseNextRead()) {
        // In the case there is no read request to go next, see if we
        // are asked to drain, and if so trigger writes, this also
        // ensures that if we hit the write limit we will do this
        // multiple times until we are completely drained
        if (drainManager && !writeQueue.empty() && !writeEvent.scheduled())
            triggerWrites();
    } else {
        doDRAMAccess(readQueue.front());
    }
}

Tick
SimpleDRAM::maxBankFreeAt() const
{
    Tick banksFree = 0;

    for(int i = 0; i < ranksPerChannel; i++)
        for(int j = 0; j < banksPerRank; j++)
            banksFree = std::max(banks[i][j].freeAt, banksFree);

    return banksFree;
}

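// Explanatory note: the value returned below is a bit mask indexed by
// bank id (rank * banksPerRank + bank); with 8 banks per rank, bit 10
// set would mean rank 1, bank 2 is amongst the earliest banks to
// become free.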
uint64_t
SimpleDRAM::minBankFreeAt(const deque<DRAMPacket*>& queue) const
{
    uint64_t bank_mask = 0;
    Tick freeAt = MaxTick;

    // determine if we have queued transactions targeting the
    // bank in question
    vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
    for (auto p = queue.begin(); p != queue.end(); ++p) {
        got_waiting[(*p)->bankId] = true;
    }

    for (int i = 0; i < ranksPerChannel; i++) {
        for (int j = 0; j < banksPerRank; j++) {
            // if we have waiting requests for the bank, and it is
            // amongst the first available, update the mask
            if (got_waiting[i * banksPerRank + j] &&
                banks[i][j].freeAt <= freeAt) {
                // reset bank mask if new minimum is found
                if (banks[i][j].freeAt < freeAt)
                    bank_mask = 0;
                // set the bit corresponding to the available bank,
                // indexed by bank id to match DRAMPacket::bankId
                uint8_t bit_index = i * banksPerRank + j;
                replaceBits(bank_mask, bit_index, bit_index, 1);
                freeAt = banks[i][j].freeAt;
            }
        }
    }
    return bank_mask;
}

void
SimpleDRAM::processRefreshEvent()
{
    DPRINTF(DRAM, "Refreshing at tick %ld\n", curTick());

    Tick banksFree = std::max(curTick(), maxBankFreeAt()) + tRFC;

    for(int i = 0; i < ranksPerChannel; i++)
        for(int j = 0; j < banksPerRank; j++)
            banks[i][j].freeAt = banksFree;

    schedule(refreshEvent, curTick() + tREFI);
}

void
SimpleDRAM::regStats()
{
    using namespace Stats;

    AbstractMemory::regStats();

    readReqs
        .name(name() + ".readReqs")
        .desc("Total number of read requests accepted by DRAM controller");

    writeReqs
        .name(name() + ".writeReqs")
        .desc("Total number of write requests accepted by DRAM controller");

    readBursts
        .name(name() + ".readBursts")
        .desc("Total number of DRAM read bursts. "
              "Each DRAM read request translates to either one or multiple "
              "DRAM read bursts");

    writeBursts
        .name(name() + ".writeBursts")
        .desc("Total number of DRAM write bursts. "
              "Each DRAM write request translates to either one or multiple "
              "DRAM write bursts");

    servicedByWrQ
        .name(name() + ".servicedByWrQ")
        .desc("Number of DRAM read bursts serviced by write Q");

    neitherReadNorWrite
        .name(name() + ".neitherReadNorWrite")
        .desc("Reqs where no action is needed");

    perBankRdReqs
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankRdReqs")
        .desc("Track reads on a per bank basis");

    perBankWrReqs
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankWrReqs")
        .desc("Track writes on a per bank basis");

    avgRdQLen
        .name(name() + ".avgRdQLen")
        .desc("Average read queue length over time")
        .precision(2);

    avgWrQLen
        .name(name() + ".avgWrQLen")
        .desc("Average write queue length over time")
        .precision(2);

    totQLat
        .name(name() + ".totQLat")
        .desc("Total cycles spent in queuing delays");

    totBankLat
        .name(name() + ".totBankLat")
        .desc("Total cycles spent in bank access");

    totBusLat
        .name(name() + ".totBusLat")
        .desc("Total cycles spent in databus access");

    totMemAccLat
        .name(name() + ".totMemAccLat")
        .desc("Sum of mem lat for all requests");

    avgQLat
        .name(name() + ".avgQLat")
        .desc("Average queueing delay per request")
        .precision(2);

    avgQLat = totQLat / (readBursts - servicedByWrQ);

    avgBankLat
        .name(name() + ".avgBankLat")
        .desc("Average bank access latency per request")
        .precision(2);

    avgBankLat = totBankLat / (readBursts - servicedByWrQ);

    avgBusLat
        .name(name() + ".avgBusLat")
        .desc("Average bus latency per request")
        .precision(2);

    avgBusLat = totBusLat / (readBursts - servicedByWrQ);

    avgMemAccLat
        .name(name() + ".avgMemAccLat")
        .desc("Average memory access latency")
        .precision(2);

    avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ);

    numRdRetry
        .name(name() + ".numRdRetry")
        .desc("Number of times rd buffer was full causing retry");

    numWrRetry
        .name(name() + ".numWrRetry")
        .desc("Number of times wr buffer was full causing retry");

    readRowHits
        .name(name() + ".readRowHits")
        .desc("Number of row buffer hits during reads");

    writeRowHits
        .name(name() + ".writeRowHits")
        .desc("Number of row buffer hits during writes");

    readRowHitRate
        .name(name() + ".readRowHitRate")
        .desc("Row buffer hit rate for reads")
        .precision(2);

    readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100;

    writeRowHitRate
        .name(name() + ".writeRowHitRate")
        .desc("Row buffer hit rate for writes")
        .precision(2);

    writeRowHitRate = (writeRowHits / writeBursts) * 100;

    readPktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".readPktSize")
        .desc("Categorize read packet sizes");

    writePktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".writePktSize")
        .desc("Categorize write packet sizes");

    rdQLenPdf
        .init(readBufferSize)
        .name(name() + ".rdQLenPdf")
        .desc("What read queue length does an incoming req see");

    wrQLenPdf
        .init(writeBufferSize)
        .name(name() + ".wrQLenPdf")
        .desc("What write queue length does an incoming req see");

    bytesPerActivate
        .init(rowBufferSize)
        .name(name() + ".bytesPerActivate")
        .desc("Bytes accessed per row activation")
        .flags(nozero);

    bytesRead
        .name(name() + ".bytesRead")
        .desc("Total number of bytes read from memory");

    bytesWritten
        .name(name() + ".bytesWritten")
        .desc("Total number of bytes written to memory");

    bytesConsumedRd
        .name(name() + ".bytesConsumedRd")
        .desc("bytesRead derated as per pkt->getSize()");

    bytesConsumedWr
        .name(name() + ".bytesConsumedWr")
        .desc("bytesWritten derated as per pkt->getSize()");

    avgRdBW
        .name(name() + ".avgRdBW")
        .desc("Average achieved read bandwidth in MB/s")
        .precision(2);

    avgRdBW = (bytesRead / 1000000) / simSeconds;

    avgWrBW
        .name(name() + ".avgWrBW")
        .desc("Average achieved write bandwidth in MB/s")
        .precision(2);

    avgWrBW = (bytesWritten / 1000000) / simSeconds;

    avgConsumedRdBW
        .name(name() + ".avgConsumedRdBW")
        .desc("Average consumed read bandwidth in MB/s")
        .precision(2);

    avgConsumedRdBW = (bytesConsumedRd / 1000000) / simSeconds;

    avgConsumedWrBW
        .name(name() + ".avgConsumedWrBW")
        .desc("Average consumed write bandwidth in MB/s")
        .precision(2);

    avgConsumedWrBW = (bytesConsumedWr / 1000000) / simSeconds;

    peakBW
        .name(name() + ".peakBW")
        .desc("Theoretical peak bandwidth in MB/s")
        .precision(2);

    peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000;
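
    // For example (illustrative, assuming the default 1 ps tick
    // resolution): a 64-byte burst with a tBURST of 5 ns gives
    // (1e12 / 5000) * 64 / 1e6 = 12800 MB/s, i.e. the peak bandwidth
    // of a 64-bit DDR3-1600 channel.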

    busUtil
        .name(name() + ".busUtil")
        .desc("Data bus utilization in percentage")
        .precision(2);

    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;

    totGap
        .name(name() + ".totGap")
        .desc("Total gap between requests");

    avgGap
        .name(name() + ".avgGap")
        .desc("Average gap between requests")
        .precision(2);

    avgGap = totGap / (readReqs + writeReqs);
}

void
SimpleDRAM::recvFunctional(PacketPtr pkt)
{
    // rely on the abstract memory
    functionalAccess(pkt);
}

BaseSlavePort&
SimpleDRAM::getSlavePort(const string &if_name, PortID idx)
{
    if (if_name != "port") {
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return port;
    }
}

unsigned int
SimpleDRAM::drain(DrainManager *dm)
{
    unsigned int count = port.drain(dm);

    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (!(writeQueue.empty() && readQueue.empty() &&
          respQueue.empty())) {
        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
                " resp: %d\n", writeQueue.size(), readQueue.size(),
                respQueue.size());
        ++count;
        drainManager = dm;
        // the only part that is not drained automatically over time
        // is the write queue, thus trigger writes if there are any
        // waiting and no reads waiting, otherwise wait until the
        // reads are done
        if (readQueue.empty() && !writeQueue.empty() &&
            !writeEvent.scheduled())
            triggerWrites();
    }

    if (count)
        setDrainState(Drainable::Draining);
    else
        setDrainState(Drainable::Drained);
    return count;
}

SimpleDRAM::MemoryPort::MemoryPort(const std::string& name, SimpleDRAM& _memory)
    : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this),
      memory(_memory)
{ }

AddrRangeList
SimpleDRAM::MemoryPort::getAddrRanges() const
{
    AddrRangeList ranges;
    ranges.push_back(memory.getAddrRange());
    return ranges;
}

void
SimpleDRAM::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(memory.name());

    if (!queue.checkFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory.recvFunctional(pkt);
    }

    pkt->popLabel();
}

Tick
SimpleDRAM::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory.recvAtomic(pkt);
}

bool
SimpleDRAM::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller
    return memory.recvTimingReq(pkt);
}

SimpleDRAM*
SimpleDRAMParams::create()
{
    return new SimpleDRAM(this);
}