dram_ctrl.cc revision 9967
1/* 2 * Copyright (c) 2010-2013 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2013 Amin Farmahini-Farahani 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Andreas Hansson 41 * Ani Udipi 42 * Neha Agarwal 43 */ 44 45#include "base/trace.hh" 46#include "base/bitfield.hh" 47#include "debug/Drain.hh" 48#include "debug/DRAM.hh" 49#include "mem/simple_dram.hh" 50#include "sim/system.hh" 51 52using namespace std; 53 54SimpleDRAM::SimpleDRAM(const SimpleDRAMParams* p) : 55 AbstractMemory(p), 56 port(name() + ".port", *this), 57 retryRdReq(false), retryWrReq(false), 58 rowHitFlag(false), stopReads(false), actTicks(p->activation_limit, 0), 59 writeEvent(this), respondEvent(this), 60 refreshEvent(this), nextReqEvent(this), drainManager(NULL), 61 deviceBusWidth(p->device_bus_width), burstLength(p->burst_length), 62 deviceRowBufferSize(p->device_rowbuffer_size), 63 devicesPerRank(p->devices_per_rank), 64 burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8), 65 rowBufferSize(devicesPerRank * deviceRowBufferSize), 66 ranksPerChannel(p->ranks_per_channel), 67 banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0), 68 readBufferSize(p->read_buffer_size), 69 writeBufferSize(p->write_buffer_size), 70 writeThresholdPerc(p->write_thresh_perc), 71 tWTR(p->tWTR), tBURST(p->tBURST), 72 tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS), 73 tRFC(p->tRFC), tREFI(p->tREFI), 74 tXAW(p->tXAW), activationLimit(p->activation_limit), 75 memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping), 76 pageMgmt(p->page_policy), 77 frontendLatency(p->static_frontend_latency), 78 backendLatency(p->static_backend_latency), 79 busBusyUntil(0), writeStartTime(0), 80 prevArrival(0), numReqs(0) 81{ 82 // create the bank states based on the dimensions of the ranks and 83 // banks 84 banks.resize(ranksPerChannel); 85 for (size_t c = 0; c < ranksPerChannel; ++c) { 86 banks[c].resize(banksPerRank); 87 } 88 89 // round the write threshold percent to a whole number of entries 90 // in the buffer 91 writeThreshold = writeBufferSize * writeThresholdPerc / 100.0; 92} 93 94void 95SimpleDRAM::init() 96{ 97 if (!port.isConnected()) { 98 fatal("SimpleDRAM %s is unconnected!\n", name()); 99 } else { 100 port.sendRangeChange(); 101 } 102 103 // we could deal with plenty options here, but for now do a quick 104 // sanity check 105 DPRINTF(DRAM, "Burst size %d bytes\n", burstSize); 106 107 // determine the rows per bank by looking at the total capacity 108 uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size()); 109 110 DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity, 111 AbstractMemory::size()); 112 113 columnsPerRowBuffer = rowBufferSize / burstSize; 114 115 DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n", 116 rowBufferSize, columnsPerRowBuffer); 117 118 rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel); 119 120 if (range.interleaved()) { 121 if (channels != range.stripes()) 122 panic("%s has %d interleaved address stripes but %d channel(s)\n", 123 name(), range.stripes(), channels); 124 125 if (addrMapping == Enums::RaBaChCo) { 126 if (rowBufferSize != range.granularity()) { 127 panic("Interleaving of %s doesn't match RaBaChCo address map\n", 128 name()); 129 } 130 } else if (addrMapping == Enums::RaBaCoCh) { 131 if (burstSize != range.granularity()) { 132 panic("Interleaving of %s doesn't match RaBaCoCh address map\n", 133 name()); 134 } 135 } else if (addrMapping == Enums::CoRaBaCh) { 136 if (burstSize != range.granularity()) 137 panic("Interleaving of %s doesn't match CoRaBaCh address map\n", 138 name()); 139 } 140 } 141} 142 143void 144SimpleDRAM::startup() 145{ 146 // print the configuration of the controller 147 printParams(); 148 149 // kick off the refresh 150 schedule(refreshEvent, curTick() + tREFI); 151} 152 153Tick 154SimpleDRAM::recvAtomic(PacketPtr pkt) 155{ 156 DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr()); 157 158 // do the actual memory access and turn the packet into a response 159 access(pkt); 160 161 Tick latency = 0; 162 if (!pkt->memInhibitAsserted() && pkt->hasData()) { 163 // this value is not supposed to be accurate, just enough to 164 // keep things going, mimic a closed page 165 latency = tRP + tRCD + tCL; 166 } 167 return latency; 168} 169 170bool 171SimpleDRAM::readQueueFull(unsigned int neededEntries) const 172{ 173 DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n", 174 readBufferSize, readQueue.size() + respQueue.size(), 175 neededEntries); 176 177 return 178 (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize; 179} 180 181bool 182SimpleDRAM::writeQueueFull(unsigned int neededEntries) const 183{ 184 DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n", 185 writeBufferSize, writeQueue.size(), neededEntries); 186 return (writeQueue.size() + neededEntries) > writeBufferSize; 187} 188 189SimpleDRAM::DRAMPacket* 190SimpleDRAM::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size, bool isRead) 191{ 192 // decode the address based on the address mapping scheme, with 193 // Ra, Co, Ba and Ch denoting rank, column, bank and channel, 194 // respectively 195 uint8_t rank; 196 uint8_t bank; 197 uint16_t row; 198 199 // truncate the address to the access granularity 200 Addr addr = dramPktAddr / burstSize; 201 202 // we have removed the lowest order address bits that denote the 203 // position within the column 204 if (addrMapping == Enums::RaBaChCo) { 205 // the lowest order bits denote the column to ensure that 206 // sequential cache lines occupy the same row 207 addr = addr / columnsPerRowBuffer; 208 209 // take out the channel part of the address 210 addr = addr / channels; 211 212 // after the channel bits, get the bank bits to interleave 213 // over the banks 214 bank = addr % banksPerRank; 215 addr = addr / banksPerRank; 216 217 // after the bank, we get the rank bits which thus interleaves 218 // over the ranks 219 rank = addr % ranksPerChannel; 220 addr = addr / ranksPerChannel; 221 222 // lastly, get the row bits 223 row = addr % rowsPerBank; 224 addr = addr / rowsPerBank; 225 } else if (addrMapping == Enums::RaBaCoCh) { 226 // take out the channel part of the address 227 addr = addr / channels; 228 229 // next, the column 230 addr = addr / columnsPerRowBuffer; 231 232 // after the column bits, we get the bank bits to interleave 233 // over the banks 234 bank = addr % banksPerRank; 235 addr = addr / banksPerRank; 236 237 // after the bank, we get the rank bits which thus interleaves 238 // over the ranks 239 rank = addr % ranksPerChannel; 240 addr = addr / ranksPerChannel; 241 242 // lastly, get the row bits 243 row = addr % rowsPerBank; 244 addr = addr / rowsPerBank; 245 } else if (addrMapping == Enums::CoRaBaCh) { 246 // optimise for closed page mode and utilise maximum 247 // parallelism of the DRAM (at the cost of power) 248 249 // take out the channel part of the address, not that this has 250 // to match with how accesses are interleaved between the 251 // controllers in the address mapping 252 addr = addr / channels; 253 254 // start with the bank bits, as this provides the maximum 255 // opportunity for parallelism between requests 256 bank = addr % banksPerRank; 257 addr = addr / banksPerRank; 258 259 // next get the rank bits 260 rank = addr % ranksPerChannel; 261 addr = addr / ranksPerChannel; 262 263 // next the column bits which we do not need to keep track of 264 // and simply skip past 265 addr = addr / columnsPerRowBuffer; 266 267 // lastly, get the row bits 268 row = addr % rowsPerBank; 269 addr = addr / rowsPerBank; 270 } else 271 panic("Unknown address mapping policy chosen!"); 272 273 assert(rank < ranksPerChannel); 274 assert(bank < banksPerRank); 275 assert(row < rowsPerBank); 276 277 DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n", 278 dramPktAddr, rank, bank, row); 279 280 // create the corresponding DRAM packet with the entry time and 281 // ready time set to the current tick, the latter will be updated 282 // later 283 uint16_t bank_id = banksPerRank * rank + bank; 284 return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr, 285 size, banks[rank][bank]); 286} 287 288void 289SimpleDRAM::addToReadQueue(PacketPtr pkt, unsigned int pktCount) 290{ 291 // only add to the read queue here. whenever the request is 292 // eventually done, set the readyTime, and call schedule() 293 assert(!pkt->isWrite()); 294 295 assert(pktCount != 0); 296 297 // if the request size is larger than burst size, the pkt is split into 298 // multiple DRAM packets 299 // Note if the pkt starting address is not aligened to burst size, the 300 // address of first DRAM packet is kept unaliged. Subsequent DRAM packets 301 // are aligned to burst size boundaries. This is to ensure we accurately 302 // check read packets against packets in write queue. 303 Addr addr = pkt->getAddr(); 304 unsigned pktsServicedByWrQ = 0; 305 BurstHelper* burst_helper = NULL; 306 for (int cnt = 0; cnt < pktCount; ++cnt) { 307 unsigned size = std::min((addr | (burstSize - 1)) + 1, 308 pkt->getAddr() + pkt->getSize()) - addr; 309 readPktSize[ceilLog2(size)]++; 310 readBursts++; 311 312 // First check write buffer to see if the data is already at 313 // the controller 314 bool foundInWrQ = false; 315 for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) { 316 // check if the read is subsumed in the write entry we are 317 // looking at 318 if ((*i)->addr <= addr && 319 (addr + size) <= ((*i)->addr + (*i)->size)) { 320 foundInWrQ = true; 321 servicedByWrQ++; 322 pktsServicedByWrQ++; 323 DPRINTF(DRAM, "Read to addr %lld with size %d serviced by " 324 "write queue\n", addr, size); 325 bytesRead += burstSize; 326 bytesConsumedRd += size; 327 break; 328 } 329 } 330 331 // If not found in the write q, make a DRAM packet and 332 // push it onto the read queue 333 if (!foundInWrQ) { 334 335 // Make the burst helper for split packets 336 if (pktCount > 1 && burst_helper == NULL) { 337 DPRINTF(DRAM, "Read to addr %lld translates to %d " 338 "dram requests\n", pkt->getAddr(), pktCount); 339 burst_helper = new BurstHelper(pktCount); 340 } 341 342 DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true); 343 dram_pkt->burstHelper = burst_helper; 344 345 assert(!readQueueFull(1)); 346 rdQLenPdf[readQueue.size() + respQueue.size()]++; 347 348 DPRINTF(DRAM, "Adding to read queue\n"); 349 350 readQueue.push_back(dram_pkt); 351 352 // Update stats 353 assert(dram_pkt->bankId < ranksPerChannel * banksPerRank); 354 perBankRdReqs[dram_pkt->bankId]++; 355 356 avgRdQLen = readQueue.size() + respQueue.size(); 357 } 358 359 // Starting address of next dram pkt (aligend to burstSize boundary) 360 addr = (addr | (burstSize - 1)) + 1; 361 } 362 363 // If all packets are serviced by write queue, we send the repsonse back 364 if (pktsServicedByWrQ == pktCount) { 365 accessAndRespond(pkt, frontendLatency); 366 return; 367 } 368 369 // Update how many split packets are serviced by write queue 370 if (burst_helper != NULL) 371 burst_helper->burstsServiced = pktsServicedByWrQ; 372 373 // If we are not already scheduled to get the read request out of 374 // the queue, do so now 375 if (!nextReqEvent.scheduled() && !stopReads) { 376 DPRINTF(DRAM, "Request scheduled immediately\n"); 377 schedule(nextReqEvent, curTick()); 378 } 379} 380 381void 382SimpleDRAM::processWriteEvent() 383{ 384 assert(!writeQueue.empty()); 385 uint32_t numWritesThisTime = 0; 386 387 DPRINTF(DRAM, "Beginning DRAM Writes\n"); 388 Tick temp1 M5_VAR_USED = std::max(curTick(), busBusyUntil); 389 Tick temp2 M5_VAR_USED = std::max(curTick(), maxBankFreeAt()); 390 391 // @todo: are there any dangers with the untimed while loop? 392 while (!writeQueue.empty()) { 393 if (numWritesThisTime >= writeThreshold) { 394 DPRINTF(DRAM, "Hit write threshold %d\n", writeThreshold); 395 break; 396 } 397 398 chooseNextWrite(); 399 DRAMPacket* dram_pkt = writeQueue.front(); 400 // sanity check 401 assert(dram_pkt->size <= burstSize); 402 doDRAMAccess(dram_pkt); 403 404 writeQueue.pop_front(); 405 delete dram_pkt; 406 numWritesThisTime++; 407 } 408 409 DPRINTF(DRAM, "Completed %d writes, bus busy for %lld ticks,"\ 410 "banks busy for %lld ticks\n", numWritesThisTime, 411 busBusyUntil - temp1, maxBankFreeAt() - temp2); 412 413 // Update stats 414 avgWrQLen = writeQueue.size(); 415 416 // turn the bus back around for reads again 417 busBusyUntil += tWTR; 418 stopReads = false; 419 420 if (retryWrReq) { 421 retryWrReq = false; 422 port.sendRetry(); 423 } 424 425 // if there is nothing left in any queue, signal a drain 426 if (writeQueue.empty() && readQueue.empty() && 427 respQueue.empty () && drainManager) { 428 drainManager->signalDrainDone(); 429 drainManager = NULL; 430 } 431 432 // Once you're done emptying the write queue, check if there's 433 // anything in the read queue, and call schedule if required. The 434 // retry above could already have caused it to be scheduled, so 435 // first check 436 if (!nextReqEvent.scheduled()) 437 schedule(nextReqEvent, busBusyUntil); 438} 439 440 441void 442SimpleDRAM::triggerWrites() 443{ 444 DPRINTF(DRAM, "Writes triggered at %lld\n", curTick()); 445 // Flag variable to stop any more read scheduling 446 stopReads = true; 447 448 writeStartTime = std::max(busBusyUntil, curTick()) + tWTR; 449 450 DPRINTF(DRAM, "Writes scheduled at %lld\n", writeStartTime); 451 452 assert(writeStartTime >= curTick()); 453 assert(!writeEvent.scheduled()); 454 schedule(writeEvent, writeStartTime); 455} 456 457void 458SimpleDRAM::addToWriteQueue(PacketPtr pkt, unsigned int pktCount) 459{ 460 // only add to the write queue here. whenever the request is 461 // eventually done, set the readyTime, and call schedule() 462 assert(pkt->isWrite()); 463 464 // if the request size is larger than burst size, the pkt is split into 465 // multiple DRAM packets 466 Addr addr = pkt->getAddr(); 467 for (int cnt = 0; cnt < pktCount; ++cnt) { 468 unsigned size = std::min((addr | (burstSize - 1)) + 1, 469 pkt->getAddr() + pkt->getSize()) - addr; 470 writePktSize[ceilLog2(size)]++; 471 writeBursts++; 472 473 // see if we can merge with an existing item in the write 474 // queue and keep track of whether we have merged or not so we 475 // can stop at that point and also avoid enqueueing a new 476 // request 477 bool merged = false; 478 auto w = writeQueue.begin(); 479 480 while(!merged && w != writeQueue.end()) { 481 // either of the two could be first, if they are the same 482 // it does not matter which way we go 483 if ((*w)->addr >= addr) { 484 // the existing one starts after the new one, figure 485 // out where the new one ends with respect to the 486 // existing one 487 if ((addr + size) >= ((*w)->addr + (*w)->size)) { 488 // check if the existing one is completely 489 // subsumed in the new one 490 DPRINTF(DRAM, "Merging write covering existing burst\n"); 491 merged = true; 492 // update both the address and the size 493 (*w)->addr = addr; 494 (*w)->size = size; 495 } else if ((addr + size) >= (*w)->addr && 496 ((*w)->addr + (*w)->size - addr) <= burstSize) { 497 // the new one is just before or partially 498 // overlapping with the existing one, and together 499 // they fit within a burst 500 DPRINTF(DRAM, "Merging write before existing burst\n"); 501 merged = true; 502 // the existing queue item needs to be adjusted with 503 // respect to both address and size 504 (*w)->addr = addr; 505 (*w)->size = (*w)->addr + (*w)->size - addr; 506 } 507 } else { 508 // the new one starts after the current one, figure 509 // out where the existing one ends with respect to the 510 // new one 511 if (((*w)->addr + (*w)->size) >= (addr + size)) { 512 // check if the new one is completely subsumed in the 513 // existing one 514 DPRINTF(DRAM, "Merging write into existing burst\n"); 515 merged = true; 516 // no adjustments necessary 517 } else if (((*w)->addr + (*w)->size) >= addr && 518 (addr + size - (*w)->addr) <= burstSize) { 519 // the existing one is just before or partially 520 // overlapping with the new one, and together 521 // they fit within a burst 522 DPRINTF(DRAM, "Merging write after existing burst\n"); 523 merged = true; 524 // the address is right, and only the size has 525 // to be adjusted 526 (*w)->size = addr + size - (*w)->addr; 527 } 528 } 529 ++w; 530 } 531 532 // if the item was not merged we need to create a new write 533 // and enqueue it 534 if (!merged) { 535 DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false); 536 537 assert(writeQueue.size() < writeBufferSize); 538 wrQLenPdf[writeQueue.size()]++; 539 540 DPRINTF(DRAM, "Adding to write queue\n"); 541 542 writeQueue.push_back(dram_pkt); 543 544 // Update stats 545 assert(dram_pkt->bankId < ranksPerChannel * banksPerRank); 546 perBankWrReqs[dram_pkt->bankId]++; 547 548 avgWrQLen = writeQueue.size(); 549 } 550 551 bytesConsumedWr += size; 552 bytesWritten += burstSize; 553 554 // Starting address of next dram pkt (aligend to burstSize boundary) 555 addr = (addr | (burstSize - 1)) + 1; 556 } 557 558 // we do not wait for the writes to be send to the actual memory, 559 // but instead take responsibility for the consistency here and 560 // snoop the write queue for any upcoming reads 561 // @todo, if a pkt size is larger than burst size, we might need a 562 // different front end latency 563 accessAndRespond(pkt, frontendLatency); 564 565 // If your write buffer is starting to fill up, drain it! 566 if (writeQueue.size() > writeThreshold && !stopReads){ 567 triggerWrites(); 568 } 569} 570 571void 572SimpleDRAM::printParams() const 573{ 574 // Sanity check print of important parameters 575 DPRINTF(DRAM, 576 "Memory controller %s physical organization\n" \ 577 "Number of devices per rank %d\n" \ 578 "Device bus width (in bits) %d\n" \ 579 "DRAM data bus burst %d\n" \ 580 "Row buffer size %d\n" \ 581 "Columns per row buffer %d\n" \ 582 "Rows per bank %d\n" \ 583 "Banks per rank %d\n" \ 584 "Ranks per channel %d\n" \ 585 "Total mem capacity %u\n", 586 name(), devicesPerRank, deviceBusWidth, burstSize, rowBufferSize, 587 columnsPerRowBuffer, rowsPerBank, banksPerRank, ranksPerChannel, 588 rowBufferSize * rowsPerBank * banksPerRank * ranksPerChannel); 589 590 string scheduler = memSchedPolicy == Enums::fcfs ? "FCFS" : "FR-FCFS"; 591 string address_mapping = addrMapping == Enums::RaBaChCo ? "RaBaChCo" : 592 (addrMapping == Enums::RaBaCoCh ? "RaBaCoCh" : "CoRaBaCh"); 593 string page_policy = pageMgmt == Enums::open ? "OPEN" : "CLOSE"; 594 595 DPRINTF(DRAM, 596 "Memory controller %s characteristics\n" \ 597 "Read buffer size %d\n" \ 598 "Write buffer size %d\n" \ 599 "Write buffer thresh %d\n" \ 600 "Scheduler %s\n" \ 601 "Address mapping %s\n" \ 602 "Page policy %s\n", 603 name(), readBufferSize, writeBufferSize, writeThreshold, 604 scheduler, address_mapping, page_policy); 605 606 DPRINTF(DRAM, "Memory controller %s timing specs\n" \ 607 "tRCD %d ticks\n" \ 608 "tCL %d ticks\n" \ 609 "tRP %d ticks\n" \ 610 "tBURST %d ticks\n" \ 611 "tRFC %d ticks\n" \ 612 "tREFI %d ticks\n" \ 613 "tWTR %d ticks\n" \ 614 "tXAW (%d) %d ticks\n", 615 name(), tRCD, tCL, tRP, tBURST, tRFC, tREFI, tWTR, 616 activationLimit, tXAW); 617} 618 619void 620SimpleDRAM::printQs() const { 621 DPRINTF(DRAM, "===READ QUEUE===\n\n"); 622 for (auto i = readQueue.begin() ; i != readQueue.end() ; ++i) { 623 DPRINTF(DRAM, "Read %lu\n", (*i)->addr); 624 } 625 DPRINTF(DRAM, "\n===RESP QUEUE===\n\n"); 626 for (auto i = respQueue.begin() ; i != respQueue.end() ; ++i) { 627 DPRINTF(DRAM, "Response %lu\n", (*i)->addr); 628 } 629 DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n"); 630 for (auto i = writeQueue.begin() ; i != writeQueue.end() ; ++i) { 631 DPRINTF(DRAM, "Write %lu\n", (*i)->addr); 632 } 633} 634 635bool 636SimpleDRAM::recvTimingReq(PacketPtr pkt) 637{ 638 /// @todo temporary hack to deal with memory corruption issues until 639 /// 4-phase transactions are complete 640 for (int x = 0; x < pendingDelete.size(); x++) 641 delete pendingDelete[x]; 642 pendingDelete.clear(); 643 644 // This is where we enter from the outside world 645 DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n", 646 pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 647 648 // simply drop inhibited packets for now 649 if (pkt->memInhibitAsserted()) { 650 DPRINTF(DRAM,"Inhibited packet -- Dropping it now\n"); 651 pendingDelete.push_back(pkt); 652 return true; 653 } 654 655 // Every million accesses, print the state of the queues 656 if (numReqs % 1000000 == 0) 657 printQs(); 658 659 // Calc avg gap between requests 660 if (prevArrival != 0) { 661 totGap += curTick() - prevArrival; 662 } 663 prevArrival = curTick(); 664 665 666 // Find out how many dram packets a pkt translates to 667 // If the burst size is equal or larger than the pkt size, then a pkt 668 // translates to only one dram packet. Otherwise, a pkt translates to 669 // multiple dram packets 670 unsigned size = pkt->getSize(); 671 unsigned offset = pkt->getAddr() & (burstSize - 1); 672 unsigned int dram_pkt_count = divCeil(offset + size, burstSize); 673 674 // check local buffers and do not accept if full 675 if (pkt->isRead()) { 676 assert(size != 0); 677 if (readQueueFull(dram_pkt_count)) { 678 DPRINTF(DRAM, "Read queue full, not accepting\n"); 679 // remember that we have to retry this port 680 retryRdReq = true; 681 numRdRetry++; 682 return false; 683 } else { 684 addToReadQueue(pkt, dram_pkt_count); 685 readReqs++; 686 numReqs++; 687 } 688 } else if (pkt->isWrite()) { 689 assert(size != 0); 690 if (writeQueueFull(dram_pkt_count)) { 691 DPRINTF(DRAM, "Write queue full, not accepting\n"); 692 // remember that we have to retry this port 693 retryWrReq = true; 694 numWrRetry++; 695 return false; 696 } else { 697 addToWriteQueue(pkt, dram_pkt_count); 698 writeReqs++; 699 numReqs++; 700 } 701 } else { 702 DPRINTF(DRAM,"Neither read nor write, ignore timing\n"); 703 neitherReadNorWrite++; 704 accessAndRespond(pkt, 1); 705 } 706 707 retryRdReq = false; 708 retryWrReq = false; 709 return true; 710} 711 712void 713SimpleDRAM::processRespondEvent() 714{ 715 DPRINTF(DRAM, 716 "processRespondEvent(): Some req has reached its readyTime\n"); 717 718 DRAMPacket* dram_pkt = respQueue.front(); 719 720 // Actually responds to the requestor 721 bytesConsumedRd += dram_pkt->size; 722 bytesRead += burstSize; 723 if (dram_pkt->burstHelper) { 724 // it is a split packet 725 dram_pkt->burstHelper->burstsServiced++; 726 if (dram_pkt->burstHelper->burstsServiced == 727 dram_pkt->burstHelper->burstCount) { 728 // we have now serviced all children packets of a system packet 729 // so we can now respond to the requester 730 // @todo we probably want to have a different front end and back 731 // end latency for split packets 732 accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency); 733 delete dram_pkt->burstHelper; 734 dram_pkt->burstHelper = NULL; 735 } 736 } else { 737 // it is not a split packet 738 accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency); 739 } 740 741 delete respQueue.front(); 742 respQueue.pop_front(); 743 744 // Update stats 745 avgRdQLen = readQueue.size() + respQueue.size(); 746 747 if (!respQueue.empty()) { 748 assert(respQueue.front()->readyTime >= curTick()); 749 assert(!respondEvent.scheduled()); 750 schedule(respondEvent, respQueue.front()->readyTime); 751 } else { 752 // if there is nothing left in any queue, signal a drain 753 if (writeQueue.empty() && readQueue.empty() && 754 drainManager) { 755 drainManager->signalDrainDone(); 756 drainManager = NULL; 757 } 758 } 759 760 // We have made a location in the queue available at this point, 761 // so if there is a read that was forced to wait, retry now 762 if (retryRdReq) { 763 retryRdReq = false; 764 port.sendRetry(); 765 } 766} 767 768void 769SimpleDRAM::chooseNextWrite() 770{ 771 // This method does the arbitration between write requests. The 772 // chosen packet is simply moved to the head of the write 773 // queue. The other methods know that this is the place to 774 // look. For example, with FCFS, this method does nothing 775 assert(!writeQueue.empty()); 776 777 if (writeQueue.size() == 1) { 778 DPRINTF(DRAM, "Single write request, nothing to do\n"); 779 return; 780 } 781 782 if (memSchedPolicy == Enums::fcfs) { 783 // Do nothing, since the correct request is already head 784 } else if (memSchedPolicy == Enums::frfcfs) { 785 // Only determine bank availability when needed 786 uint64_t earliest_banks = 0; 787 788 auto i = writeQueue.begin(); 789 bool foundRowHit = false; 790 while (!foundRowHit && i != writeQueue.end()) { 791 DRAMPacket* dram_pkt = *i; 792 const Bank& bank = dram_pkt->bankRef; 793 if (bank.openRow == dram_pkt->row) { 794 DPRINTF(DRAM, "Write row buffer hit\n"); 795 writeQueue.erase(i); 796 writeQueue.push_front(dram_pkt); 797 foundRowHit = true; 798 } else { 799 // No row hit, go for first ready 800 if (earliest_banks == 0) 801 earliest_banks = minBankFreeAt(writeQueue); 802 803 // Bank is ready or is one of the first available bank 804 if (bank.freeAt <= curTick() || 805 bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) { 806 writeQueue.erase(i); 807 writeQueue.push_front(dram_pkt); 808 break; 809 } 810 } 811 ++i; 812 } 813 } else 814 panic("No scheduling policy chosen\n"); 815 816 DPRINTF(DRAM, "Selected next write request\n"); 817} 818 819bool 820SimpleDRAM::chooseNextRead() 821{ 822 // This method does the arbitration between read requests. The 823 // chosen packet is simply moved to the head of the queue. The 824 // other methods know that this is the place to look. For example, 825 // with FCFS, this method does nothing 826 if (readQueue.empty()) { 827 DPRINTF(DRAM, "No read request to select\n"); 828 return false; 829 } 830 831 // If there is only one request then there is nothing left to do 832 if (readQueue.size() == 1) 833 return true; 834 835 if (memSchedPolicy == Enums::fcfs) { 836 // Do nothing, since the request to serve is already the first 837 // one in the read queue 838 } else if (memSchedPolicy == Enums::frfcfs) { 839 // Only determine this when needed 840 uint64_t earliest_banks = 0; 841 842 for (auto i = readQueue.begin(); i != readQueue.end() ; ++i) { 843 DRAMPacket* dram_pkt = *i; 844 const Bank& bank = dram_pkt->bankRef; 845 // Check if it is a row hit 846 if (bank.openRow == dram_pkt->row) { 847 DPRINTF(DRAM, "Row buffer hit\n"); 848 readQueue.erase(i); 849 readQueue.push_front(dram_pkt); 850 break; 851 } else { 852 // No row hit, go for first ready 853 if (earliest_banks == 0) 854 earliest_banks = minBankFreeAt(readQueue); 855 856 // Bank is ready or is the first available bank 857 if (bank.freeAt <= curTick() || 858 bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) { 859 readQueue.erase(i); 860 readQueue.push_front(dram_pkt); 861 break; 862 } 863 } 864 } 865 } else 866 panic("No scheduling policy chosen!\n"); 867 868 DPRINTF(DRAM, "Selected next read request\n"); 869 return true; 870} 871 872void 873SimpleDRAM::accessAndRespond(PacketPtr pkt, Tick static_latency) 874{ 875 DPRINTF(DRAM, "Responding to Address %lld.. ",pkt->getAddr()); 876 877 bool needsResponse = pkt->needsResponse(); 878 // do the actual memory access which also turns the packet into a 879 // response 880 access(pkt); 881 882 // turn packet around to go back to requester if response expected 883 if (needsResponse) { 884 // access already turned the packet into a response 885 assert(pkt->isResponse()); 886 887 // @todo someone should pay for this 888 pkt->busFirstWordDelay = pkt->busLastWordDelay = 0; 889 890 // queue the packet in the response queue to be sent out after 891 // the static latency has passed 892 port.schedTimingResp(pkt, curTick() + static_latency); 893 } else { 894 // @todo the packet is going to be deleted, and the DRAMPacket 895 // is still having a pointer to it 896 pendingDelete.push_back(pkt); 897 } 898 899 DPRINTF(DRAM, "Done\n"); 900 901 return; 902} 903 904pair<Tick, Tick> 905SimpleDRAM::estimateLatency(DRAMPacket* dram_pkt, Tick inTime) 906{ 907 // If a request reaches a bank at tick 'inTime', how much time 908 // *after* that does it take to finish the request, depending 909 // on bank status and page open policy. Note that this method 910 // considers only the time taken for the actual read or write 911 // to complete, NOT any additional time thereafter for tRAS or 912 // tRP. 913 Tick accLat = 0; 914 Tick bankLat = 0; 915 rowHitFlag = false; 916 917 const Bank& bank = dram_pkt->bankRef; 918 if (pageMgmt == Enums::open) { // open-page policy 919 if (bank.openRow == dram_pkt->row) { 920 // When we have a row-buffer hit, 921 // we don't care about tRAS having expired or not, 922 // but do care about bank being free for access 923 rowHitFlag = true; 924 925 // When a series of requests arrive to the same row, 926 // DDR systems are capable of streaming data continuously 927 // at maximum bandwidth (subject to tCCD). Here, we approximate 928 // this condition, and assume that if whenever a bank is already 929 // busy and a new request comes in, it can be completed with no 930 // penalty beyond waiting for the existing read to complete. 931 if (bank.freeAt > inTime) { 932 accLat += bank.freeAt - inTime; 933 bankLat += 0; 934 } else { 935 // CAS latency only 936 accLat += tCL; 937 bankLat += tCL; 938 } 939 940 } else { 941 // Row-buffer miss, need to close existing row 942 // once tRAS has expired, then open the new one, 943 // then add cas latency. 944 Tick freeTime = std::max(bank.tRASDoneAt, bank.freeAt); 945 946 if (freeTime > inTime) 947 accLat += freeTime - inTime; 948 949 accLat += tRP + tRCD + tCL; 950 bankLat += tRP + tRCD + tCL; 951 } 952 } else if (pageMgmt == Enums::close) { 953 // With a close page policy, no notion of 954 // bank.tRASDoneAt 955 if (bank.freeAt > inTime) 956 accLat += bank.freeAt - inTime; 957 958 // page already closed, simply open the row, and 959 // add cas latency 960 accLat += tRCD + tCL; 961 bankLat += tRCD + tCL; 962 } else 963 panic("No page management policy chosen\n"); 964 965 DPRINTF(DRAM, "Returning < %lld, %lld > from estimateLatency()\n", 966 bankLat, accLat); 967 968 return make_pair(bankLat, accLat); 969} 970 971void 972SimpleDRAM::processNextReqEvent() 973{ 974 scheduleNextReq(); 975} 976 977void 978SimpleDRAM::recordActivate(Tick act_tick) 979{ 980 assert(actTicks.size() == activationLimit); 981 982 DPRINTF(DRAM, "Activate at tick %d\n", act_tick); 983 984 // if the activation limit is disabled then we are done 985 if (actTicks.empty()) 986 return; 987 988 // sanity check 989 if (actTicks.back() && (act_tick - actTicks.back()) < tXAW) { 990 // @todo For now, stick with a warning 991 warn("Got %d activates in window %d (%d - %d) which is smaller " 992 "than %d\n", activationLimit, act_tick - actTicks.back(), 993 act_tick, actTicks.back(), tXAW); 994 } 995 996 // shift the times used for the book keeping, the last element 997 // (highest index) is the oldest one and hence the lowest value 998 actTicks.pop_back(); 999 1000 // record an new activation (in the future) 1001 actTicks.push_front(act_tick); 1002 1003 // cannot activate more than X times in time window tXAW, push the 1004 // next one (the X + 1'st activate) to be tXAW away from the 1005 // oldest in our window of X 1006 if (actTicks.back() && (act_tick - actTicks.back()) < tXAW) { 1007 DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate no earlier " 1008 "than %d\n", activationLimit, actTicks.back() + tXAW); 1009 for(int i = 0; i < ranksPerChannel; i++) 1010 for(int j = 0; j < banksPerRank; j++) 1011 // next activate must not happen before end of window 1012 banks[i][j].freeAt = std::max(banks[i][j].freeAt, 1013 actTicks.back() + tXAW); 1014 } 1015} 1016 1017void 1018SimpleDRAM::doDRAMAccess(DRAMPacket* dram_pkt) 1019{ 1020 1021 DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n", 1022 dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row); 1023 1024 // estimate the bank and access latency 1025 pair<Tick, Tick> lat = estimateLatency(dram_pkt, curTick()); 1026 Tick bankLat = lat.first; 1027 Tick accessLat = lat.second; 1028 Tick actTick; 1029 1030 // This request was woken up at this time based on a prior call 1031 // to estimateLatency(). However, between then and now, both the 1032 // accessLatency and/or busBusyUntil may have changed. We need 1033 // to correct for that. 1034 1035 Tick addDelay = (curTick() + accessLat < busBusyUntil) ? 1036 busBusyUntil - (curTick() + accessLat) : 0; 1037 1038 Bank& bank = dram_pkt->bankRef; 1039 1040 // Update bank state 1041 if (pageMgmt == Enums::open) { 1042 bank.openRow = dram_pkt->row; 1043 bank.freeAt = curTick() + addDelay + accessLat; 1044 bank.bytesAccessed += burstSize; 1045 1046 // If you activated a new row do to this access, the next access 1047 // will have to respect tRAS for this bank. 1048 if (!rowHitFlag) { 1049 // any waiting for banks account for in freeAt 1050 actTick = bank.freeAt - tCL - tRCD; 1051 bank.tRASDoneAt = actTick + tRAS; 1052 recordActivate(actTick); 1053 1054 // sample the number of bytes accessed and reset it as 1055 // we are now closing this row 1056 bytesPerActivate.sample(bank.bytesAccessed); 1057 bank.bytesAccessed = 0; 1058 } 1059 } else if (pageMgmt == Enums::close) { 1060 actTick = curTick() + addDelay + accessLat - tRCD - tCL; 1061 recordActivate(actTick); 1062 1063 // If the DRAM has a very quick tRAS, bank can be made free 1064 // after consecutive tCL,tRCD,tRP times. In general, however, 1065 // an additional wait is required to respect tRAS. 1066 bank.freeAt = std::max(actTick + tRAS + tRP, 1067 actTick + tRCD + tCL + tRP); 1068 1069 DPRINTF(DRAM,"doDRAMAccess::bank.freeAt is %lld\n",bank.freeAt); 1070 bytesPerActivate.sample(burstSize); 1071 } else 1072 panic("No page management policy chosen\n"); 1073 1074 // Update request parameters 1075 dram_pkt->readyTime = curTick() + addDelay + accessLat + tBURST; 1076 1077 1078 DPRINTF(DRAM, "Req %lld: curtick is %lld accessLat is %d " \ 1079 "readytime is %lld busbusyuntil is %lld. " \ 1080 "Scheduling at readyTime\n", dram_pkt->addr, 1081 curTick(), accessLat, dram_pkt->readyTime, busBusyUntil); 1082 1083 // Make sure requests are not overlapping on the databus 1084 assert (dram_pkt->readyTime - busBusyUntil >= tBURST); 1085 1086 // Update bus state 1087 busBusyUntil = dram_pkt->readyTime; 1088 1089 DPRINTF(DRAM,"Access time is %lld\n", 1090 dram_pkt->readyTime - dram_pkt->entryTime); 1091 1092 if (rowHitFlag) { 1093 if(dram_pkt->isRead) 1094 readRowHits++; 1095 else 1096 writeRowHits++; 1097 } 1098 1099 // At this point, commonality between reads and writes ends. 1100 // For writes, we are done since we long ago responded to the 1101 // requestor. We also don't care about stats for writes. For 1102 // reads, we still need to figure out respoding to the requestor, 1103 // and capture stats. 1104 1105 if (!dram_pkt->isRead) { 1106 return; 1107 } 1108 1109 // Update stats 1110 totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime; 1111 totBankLat += bankLat; 1112 totBusLat += tBURST; 1113 totQLat += dram_pkt->readyTime - dram_pkt->entryTime - bankLat - tBURST; 1114 1115 1116 // At this point we're done dealing with the request 1117 // It will be moved to a separate response queue with a 1118 // correct readyTime, and eventually be sent back at that 1119 //time 1120 moveToRespQ(); 1121 1122 // The absolute soonest you have to start thinking about the 1123 // next request is the longest access time that can occur before 1124 // busBusyUntil. Assuming you need to precharge, 1125 // open a new row, and access, it is tRP + tRCD + tCL 1126 1127 Tick newTime = (busBusyUntil > tRP + tRCD + tCL ) ? 1128 std::max(busBusyUntil - (tRP + tRCD + tCL) , curTick()) : 1129 curTick(); 1130 1131 if (!nextReqEvent.scheduled() && !stopReads){ 1132 schedule(nextReqEvent, newTime); 1133 } else { 1134 if (newTime < nextReqEvent.when()) 1135 reschedule(nextReqEvent, newTime); 1136 } 1137 1138 1139} 1140 1141void 1142SimpleDRAM::moveToRespQ() 1143{ 1144 // Remove from read queue 1145 DRAMPacket* dram_pkt = readQueue.front(); 1146 readQueue.pop_front(); 1147 1148 // sanity check 1149 assert(dram_pkt->size <= burstSize); 1150 1151 // Insert into response queue sorted by readyTime 1152 // It will be sent back to the requestor at its 1153 // readyTime 1154 if (respQueue.empty()) { 1155 respQueue.push_front(dram_pkt); 1156 assert(!respondEvent.scheduled()); 1157 assert(dram_pkt->readyTime >= curTick()); 1158 schedule(respondEvent, dram_pkt->readyTime); 1159 } else { 1160 bool done = false; 1161 auto i = respQueue.begin(); 1162 while (!done && i != respQueue.end()) { 1163 if ((*i)->readyTime > dram_pkt->readyTime) { 1164 respQueue.insert(i, dram_pkt); 1165 done = true; 1166 } 1167 ++i; 1168 } 1169 1170 if (!done) 1171 respQueue.push_back(dram_pkt); 1172 1173 assert(respondEvent.scheduled()); 1174 1175 if (respQueue.front()->readyTime < respondEvent.when()) { 1176 assert(respQueue.front()->readyTime >= curTick()); 1177 reschedule(respondEvent, respQueue.front()->readyTime); 1178 } 1179 } 1180} 1181 1182void 1183SimpleDRAM::scheduleNextReq() 1184{ 1185 DPRINTF(DRAM, "Reached scheduleNextReq()\n"); 1186 1187 // Figure out which read request goes next, and move it to the 1188 // front of the read queue 1189 if (!chooseNextRead()) { 1190 // In the case there is no read request to go next, see if we 1191 // are asked to drain, and if so trigger writes, this also 1192 // ensures that if we hit the write limit we will do this 1193 // multiple times until we are completely drained 1194 if (drainManager && !writeQueue.empty() && !writeEvent.scheduled()) 1195 triggerWrites(); 1196 } else { 1197 doDRAMAccess(readQueue.front()); 1198 } 1199} 1200 1201Tick 1202SimpleDRAM::maxBankFreeAt() const 1203{ 1204 Tick banksFree = 0; 1205 1206 for(int i = 0; i < ranksPerChannel; i++) 1207 for(int j = 0; j < banksPerRank; j++) 1208 banksFree = std::max(banks[i][j].freeAt, banksFree); 1209 1210 return banksFree; 1211} 1212 1213uint64_t 1214SimpleDRAM::minBankFreeAt(const deque<DRAMPacket*>& queue) const 1215{ 1216 uint64_t bank_mask = 0; 1217 Tick freeAt = MaxTick; 1218 1219 // detemrine if we have queued transactions targetting the 1220 // bank in question 1221 vector<bool> got_waiting(ranksPerChannel * banksPerRank, false); 1222 for (auto p = queue.begin(); p != queue.end(); ++p) { 1223 got_waiting[(*p)->bankId] = true; 1224 } 1225 1226 for (int i = 0; i < ranksPerChannel; i++) { 1227 for (int j = 0; j < banksPerRank; j++) { 1228 // if we have waiting requests for the bank, and it is 1229 // amongst the first available, update the mask 1230 if (got_waiting[i * banksPerRank + j] && 1231 banks[i][j].freeAt <= freeAt) { 1232 // reset bank mask if new minimum is found 1233 if (banks[i][j].freeAt < freeAt) 1234 bank_mask = 0; 1235 // set the bit corresponding to the available bank 1236 uint8_t bit_index = i * ranksPerChannel + j; 1237 replaceBits(bank_mask, bit_index, bit_index, 1); 1238 freeAt = banks[i][j].freeAt; 1239 } 1240 } 1241 } 1242 return bank_mask; 1243} 1244 1245void 1246SimpleDRAM::processRefreshEvent() 1247{ 1248 DPRINTF(DRAM, "Refreshing at tick %ld\n", curTick()); 1249 1250 Tick banksFree = std::max(curTick(), maxBankFreeAt()) + tRFC; 1251 1252 for(int i = 0; i < ranksPerChannel; i++) 1253 for(int j = 0; j < banksPerRank; j++) 1254 banks[i][j].freeAt = banksFree; 1255 1256 schedule(refreshEvent, curTick() + tREFI); 1257} 1258 1259void 1260SimpleDRAM::regStats() 1261{ 1262 using namespace Stats; 1263 1264 AbstractMemory::regStats(); 1265 1266 readReqs 1267 .name(name() + ".readReqs") 1268 .desc("Total number of read requests accepted by DRAM controller"); 1269 1270 writeReqs 1271 .name(name() + ".writeReqs") 1272 .desc("Total number of write requests accepted by DRAM controller"); 1273 1274 readBursts 1275 .name(name() + ".readBursts") 1276 .desc("Total number of DRAM read bursts. " 1277 "Each DRAM read request translates to either one or multiple " 1278 "DRAM read bursts"); 1279 1280 writeBursts 1281 .name(name() + ".writeBursts") 1282 .desc("Total number of DRAM write bursts. " 1283 "Each DRAM write request translates to either one or multiple " 1284 "DRAM write bursts"); 1285 1286 servicedByWrQ 1287 .name(name() + ".servicedByWrQ") 1288 .desc("Number of DRAM read bursts serviced by write Q"); 1289 1290 neitherReadNorWrite 1291 .name(name() + ".neitherReadNorWrite") 1292 .desc("Reqs where no action is needed"); 1293 1294 perBankRdReqs 1295 .init(banksPerRank * ranksPerChannel) 1296 .name(name() + ".perBankRdReqs") 1297 .desc("Track reads on a per bank basis"); 1298 1299 perBankWrReqs 1300 .init(banksPerRank * ranksPerChannel) 1301 .name(name() + ".perBankWrReqs") 1302 .desc("Track writes on a per bank basis"); 1303 1304 avgRdQLen 1305 .name(name() + ".avgRdQLen") 1306 .desc("Average read queue length over time") 1307 .precision(2); 1308 1309 avgWrQLen 1310 .name(name() + ".avgWrQLen") 1311 .desc("Average write queue length over time") 1312 .precision(2); 1313 1314 totQLat 1315 .name(name() + ".totQLat") 1316 .desc("Total cycles spent in queuing delays"); 1317 1318 totBankLat 1319 .name(name() + ".totBankLat") 1320 .desc("Total cycles spent in bank access"); 1321 1322 totBusLat 1323 .name(name() + ".totBusLat") 1324 .desc("Total cycles spent in databus access"); 1325 1326 totMemAccLat 1327 .name(name() + ".totMemAccLat") 1328 .desc("Sum of mem lat for all requests"); 1329 1330 avgQLat 1331 .name(name() + ".avgQLat") 1332 .desc("Average queueing delay per request") 1333 .precision(2); 1334 1335 avgQLat = totQLat / (readBursts - servicedByWrQ); 1336 1337 avgBankLat 1338 .name(name() + ".avgBankLat") 1339 .desc("Average bank access latency per request") 1340 .precision(2); 1341 1342 avgBankLat = totBankLat / (readBursts - servicedByWrQ); 1343 1344 avgBusLat 1345 .name(name() + ".avgBusLat") 1346 .desc("Average bus latency per request") 1347 .precision(2); 1348 1349 avgBusLat = totBusLat / (readBursts - servicedByWrQ); 1350 1351 avgMemAccLat 1352 .name(name() + ".avgMemAccLat") 1353 .desc("Average memory access latency") 1354 .precision(2); 1355 1356 avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ); 1357 1358 numRdRetry 1359 .name(name() + ".numRdRetry") 1360 .desc("Number of times rd buffer was full causing retry"); 1361 1362 numWrRetry 1363 .name(name() + ".numWrRetry") 1364 .desc("Number of times wr buffer was full causing retry"); 1365 1366 readRowHits 1367 .name(name() + ".readRowHits") 1368 .desc("Number of row buffer hits during reads"); 1369 1370 writeRowHits 1371 .name(name() + ".writeRowHits") 1372 .desc("Number of row buffer hits during writes"); 1373 1374 readRowHitRate 1375 .name(name() + ".readRowHitRate") 1376 .desc("Row buffer hit rate for reads") 1377 .precision(2); 1378 1379 readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100; 1380 1381 writeRowHitRate 1382 .name(name() + ".writeRowHitRate") 1383 .desc("Row buffer hit rate for writes") 1384 .precision(2); 1385 1386 writeRowHitRate = (writeRowHits / writeBursts) * 100; 1387 1388 readPktSize 1389 .init(ceilLog2(burstSize) + 1) 1390 .name(name() + ".readPktSize") 1391 .desc("Categorize read packet sizes"); 1392 1393 writePktSize 1394 .init(ceilLog2(burstSize) + 1) 1395 .name(name() + ".writePktSize") 1396 .desc("Categorize write packet sizes"); 1397 1398 rdQLenPdf 1399 .init(readBufferSize) 1400 .name(name() + ".rdQLenPdf") 1401 .desc("What read queue length does an incoming req see"); 1402 1403 wrQLenPdf 1404 .init(writeBufferSize) 1405 .name(name() + ".wrQLenPdf") 1406 .desc("What write queue length does an incoming req see"); 1407 1408 bytesPerActivate 1409 .init(rowBufferSize) 1410 .name(name() + ".bytesPerActivate") 1411 .desc("Bytes accessed per row activation") 1412 .flags(nozero); 1413 1414 bytesRead 1415 .name(name() + ".bytesRead") 1416 .desc("Total number of bytes read from memory"); 1417 1418 bytesWritten 1419 .name(name() + ".bytesWritten") 1420 .desc("Total number of bytes written to memory"); 1421 1422 bytesConsumedRd 1423 .name(name() + ".bytesConsumedRd") 1424 .desc("bytesRead derated as per pkt->getSize()"); 1425 1426 bytesConsumedWr 1427 .name(name() + ".bytesConsumedWr") 1428 .desc("bytesWritten derated as per pkt->getSize()"); 1429 1430 avgRdBW 1431 .name(name() + ".avgRdBW") 1432 .desc("Average achieved read bandwidth in MB/s") 1433 .precision(2); 1434 1435 avgRdBW = (bytesRead / 1000000) / simSeconds; 1436 1437 avgWrBW 1438 .name(name() + ".avgWrBW") 1439 .desc("Average achieved write bandwidth in MB/s") 1440 .precision(2); 1441 1442 avgWrBW = (bytesWritten / 1000000) / simSeconds; 1443 1444 avgConsumedRdBW 1445 .name(name() + ".avgConsumedRdBW") 1446 .desc("Average consumed read bandwidth in MB/s") 1447 .precision(2); 1448 1449 avgConsumedRdBW = (bytesConsumedRd / 1000000) / simSeconds; 1450 1451 avgConsumedWrBW 1452 .name(name() + ".avgConsumedWrBW") 1453 .desc("Average consumed write bandwidth in MB/s") 1454 .precision(2); 1455 1456 avgConsumedWrBW = (bytesConsumedWr / 1000000) / simSeconds; 1457 1458 peakBW 1459 .name(name() + ".peakBW") 1460 .desc("Theoretical peak bandwidth in MB/s") 1461 .precision(2); 1462 1463 peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000; 1464 1465 busUtil 1466 .name(name() + ".busUtil") 1467 .desc("Data bus utilization in percentage") 1468 .precision(2); 1469 1470 busUtil = (avgRdBW + avgWrBW) / peakBW * 100; 1471 1472 totGap 1473 .name(name() + ".totGap") 1474 .desc("Total gap between requests"); 1475 1476 avgGap 1477 .name(name() + ".avgGap") 1478 .desc("Average gap between requests") 1479 .precision(2); 1480 1481 avgGap = totGap / (readReqs + writeReqs); 1482} 1483 1484void 1485SimpleDRAM::recvFunctional(PacketPtr pkt) 1486{ 1487 // rely on the abstract memory 1488 functionalAccess(pkt); 1489} 1490 1491BaseSlavePort& 1492SimpleDRAM::getSlavePort(const string &if_name, PortID idx) 1493{ 1494 if (if_name != "port") { 1495 return MemObject::getSlavePort(if_name, idx); 1496 } else { 1497 return port; 1498 } 1499} 1500 1501unsigned int 1502SimpleDRAM::drain(DrainManager *dm) 1503{ 1504 unsigned int count = port.drain(dm); 1505 1506 // if there is anything in any of our internal queues, keep track 1507 // of that as well 1508 if (!(writeQueue.empty() && readQueue.empty() && 1509 respQueue.empty())) { 1510 DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d," 1511 " resp: %d\n", writeQueue.size(), readQueue.size(), 1512 respQueue.size()); 1513 ++count; 1514 drainManager = dm; 1515 // the only part that is not drained automatically over time 1516 // is the write queue, thus trigger writes if there are any 1517 // waiting and no reads waiting, otherwise wait until the 1518 // reads are done 1519 if (readQueue.empty() && !writeQueue.empty() && 1520 !writeEvent.scheduled()) 1521 triggerWrites(); 1522 } 1523 1524 if (count) 1525 setDrainState(Drainable::Draining); 1526 else 1527 setDrainState(Drainable::Drained); 1528 return count; 1529} 1530 1531SimpleDRAM::MemoryPort::MemoryPort(const std::string& name, SimpleDRAM& _memory) 1532 : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this), 1533 memory(_memory) 1534{ } 1535 1536AddrRangeList 1537SimpleDRAM::MemoryPort::getAddrRanges() const 1538{ 1539 AddrRangeList ranges; 1540 ranges.push_back(memory.getAddrRange()); 1541 return ranges; 1542} 1543 1544void 1545SimpleDRAM::MemoryPort::recvFunctional(PacketPtr pkt) 1546{ 1547 pkt->pushLabel(memory.name()); 1548 1549 if (!queue.checkFunctional(pkt)) { 1550 // Default implementation of SimpleTimingPort::recvFunctional() 1551 // calls recvAtomic() and throws away the latency; we can save a 1552 // little here by just not calculating the latency. 1553 memory.recvFunctional(pkt); 1554 } 1555 1556 pkt->popLabel(); 1557} 1558 1559Tick 1560SimpleDRAM::MemoryPort::recvAtomic(PacketPtr pkt) 1561{ 1562 return memory.recvAtomic(pkt); 1563} 1564 1565bool 1566SimpleDRAM::MemoryPort::recvTimingReq(PacketPtr pkt) 1567{ 1568 // pass it to the memory controller 1569 return memory.recvTimingReq(pkt); 1570} 1571 1572SimpleDRAM* 1573SimpleDRAMParams::create() 1574{ 1575 return new SimpleDRAM(this); 1576} 1577