dram_ctrl.cc revision 9966
/*
 * Copyright (c) 2010-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 *          Ani Udipi
 */

#include "base/trace.hh"
#include "debug/Drain.hh"
#include "debug/DRAM.hh"
#include "mem/simple_dram.hh"
#include "sim/system.hh"

using namespace std;

SimpleDRAM::SimpleDRAM(const SimpleDRAMParams* p) :
    AbstractMemory(p),
    port(name() + ".port", *this),
    retryRdReq(false), retryWrReq(false),
    rowHitFlag(false), stopReads(false), actTicks(p->activation_limit, 0),
    writeEvent(this), respondEvent(this),
    refreshEvent(this), nextReqEvent(this), drainManager(NULL),
    deviceBusWidth(p->device_bus_width), burstLength(p->burst_length),
    deviceRowBufferSize(p->device_rowbuffer_size),
    devicesPerRank(p->devices_per_rank),
    burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8),
    rowBufferSize(devicesPerRank * deviceRowBufferSize),
    ranksPerChannel(p->ranks_per_channel),
    banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
    readBufferSize(p->read_buffer_size),
    writeBufferSize(p->write_buffer_size),
    writeThresholdPerc(p->write_thresh_perc),
    tWTR(p->tWTR), tBURST(p->tBURST),
    tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS),
    tRFC(p->tRFC), tREFI(p->tREFI),
    tXAW(p->tXAW), activationLimit(p->activation_limit),
    memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
    pageMgmt(p->page_policy),
    frontendLatency(p->static_frontend_latency),
    backendLatency(p->static_backend_latency),
    busBusyUntil(0), writeStartTime(0),
    prevArrival(0), numReqs(0)
{
    // create the bank states based on the dimensions of the ranks and
    // banks
    banks.resize(ranksPerChannel);
    for (size_t c = 0; c < ranksPerChannel; ++c) {
        banks[c].resize(banksPerRank);
    }

    // round the write threshold percent to a whole number of entries
    // in the buffer
    writeThreshold = writeBufferSize * writeThresholdPerc / 100.0;
}

void
SimpleDRAM::init()
{
    if (!port.isConnected()) {
        fatal("SimpleDRAM %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }

    // we could deal with plenty of options here, but for now do a quick
    // sanity check
    DPRINTF(DRAM, "Burst size %d bytes\n", burstSize);

    // determine the rows per bank by looking at the total capacity
    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());

    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
            AbstractMemory::size());

    columnsPerRowBuffer = rowBufferSize / burstSize;

    DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n",
            rowBufferSize, columnsPerRowBuffer);

    rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel);

    if (range.interleaved()) {
        if (channels != range.stripes())
            panic("%s has %d interleaved address stripes but %d channel(s)\n",
                  name(), range.stripes(), channels);

        if (addrMapping == Enums::RaBaChCo) {
            if (rowBufferSize != range.granularity()) {
                panic("Interleaving of %s doesn't match RaBaChCo address map\n",
                      name());
            }
        } else if (addrMapping == Enums::RaBaCoCh) {
            if (burstSize != range.granularity()) {
                panic("Interleaving of %s doesn't match RaBaCoCh address map\n",
                      name());
            }
        } else if (addrMapping == Enums::CoRaBaCh) {
            if (burstSize != range.granularity())
                panic("Interleaving of %s doesn't match CoRaBaCh address map\n",
                      name());
        }
    }
}
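
// Worked example of the organization arithmetic above, using hypothetical
// parameter values (an x8 DDR3-style device is assumed purely for
// illustration): with devicesPerRank = 8, deviceBusWidth = 8 bits and
// burstLength = 8, burstSize = (8 * 8 * 8) / 8 = 64 bytes. With
// deviceRowBufferSize = 1024 bytes, rowBufferSize = 8 * 1024 = 8192 bytes
// and columnsPerRowBuffer = 8192 / 64 = 128. For a 1 GiB channel with
// 8 banks per rank and a single rank, rowsPerBank = 2^30 / (8192 * 8 * 1)
// = 16384. Similarly, writeThreshold with writeBufferSize = 32 and
// writeThresholdPerc = 70 rounds down to 22 entries.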

void
SimpleDRAM::startup()
{
    // print the configuration of the controller
    printParams();

    // kick off the refresh
    schedule(refreshEvent, curTick() + tREFI);
}

Tick
SimpleDRAM::recvAtomic(PacketPtr pkt)
{
    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());

    // do the actual memory access and turn the packet into a response
    access(pkt);

    Tick latency = 0;
    if (!pkt->memInhibitAsserted() && pkt->hasData()) {
        // this value is not supposed to be accurate, just enough to
        // keep things going, mimic a closed page
        latency = tRP + tRCD + tCL;
    }
    return latency;
}

bool
SimpleDRAM::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n",
            readBufferSize, readQueue.size() + respQueue.size(),
            neededEntries);

    return
        (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize;
}

bool
SimpleDRAM::writeQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, writeQueue.size(), neededEntries);
    return (writeQueue.size() + neededEntries) > writeBufferSize;
}

SimpleDRAM::DRAMPacket*
SimpleDRAM::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size,
                       bool isRead)
{
    // decode the address based on the address mapping scheme, with
    // Ra, Co, Ba and Ch denoting rank, column, bank and channel,
    // respectively
    uint8_t rank;
    uint16_t bank;
    uint16_t row;

    // truncate the address to the access granularity
    Addr addr = dramPktAddr / burstSize;

    // we have removed the lowest order address bits that denote the
    // position within the column
    if (addrMapping == Enums::RaBaChCo) {
        // the lowest order bits denote the column to ensure that
        // sequential cache lines occupy the same row
        addr = addr / columnsPerRowBuffer;

        // take out the channel part of the address
        addr = addr / channels;

        // after the channel bits, get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::RaBaCoCh) {
        // take out the channel part of the address
        addr = addr / channels;

        // next, the column
        addr = addr / columnsPerRowBuffer;

        // after the column bits, we get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::CoRaBaCh) {
        // optimise for closed page mode and utilise maximum
        // parallelism of the DRAM (at the cost of power)

        // take out the channel part of the address, note that this has
        // to match with how accesses are interleaved between the
        // controllers in the address mapping
        addr = addr / channels;

        // start with the bank bits, as this provides the maximum
        // opportunity for parallelism between requests
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // next get the rank bits
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // next the column bits which we do not need to keep track of
        // and simply skip past
        addr = addr / columnsPerRowBuffer;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else
        panic("Unknown address mapping policy chosen!");

    assert(rank < ranksPerChannel);
    assert(bank < banksPerRank);
    assert(row < rowsPerBank);

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            dramPktAddr, rank, bank, row);

    // create the corresponding DRAM packet with the entry time and
    // ready time set to the current tick, the latter will be updated
    // later
    return new DRAMPacket(pkt, isRead, rank, bank, row, dramPktAddr, size,
                          banks[rank][bank]);
}
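
// Illustrative decode, assuming the hypothetical organization from the
// example above (burstSize = 64, columnsPerRowBuffer = 128, banksPerRank = 8,
// ranksPerChannel = 1, a single channel) and the RaBaCoCh mapping:
// for dramPktAddr = 0x12345680, addr = 0x12345680 / 64 = 0x48D15A.
// Dividing by channels (1) and then by columnsPerRowBuffer (128) leaves
// 0x91A2, so bank = 0x91A2 % 8 = 2, rank = (0x91A2 / 8) % 1 = 0, and
// row = 0x1234 % rowsPerBank = 4660.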

void
SimpleDRAM::addToReadQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pktCount != 0);

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    // Note if the pkt starting address is not aligned to burst size, the
    // address of the first DRAM packet is kept unaligned. Subsequent DRAM
    // packets are aligned to burst size boundaries. This is to ensure we
    // accurately check read packets against packets in the write queue.
    Addr addr = pkt->getAddr();
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
        readPktSize[ceilLog2(size)]++;
        readBursts++;

        // First check write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) {
            // check if the read is subsumed in the write entry we are
            // looking at
            if ((*i)->addr <= addr &&
                (addr + size) <= ((*i)->addr + (*i)->size)) {
                foundInWrQ = true;
                servicedByWrQ++;
                pktsServicedByWrQ++;
                DPRINTF(DRAM, "Read to addr %lld with size %d serviced by "
                        "write queue\n", addr, size);
                bytesRead += burstSize;
                bytesConsumedRd += size;
                break;
            }
        }

        // If not found in the write q, make a DRAM packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pktCount > 1 && burst_helper == NULL) {
                DPRINTF(DRAM, "Read to addr %lld translates to %d "
                        "dram requests\n", pkt->getAddr(), pktCount);
                burst_helper = new BurstHelper(pktCount);
            }

            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true);
            dram_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            rdQLenPdf[readQueue.size() + respQueue.size()]++;

            DPRINTF(DRAM, "Adding to read queue\n");

            readQueue.push_back(dram_pkt);

            // Update stats
            uint32_t bank_id = banksPerRank * dram_pkt->rank + dram_pkt->bank;
            assert(bank_id < ranksPerChannel * banksPerRank);
            perBankRdReqs[bank_id]++;

            avgRdQLen = readQueue.size() + respQueue.size();
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // If all packets are serviced by the write queue, we send the
    // response back
    if (pktsServicedByWrQ == pktCount) {
        accessAndRespond(pkt, frontendLatency);
        return;
    }

    // Update how many split packets are serviced by the write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // If we are not already scheduled to get the read request out of
    // the queue, do so now
    if (!nextReqEvent.scheduled() && !stopReads) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}
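
// Illustrative split, assuming a hypothetical 64-byte burstSize: a 128-byte
// read starting at address 0x1010 is not burst aligned, so it becomes three
// DRAM packets covering [0x1010, 0x1040), [0x1040, 0x1080) and
// [0x1080, 0x1090); only the first packet keeps the unaligned start address.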

void
SimpleDRAM::processWriteEvent()
{
    assert(!writeQueue.empty());
    uint32_t numWritesThisTime = 0;

    DPRINTF(DRAM, "Beginning DRAM Writes\n");
    Tick temp1 M5_VAR_USED = std::max(curTick(), busBusyUntil);
    Tick temp2 M5_VAR_USED = std::max(curTick(), maxBankFreeAt());

    // @todo: are there any dangers with the untimed while loop?
    while (!writeQueue.empty()) {
        if (numWritesThisTime >= writeThreshold) {
            DPRINTF(DRAM, "Hit write threshold %d\n", writeThreshold);
            break;
        }

        chooseNextWrite();
        DRAMPacket* dram_pkt = writeQueue.front();
        // sanity check
        assert(dram_pkt->size <= burstSize);
        doDRAMAccess(dram_pkt);

        writeQueue.pop_front();
        delete dram_pkt;
        numWritesThisTime++;
    }

    DPRINTF(DRAM, "Completed %d writes, bus busy for %lld ticks, "\
            "banks busy for %lld ticks\n", numWritesThisTime,
            busBusyUntil - temp1, maxBankFreeAt() - temp2);

    // Update stats
    avgWrQLen = writeQueue.size();

    // turn the bus back around for reads again
    busBusyUntil += tWTR;
    stopReads = false;

    if (retryWrReq) {
        retryWrReq = false;
        port.sendRetry();
    }

    // if there is nothing left in any queue, signal a drain
    if (writeQueue.empty() && readQueue.empty() &&
        respQueue.empty() && drainManager) {
        drainManager->signalDrainDone();
        drainManager = NULL;
    }

    // Once you're done emptying the write queue, check if there's
    // anything in the read queue, and call schedule if required. The
    // retry above could already have caused it to be scheduled, so
    // first check
    if (!nextReqEvent.scheduled())
        schedule(nextReqEvent, busBusyUntil);
}


void
SimpleDRAM::triggerWrites()
{
    DPRINTF(DRAM, "Writes triggered at %lld\n", curTick());
    // Flag variable to stop any more read scheduling
    stopReads = true;

    writeStartTime = std::max(busBusyUntil, curTick()) + tWTR;

    DPRINTF(DRAM, "Writes scheduled at %lld\n", writeStartTime);

    assert(writeStartTime >= curTick());
    assert(!writeEvent.scheduled());
    schedule(writeEvent, writeStartTime);
}
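
// Bus turnaround, illustrated with hypothetical numbers: if triggerWrites()
// runs at tick 900 while busBusyUntil = 1000 and tWTR = 50, the write batch
// is scheduled for tick max(1000, 900) + 50 = 1050. Once processWriteEvent()
// has drained up to writeThreshold entries, busBusyUntil is pushed out by
// another tWTR before reads are allowed back on the bus.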

void
SimpleDRAM::addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    Addr addr = pkt->getAddr();
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
        writePktSize[ceilLog2(size)]++;
        writeBursts++;

        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not so we
        // can stop at that point and also avoid enqueueing a new
        // request
        bool merged = false;
        auto w = writeQueue.begin();

        while (!merged && w != writeQueue.end()) {
            // either of the two could be first, if they are the same
            // it does not matter which way we go
            if ((*w)->addr >= addr) {
                // the existing one starts after the new one, figure
                // out where the new one ends with respect to the
                // existing one
                if ((addr + size) >= ((*w)->addr + (*w)->size)) {
                    // check if the existing one is completely
                    // subsumed in the new one
                    DPRINTF(DRAM, "Merging write covering existing burst\n");
                    merged = true;
                    // update both the address and the size
                    (*w)->addr = addr;
                    (*w)->size = size;
                } else if ((addr + size) >= (*w)->addr &&
                           ((*w)->addr + (*w)->size - addr) <= burstSize) {
                    // the new one is just before or partially
                    // overlapping with the existing one, and together
                    // they fit within a burst
                    DPRINTF(DRAM, "Merging write before existing burst\n");
                    merged = true;
                    // the existing queue item needs to be adjusted with
                    // respect to both address and size; update the size
                    // before overwriting the address it depends on
                    (*w)->size = (*w)->addr + (*w)->size - addr;
                    (*w)->addr = addr;
                }
            } else {
                // the new one starts after the current one, figure
                // out where the existing one ends with respect to the
                // new one
                if (((*w)->addr + (*w)->size) >= (addr + size)) {
                    // check if the new one is completely subsumed in the
                    // existing one
                    DPRINTF(DRAM, "Merging write into existing burst\n");
                    merged = true;
                    // no adjustments necessary
                } else if (((*w)->addr + (*w)->size) >= addr &&
                           (addr + size - (*w)->addr) <= burstSize) {
                    // the existing one is just before or partially
                    // overlapping with the new one, and together
                    // they fit within a burst
                    DPRINTF(DRAM, "Merging write after existing burst\n");
                    merged = true;
                    // the address is right, and only the size has
                    // to be adjusted
                    (*w)->size = addr + size - (*w)->addr;
                }
            }
            ++w;
        }

        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false);

            assert(writeQueue.size() < writeBufferSize);
            wrQLenPdf[writeQueue.size()]++;

            DPRINTF(DRAM, "Adding to write queue\n");

            writeQueue.push_back(dram_pkt);

            // Update stats
            uint32_t bank_id = banksPerRank * dram_pkt->rank + dram_pkt->bank;
            assert(bank_id < ranksPerChannel * banksPerRank);
            perBankWrReqs[bank_id]++;

            avgWrQLen = writeQueue.size();
        }

        bytesConsumedWr += size;
        bytesWritten += burstSize;

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency);

    // If the write buffer is starting to fill up, drain it!
    if (writeQueue.size() > writeThreshold && !stopReads) {
        triggerWrites();
    }
}
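
// Write merging, illustrated with a hypothetical 64-byte burstSize: if the
// queue holds a burst covering [0x1040, 0x1060) and a new write arrives for
// [0x1020, 0x1050), the two coalesce into a single entry covering
// [0x1020, 0x1060), since the combined range (0x40 bytes) still fits in one
// burst. A new write that is fully contained in an existing entry is simply
// absorbed without any adjustment.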

void
SimpleDRAM::printParams() const
{
    // Sanity check print of important parameters
    DPRINTF(DRAM,
            "Memory controller %s physical organization\n" \
            "Number of devices per rank %d\n" \
            "Device bus width (in bits) %d\n" \
            "DRAM data bus burst %d\n" \
            "Row buffer size %d\n" \
            "Columns per row buffer %d\n" \
            "Rows per bank %d\n" \
            "Banks per rank %d\n" \
            "Ranks per channel %d\n" \
            "Total mem capacity %u\n",
            name(), devicesPerRank, deviceBusWidth, burstSize, rowBufferSize,
            columnsPerRowBuffer, rowsPerBank, banksPerRank, ranksPerChannel,
            rowBufferSize * rowsPerBank * banksPerRank * ranksPerChannel);

    string scheduler = memSchedPolicy == Enums::fcfs ? "FCFS" : "FR-FCFS";
    string address_mapping = addrMapping == Enums::RaBaChCo ? "RaBaChCo" :
        (addrMapping == Enums::RaBaCoCh ? "RaBaCoCh" : "CoRaBaCh");
    string page_policy = pageMgmt == Enums::open ? "OPEN" : "CLOSE";

    DPRINTF(DRAM,
            "Memory controller %s characteristics\n" \
            "Read buffer size %d\n" \
            "Write buffer size %d\n" \
            "Write buffer thresh %d\n" \
            "Scheduler %s\n" \
            "Address mapping %s\n" \
            "Page policy %s\n",
            name(), readBufferSize, writeBufferSize, writeThreshold,
            scheduler, address_mapping, page_policy);

    DPRINTF(DRAM, "Memory controller %s timing specs\n" \
            "tRCD %d ticks\n" \
            "tCL %d ticks\n" \
            "tRP %d ticks\n" \
            "tBURST %d ticks\n" \
            "tRFC %d ticks\n" \
            "tREFI %d ticks\n" \
            "tWTR %d ticks\n" \
            "tXAW (%d) %d ticks\n",
            name(), tRCD, tCL, tRP, tBURST, tRFC, tREFI, tWTR,
            activationLimit, tXAW);
}

void
SimpleDRAM::printQs() const {
    DPRINTF(DRAM, "===READ QUEUE===\n\n");
    for (auto i = readQueue.begin(); i != readQueue.end(); ++i) {
        DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
    for (auto i = respQueue.begin(); i != respQueue.end(); ++i) {
        DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
    for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) {
        DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
    }
}

bool
SimpleDRAM::recvTimingReq(PacketPtr pkt)
{
    /// @todo temporary hack to deal with memory corruption issues until
    /// 4-phase transactions are complete
    for (int x = 0; x < pendingDelete.size(); x++)
        delete pendingDelete[x];
    pendingDelete.clear();

    // This is where we enter from the outside world
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    // simply drop inhibited packets for now
    if (pkt->memInhibitAsserted()) {
        DPRINTF(DRAM, "Inhibited packet -- Dropping it now\n");
        pendingDelete.push_back(pkt);
        return true;
    }

    // Every million accesses, print the state of the queues
    if (numReqs % 1000000 == 0)
        printQs();

    // Calc avg gap between requests
    if (prevArrival != 0) {
        totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();


    // Find out how many dram packets a pkt translates to
    // If the burst size is equal to or larger than the pkt size, then a pkt
    // translates to only one dram packet. Otherwise, a pkt translates to
    // multiple dram packets
    unsigned size = pkt->getSize();
    unsigned offset = pkt->getAddr() & (burstSize - 1);
    unsigned int dram_pkt_count = divCeil(offset + size, burstSize);

    // check local buffers and do not accept if full
    if (pkt->isRead()) {
        assert(size != 0);
        if (readQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            numRdRetry++;
            return false;
        } else {
            addToReadQueue(pkt, dram_pkt_count);
            readReqs++;
            numReqs++;
        }
    } else if (pkt->isWrite()) {
        assert(size != 0);
        if (writeQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt, dram_pkt_count);
            writeReqs++;
            numReqs++;
        }
    } else {
        DPRINTF(DRAM, "Neither read nor write, ignore timing\n");
        neitherReadNorWrite++;
        accessAndRespond(pkt, 1);
    }

    retryRdReq = false;
    retryWrReq = false;
    return true;
}

void
SimpleDRAM::processRespondEvent()
{
    DPRINTF(DRAM,
            "processRespondEvent(): Some req has reached its readyTime\n");

    DRAMPacket* dram_pkt = respQueue.front();

    // Actually responds to the requestor
    bytesConsumedRd += dram_pkt->size;
    bytesRead += burstSize;
    if (dram_pkt->burstHelper) {
        // it is a split packet
        dram_pkt->burstHelper->burstsServiced++;
        if (dram_pkt->burstHelper->burstsServiced ==
            dram_pkt->burstHelper->burstCount) {
            // we have now serviced all children packets of a system packet
            // so we can now respond to the requester
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
            delete dram_pkt->burstHelper;
            dram_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
    }

    delete respQueue.front();
    respQueue.pop_front();

    // Update stats
    avgRdQLen = readQueue.size() + respQueue.size();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (writeQueue.empty() && readQueue.empty() &&
            drainManager) {
            drainManager->signalDrainDone();
            drainManager = NULL;
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetry();
    }
}

void
SimpleDRAM::chooseNextWrite()
{
    // This method does the arbitration between write requests. The
    // chosen packet is simply moved to the head of the write
    // queue. The other methods know that this is the place to
    // look. For example, with FCFS, this method does nothing
    assert(!writeQueue.empty());

    if (writeQueue.size() == 1) {
        DPRINTF(DRAM, "Single write request, nothing to do\n");
        return;
    }

    if (memSchedPolicy == Enums::fcfs) {
        // Do nothing, since the correct request is already at the head
    } else if (memSchedPolicy == Enums::frfcfs) {
        auto i = writeQueue.begin();
        bool foundRowHit = false;
        while (!foundRowHit && i != writeQueue.end()) {
            DRAMPacket* dram_pkt = *i;
            const Bank& bank = dram_pkt->bank_ref;
            if (bank.openRow == dram_pkt->row) { //FR part
                DPRINTF(DRAM, "Write row buffer hit\n");
                writeQueue.erase(i);
                writeQueue.push_front(dram_pkt);
                foundRowHit = true;
            } else { //FCFS part
                ;
            }
            ++i;
        }
    } else
        panic("No scheduling policy chosen\n");

    DPRINTF(DRAM, "Selected next write request\n");
}

bool
SimpleDRAM::chooseNextRead()
{
    // This method does the arbitration between read requests. The
    // chosen packet is simply moved to the head of the queue. The
    // other methods know that this is the place to look. For example,
    // with FCFS, this method does nothing
    if (readQueue.empty()) {
        DPRINTF(DRAM, "No read request to select\n");
        return false;
    }

    // If there is only one request then there is nothing left to do
    if (readQueue.size() == 1)
        return true;

    if (memSchedPolicy == Enums::fcfs) {
        // Do nothing, since the request to serve is already the first
        // one in the read queue
    } else if (memSchedPolicy == Enums::frfcfs) {
        for (auto i = readQueue.begin(); i != readQueue.end(); ++i) {
            DRAMPacket* dram_pkt = *i;
            const Bank& bank = dram_pkt->bank_ref;
            // Check if it is a row hit
            if (bank.openRow == dram_pkt->row) { //FR part
                DPRINTF(DRAM, "Row buffer hit\n");
                readQueue.erase(i);
                readQueue.push_front(dram_pkt);
                break;
            } else { //FCFS part
                ;
            }
        }
    } else
        panic("No scheduling policy chosen!\n");

    DPRINTF(DRAM, "Selected next read request\n");
    return true;
}
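
// FR-FCFS in a nutshell, with a hypothetical queue state: if bank 0 currently
// has row 7 open and the read queue holds [A: bank 0 row 3, B: bank 0 row 7],
// the scan above finds B as the first row hit and moves it to the head, ahead
// of the older A; with plain FCFS the queue order is left untouched.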

void
SimpleDRAM::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
    DPRINTF(DRAM, "Responding to Address %lld.. ", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    access(pkt);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());

        // @todo someone should pay for this
        pkt->busFirstWordDelay = pkt->busLastWordDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, curTick() + static_latency);
    } else {
        // @todo the packet is going to be deleted, and the DRAMPacket
        // still has a pointer to it
        pendingDelete.push_back(pkt);
    }

    DPRINTF(DRAM, "Done\n");

    return;
}

pair<Tick, Tick>
SimpleDRAM::estimateLatency(DRAMPacket* dram_pkt, Tick inTime)
{
    // If a request reaches a bank at tick 'inTime', how much time
    // *after* that does it take to finish the request, depending
    // on bank status and page open policy. Note that this method
    // considers only the time taken for the actual read or write
    // to complete, NOT any additional time thereafter for tRAS or
    // tRP.
    Tick accLat = 0;
    Tick bankLat = 0;
    rowHitFlag = false;

    const Bank& bank = dram_pkt->bank_ref;
    if (pageMgmt == Enums::open) { // open-page policy
        if (bank.openRow == dram_pkt->row) {
            // When we have a row-buffer hit,
            // we don't care about tRAS having expired or not,
            // but do care about the bank being free for access
            rowHitFlag = true;

            // When a series of requests arrive to the same row,
            // DDR systems are capable of streaming data continuously
            // at maximum bandwidth (subject to tCCD). Here, we approximate
            // this condition, and assume that whenever a bank is already
            // busy and a new request comes in, it can be completed with no
            // penalty beyond waiting for the existing read to complete.
            if (bank.freeAt > inTime) {
                accLat += bank.freeAt - inTime;
                bankLat += 0;
            } else {
                // CAS latency only
                accLat += tCL;
                bankLat += tCL;
            }

        } else {
            // Row-buffer miss, need to close the existing row
            // once tRAS has expired, then open the new one,
            // then add the CAS latency.
            Tick freeTime = std::max(bank.tRASDoneAt, bank.freeAt);

            if (freeTime > inTime)
                accLat += freeTime - inTime;

            accLat += tRP + tRCD + tCL;
            bankLat += tRP + tRCD + tCL;
        }
    } else if (pageMgmt == Enums::close) {
        // With a close page policy, no notion of
        // bank.tRASDoneAt
        if (bank.freeAt > inTime)
            accLat += bank.freeAt - inTime;

        // page already closed, simply open the row, and
        // add the CAS latency
        accLat += tRCD + tCL;
        bankLat += tRCD + tCL;
    } else
        panic("No page management policy chosen\n");

    DPRINTF(DRAM, "Returning < %lld, %lld > from estimateLatency()\n",
            bankLat, accLat);

    return make_pair(bankLat, accLat);
}

void
SimpleDRAM::processNextReqEvent()
{
    scheduleNextReq();
}

void
SimpleDRAM::recordActivate(Tick act_tick)
{
    assert(actTicks.size() == activationLimit);

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    // if the activation limit is disabled then we are done
    if (actTicks.empty())
        return;

    // sanity check
    if (actTicks.back() && (act_tick - actTicks.back()) < tXAW) {
        // @todo For now, stick with a warning
        warn("Got %d activates in window %d (%d - %d) which is smaller "
             "than %d\n", activationLimit, act_tick - actTicks.back(),
             act_tick, actTicks.back(), tXAW);
    }

    // shift the times used for the bookkeeping, the last element
    // (highest index) is the oldest one and hence the lowest value
    actTicks.pop_back();

    // record a new activation (in the future)
    actTicks.push_front(act_tick);

    // cannot activate more than X times in time window tXAW, push the
    // next one (the X + 1'st activate) to be tXAW away from the
    // oldest in our window of X
    if (actTicks.back() && (act_tick - actTicks.back()) < tXAW) {
        DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate no earlier "
                "than %d\n", activationLimit, actTicks.back() + tXAW);
        for (int i = 0; i < ranksPerChannel; i++)
            for (int j = 0; j < banksPerRank; j++)
                // next activate must not happen before end of window
                banks[i][j].freeAt = std::max(banks[i][j].freeAt,
                                              actTicks.back() + tXAW);
    }
}
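
// estimateLatency(), illustrated with hypothetical timings (tRP = tRCD =
// tCL = 15 ns, expressed in ticks): under the open-page policy a row-buffer
// hit on an idle bank costs just tCL = 15 ns, while a miss that must
// precharge, activate and then read costs tRP + tRCD + tCL = 45 ns, plus any
// wait for bank.freeAt or bank.tRASDoneAt. recordActivate() additionally
// pushes out bank availability if more than activationLimit (e.g. four)
// activates would otherwise fall inside one tXAW window.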

void
SimpleDRAM::doDRAMAccess(DRAMPacket* dram_pkt)
{

    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    // estimate the bank and access latency
    pair<Tick, Tick> lat = estimateLatency(dram_pkt, curTick());
    Tick bankLat = lat.first;
    Tick accessLat = lat.second;
    Tick actTick;

    // This request was woken up at this time based on a prior call
    // to estimateLatency(). However, between then and now, both the
    // accessLatency and/or busBusyUntil may have changed. We need
    // to correct for that.

    Tick addDelay = (curTick() + accessLat < busBusyUntil) ?
        busBusyUntil - (curTick() + accessLat) : 0;

    Bank& bank = dram_pkt->bank_ref;

    // Update bank state
    if (pageMgmt == Enums::open) {
        bank.openRow = dram_pkt->row;
        bank.freeAt = curTick() + addDelay + accessLat;
        bank.bytesAccessed += burstSize;

        // If you activated a new row due to this access, the next access
        // will have to respect tRAS for this bank.
        if (!rowHitFlag) {
            // any waiting for banks is accounted for in freeAt
            actTick = bank.freeAt - tCL - tRCD;
            bank.tRASDoneAt = actTick + tRAS;
            recordActivate(actTick);

            // sample the number of bytes accessed and reset it as
            // we are now closing this row
            bytesPerActivate.sample(bank.bytesAccessed);
            bank.bytesAccessed = 0;
        }
    } else if (pageMgmt == Enums::close) {
        actTick = curTick() + addDelay + accessLat - tRCD - tCL;
        recordActivate(actTick);

        // If the DRAM has a very quick tRAS, bank can be made free
        // after consecutive tCL,tRCD,tRP times. In general, however,
        // an additional wait is required to respect tRAS.
        bank.freeAt = std::max(actTick + tRAS + tRP,
                               actTick + tRCD + tCL + tRP);

        DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt);
        bytesPerActivate.sample(burstSize);
    } else
        panic("No page management policy chosen\n");

    // Update request parameters
    dram_pkt->readyTime = curTick() + addDelay + accessLat + tBURST;


    DPRINTF(DRAM, "Req %lld: curtick is %lld accessLat is %d " \
            "readytime is %lld busbusyuntil is %lld. " \
            "Scheduling at readyTime\n", dram_pkt->addr,
            curTick(), accessLat, dram_pkt->readyTime, busBusyUntil);

    // Make sure requests are not overlapping on the databus
    assert(dram_pkt->readyTime - busBusyUntil >= tBURST);

    // Update bus state
    busBusyUntil = dram_pkt->readyTime;

    DPRINTF(DRAM, "Access time is %lld\n",
            dram_pkt->readyTime - dram_pkt->entryTime);

    if (rowHitFlag) {
        if (dram_pkt->isRead)
            readRowHits++;
        else
            writeRowHits++;
    }

    // At this point, commonality between reads and writes ends.
    // For writes, we are done since we long ago responded to the
    // requestor. We also don't care about stats for writes. For
    // reads, we still need to figure out responding to the requestor,
    // and capture stats.

    if (!dram_pkt->isRead) {
        return;
    }

    // Update stats
    totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
    totBankLat += bankLat;
    totBusLat += tBURST;
    totQLat += dram_pkt->readyTime - dram_pkt->entryTime - bankLat - tBURST;


    // At this point we're done dealing with the request
    // It will be moved to a separate response queue with a
    // correct readyTime, and eventually be sent back at that
    // time
    moveToRespQ();

    // The absolute soonest you have to start thinking about the
    // next request is the longest access time that can occur before
    // busBusyUntil. Assuming you need to precharge,
    // open a new row, and access, it is tRP + tRCD + tCL

    Tick newTime = (busBusyUntil > tRP + tRCD + tCL) ?
        std::max(busBusyUntil - (tRP + tRCD + tCL), curTick()) :
        curTick();

    if (!nextReqEvent.scheduled() && !stopReads) {
        schedule(nextReqEvent, newTime);
    } else {
        if (newTime < nextReqEvent.when())
            reschedule(nextReqEvent, newTime);
    }


}

void
SimpleDRAM::moveToRespQ()
{
    // Remove from read queue
    DRAMPacket* dram_pkt = readQueue.front();
    readQueue.pop_front();

    // sanity check
    assert(dram_pkt->size <= burstSize);

    // Insert into response queue sorted by readyTime
    // It will be sent back to the requestor at its
    // readyTime
    if (respQueue.empty()) {
        respQueue.push_front(dram_pkt);
        assert(!respondEvent.scheduled());
        assert(dram_pkt->readyTime >= curTick());
        schedule(respondEvent, dram_pkt->readyTime);
    } else {
        bool done = false;
        auto i = respQueue.begin();
        while (!done && i != respQueue.end()) {
            if ((*i)->readyTime > dram_pkt->readyTime) {
                respQueue.insert(i, dram_pkt);
                done = true;
            }
            ++i;
        }

        if (!done)
            respQueue.push_back(dram_pkt);

        assert(respondEvent.scheduled());

        if (respQueue.front()->readyTime < respondEvent.when()) {
            assert(respQueue.front()->readyTime >= curTick());
            reschedule(respondEvent, respQueue.front()->readyTime);
        }
    }
}

void
SimpleDRAM::scheduleNextReq()
{
    DPRINTF(DRAM, "Reached scheduleNextReq()\n");

    // Figure out which read request goes next, and move it to the
    // front of the read queue
    if (!chooseNextRead()) {
        // In the case there is no read request to go next, see if we
        // are asked to drain, and if so trigger writes, this also
        // ensures that if we hit the write limit we will do this
        // multiple times until we are completely drained
        if (drainManager && !writeQueue.empty() && !writeEvent.scheduled())
            triggerWrites();
    } else {
        doDRAMAccess(readQueue.front());
    }
}

Tick
SimpleDRAM::maxBankFreeAt() const
{
    Tick banksFree = 0;

    for (int i = 0; i < ranksPerChannel; i++)
        for (int j = 0; j < banksPerRank; j++)
            banksFree = std::max(banks[i][j].freeAt, banksFree);

    return banksFree;
}

void
SimpleDRAM::processRefreshEvent()
{
    DPRINTF(DRAM, "Refreshing at tick %ld\n", curTick());

    Tick banksFree = std::max(curTick(), maxBankFreeAt()) + tRFC;

    for (int i = 0; i < ranksPerChannel; i++)
        for (int j = 0; j < banksPerRank; j++)
            banks[i][j].freeAt = banksFree;

    schedule(refreshEvent, curTick() + tREFI);
}
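
// Refresh, illustrated with hypothetical DDR3-like values: with tREFI on the
// order of 7.8 us and tRFC of a few hundred nanoseconds, every tREFI all
// banks get their freeAt pushed to max(curTick(), maxBankFreeAt()) + tRFC,
// i.e. the whole channel is blocked for one tRFC each refresh interval.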

void
SimpleDRAM::regStats()
{
    using namespace Stats;

    AbstractMemory::regStats();

    readReqs
        .name(name() + ".readReqs")
        .desc("Total number of read requests accepted by DRAM controller");

    writeReqs
        .name(name() + ".writeReqs")
        .desc("Total number of write requests accepted by DRAM controller");

    readBursts
        .name(name() + ".readBursts")
        .desc("Total number of DRAM read bursts. "
              "Each DRAM read request translates to either one or multiple "
              "DRAM read bursts");

    writeBursts
        .name(name() + ".writeBursts")
        .desc("Total number of DRAM write bursts. "
              "Each DRAM write request translates to either one or multiple "
              "DRAM write bursts");

    servicedByWrQ
        .name(name() + ".servicedByWrQ")
        .desc("Number of DRAM read bursts serviced by write Q");

    neitherReadNorWrite
        .name(name() + ".neitherReadNorWrite")
        .desc("Reqs where no action is needed");

    perBankRdReqs
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankRdReqs")
        .desc("Track reads on a per bank basis");

    perBankWrReqs
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankWrReqs")
        .desc("Track writes on a per bank basis");

    avgRdQLen
        .name(name() + ".avgRdQLen")
        .desc("Average read queue length over time")
        .precision(2);

    avgWrQLen
        .name(name() + ".avgWrQLen")
        .desc("Average write queue length over time")
        .precision(2);

    totQLat
        .name(name() + ".totQLat")
        .desc("Total cycles spent in queuing delays");

    totBankLat
        .name(name() + ".totBankLat")
        .desc("Total cycles spent in bank access");

    totBusLat
        .name(name() + ".totBusLat")
        .desc("Total cycles spent in databus access");

    totMemAccLat
        .name(name() + ".totMemAccLat")
        .desc("Sum of mem lat for all requests");

    avgQLat
        .name(name() + ".avgQLat")
        .desc("Average queueing delay per request")
        .precision(2);

    avgQLat = totQLat / (readBursts - servicedByWrQ);

    avgBankLat
        .name(name() + ".avgBankLat")
        .desc("Average bank access latency per request")
        .precision(2);

    avgBankLat = totBankLat / (readBursts - servicedByWrQ);

    avgBusLat
        .name(name() + ".avgBusLat")
        .desc("Average bus latency per request")
        .precision(2);

    avgBusLat = totBusLat / (readBursts - servicedByWrQ);

    avgMemAccLat
        .name(name() + ".avgMemAccLat")
        .desc("Average memory access latency")
        .precision(2);

    avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ);

    numRdRetry
        .name(name() + ".numRdRetry")
        .desc("Number of times rd buffer was full causing retry");

    numWrRetry
        .name(name() + ".numWrRetry")
        .desc("Number of times wr buffer was full causing retry");

    readRowHits
        .name(name() + ".readRowHits")
        .desc("Number of row buffer hits during reads");

    writeRowHits
        .name(name() + ".writeRowHits")
        .desc("Number of row buffer hits during writes");

    readRowHitRate
        .name(name() + ".readRowHitRate")
        .desc("Row buffer hit rate for reads")
        .precision(2);

    readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100;

    writeRowHitRate
        .name(name() + ".writeRowHitRate")
        .desc("Row buffer hit rate for writes")
        .precision(2);

    writeRowHitRate = (writeRowHits / writeBursts) * 100;

    readPktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".readPktSize")
        .desc("Categorize read packet sizes");

    writePktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".writePktSize")
        .desc("Categorize write packet sizes");

    rdQLenPdf
        .init(readBufferSize)
        .name(name() + ".rdQLenPdf")
        .desc("What read queue length does an incoming req see");

    wrQLenPdf
        .init(writeBufferSize)
        .name(name() + ".wrQLenPdf")
        .desc("What write queue length does an incoming req see");

    bytesPerActivate
        .init(rowBufferSize)
        .name(name() + ".bytesPerActivate")
        .desc("Bytes accessed per row activation")
        .flags(nozero);

    bytesRead
        .name(name() + ".bytesRead")
        .desc("Total number of bytes read from memory");

    bytesWritten
        .name(name() + ".bytesWritten")
        .desc("Total number of bytes written to memory");

    bytesConsumedRd
        .name(name() + ".bytesConsumedRd")
        .desc("bytesRead derated as per pkt->getSize()");

    bytesConsumedWr
        .name(name() + ".bytesConsumedWr")
        .desc("bytesWritten derated as per pkt->getSize()");

    avgRdBW
        .name(name() + ".avgRdBW")
        .desc("Average achieved read bandwidth in MB/s")
        .precision(2);

    avgRdBW = (bytesRead / 1000000) / simSeconds;

    avgWrBW
        .name(name() + ".avgWrBW")
        .desc("Average achieved write bandwidth in MB/s")
        .precision(2);

    avgWrBW = (bytesWritten / 1000000) / simSeconds;

    avgConsumedRdBW
        .name(name() + ".avgConsumedRdBW")
        .desc("Average consumed read bandwidth in MB/s")
        .precision(2);

    avgConsumedRdBW = (bytesConsumedRd / 1000000) / simSeconds;

    avgConsumedWrBW
        .name(name() + ".avgConsumedWrBW")
        .desc("Average consumed write bandwidth in MB/s")
        .precision(2);

    avgConsumedWrBW = (bytesConsumedWr / 1000000) / simSeconds;

    peakBW
        .name(name() + ".peakBW")
        .desc("Theoretical peak bandwidth in MB/s")
        .precision(2);

    peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000;

    busUtil
        .name(name() + ".busUtil")
        .desc("Data bus utilization in percentage")
        .precision(2);

    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;

    totGap
        .name(name() + ".totGap")
        .desc("Total gap between requests");

    avgGap
        .name(name() + ".avgGap")
        .desc("Average gap between requests")
        .precision(2);

    avgGap = totGap / (readReqs + writeReqs);
}

void
SimpleDRAM::recvFunctional(PacketPtr pkt)
{
    // rely on the abstract memory
    functionalAccess(pkt);
}

BaseSlavePort&
SimpleDRAM::getSlavePort(const string &if_name, PortID idx)
{
    if (if_name != "port") {
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return port;
    }
}

unsigned int
SimpleDRAM::drain(DrainManager *dm)
{
    unsigned int count = port.drain(dm);

    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (!(writeQueue.empty() && readQueue.empty() &&
          respQueue.empty())) {
        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
                " resp: %d\n", writeQueue.size(), readQueue.size(),
                respQueue.size());
        ++count;
        drainManager = dm;
        // the only part that is not drained automatically over time
        // is the write queue, thus trigger writes if there are any
        // waiting and no reads waiting, otherwise wait until the
        // reads are done
        if (readQueue.empty() && !writeQueue.empty() &&
            !writeEvent.scheduled())
            triggerWrites();
    }

    if (count)
        setDrainState(Drainable::Draining);
    else
        setDrainState(Drainable::Drained);
    return count;
}

SimpleDRAM::MemoryPort::MemoryPort(const std::string& name, SimpleDRAM& _memory)
    : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this),
      memory(_memory)
{ }

AddrRangeList
SimpleDRAM::MemoryPort::getAddrRanges() const
{
    AddrRangeList ranges;
    ranges.push_back(memory.getAddrRange());
    return ranges;
}

void
SimpleDRAM::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(memory.name());

    if (!queue.checkFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory.recvFunctional(pkt);
    }

    pkt->popLabel();
}

Tick
SimpleDRAM::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory.recvAtomic(pkt);
}

bool
SimpleDRAM::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller
    return memory.recvTimingReq(pkt);
}

SimpleDRAM*
SimpleDRAMParams::create()
{
    return new SimpleDRAM(this);
}