dram_ctrl.cc revision 10147
19243SN/A/* 29832SN/A * Copyright (c) 2010-2013 ARM Limited 39243SN/A * All rights reserved 49243SN/A * 59243SN/A * The license below extends only to copyright in the software and shall 69243SN/A * not be construed as granting a license to any other intellectual 79243SN/A * property including but not limited to intellectual property relating 89243SN/A * to a hardware implementation of the functionality of the software 99243SN/A * licensed hereunder. You may use the software subject to the license 109243SN/A * terms below provided that you ensure that this notice is replicated 119243SN/A * unmodified and in its entirety in all distributions of the software, 129243SN/A * modified or unmodified, in source code or in binary form. 139243SN/A * 149831SN/A * Copyright (c) 2013 Amin Farmahini-Farahani 159831SN/A * All rights reserved. 169831SN/A * 179243SN/A * Redistribution and use in source and binary forms, with or without 189243SN/A * modification, are permitted provided that the following conditions are 199243SN/A * met: redistributions of source code must retain the above copyright 209243SN/A * notice, this list of conditions and the following disclaimer; 219243SN/A * redistributions in binary form must reproduce the above copyright 229243SN/A * notice, this list of conditions and the following disclaimer in the 239243SN/A * documentation and/or other materials provided with the distribution; 249243SN/A * neither the name of the copyright holders nor the names of its 259243SN/A * contributors may be used to endorse or promote products derived from 269243SN/A * this software without specific prior written permission. 279243SN/A * 289243SN/A * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 299243SN/A * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 309243SN/A * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 319243SN/A * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 329243SN/A * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 339243SN/A * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 349243SN/A * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 359243SN/A * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 369243SN/A * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 379243SN/A * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 389243SN/A * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 399243SN/A * 409243SN/A * Authors: Andreas Hansson 419243SN/A * Ani Udipi 429967SN/A * Neha Agarwal 439243SN/A */ 449243SN/A 4510146Sandreas.hansson@arm.com#include "base/bitfield.hh" 469356SN/A#include "base/trace.hh" 4710146Sandreas.hansson@arm.com#include "debug/DRAM.hh" 489352SN/A#include "debug/Drain.hh" 4910146Sandreas.hansson@arm.com#include "mem/dram_ctrl.hh" 509814SN/A#include "sim/system.hh" 519243SN/A 529243SN/Ausing namespace std; 539243SN/A 5410146Sandreas.hansson@arm.comDRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) : 559243SN/A AbstractMemory(p), 569243SN/A port(name() + ".port", *this), 579243SN/A retryRdReq(false), retryWrReq(false), 589969SN/A rowHitFlag(false), stopReads(false), 599243SN/A writeEvent(this), respondEvent(this), 609342SN/A refreshEvent(this), nextReqEvent(this), drainManager(NULL), 619831SN/A deviceBusWidth(p->device_bus_width), burstLength(p->burst_length), 629831SN/A deviceRowBufferSize(p->device_rowbuffer_size), 639831SN/A devicesPerRank(p->devices_per_rank), 649831SN/A burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8), 659831SN/A rowBufferSize(devicesPerRank * deviceRowBufferSize), 6610140SN/A columnsPerRowBuffer(rowBufferSize / burstSize), 679243SN/A ranksPerChannel(p->ranks_per_channel), 689566SN/A banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0), 699243SN/A readBufferSize(p->read_buffer_size), 709243SN/A writeBufferSize(p->write_buffer_size), 7110140SN/A writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0), 7210140SN/A writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0), 7310147Sandreas.hansson@arm.com minWritesPerSwitch(p->min_writes_per_switch), 7410147Sandreas.hansson@arm.com writesThisTime(0), readsThisTime(0), 759243SN/A tWTR(p->tWTR), tBURST(p->tBURST), 769963SN/A tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS), 779971SN/A tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD), 789488SN/A tXAW(p->tXAW), activationLimit(p->activation_limit), 799243SN/A memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping), 809243SN/A pageMgmt(p->page_policy), 8110141SN/A maxAccessesPerRow(p->max_accesses_per_row), 829726SN/A frontendLatency(p->static_frontend_latency), 839726SN/A backendLatency(p->static_backend_latency), 8410143SN/A busBusyUntil(0), prevArrival(0), 8510140SN/A newTime(0), startTickPrechargeAll(0), numBanksActive(0) 869243SN/A{ 879243SN/A // create the bank states based on the dimensions of the ranks and 889243SN/A // banks 899243SN/A banks.resize(ranksPerChannel); 909969SN/A actTicks.resize(ranksPerChannel); 919243SN/A for (size_t c = 0; c < ranksPerChannel; ++c) { 929243SN/A banks[c].resize(banksPerRank); 939969SN/A actTicks[c].resize(activationLimit, 0); 949243SN/A } 959243SN/A 9610140SN/A // perform a basic check of the write thresholds 9710140SN/A if (p->write_low_thresh_perc >= p->write_high_thresh_perc) 9810140SN/A fatal("Write buffer low threshold %d must be smaller than the " 9910140SN/A "high threshold %d\n", p->write_low_thresh_perc, 10010140SN/A p->write_high_thresh_perc); 1019243SN/A 1029243SN/A // determine the rows per bank by looking at the total capacity 1039567SN/A uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size()); 1049243SN/A 1059243SN/A DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity, 1069243SN/A AbstractMemory::size()); 1079831SN/A 1089831SN/A DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n", 1099831SN/A rowBufferSize, columnsPerRowBuffer); 1109831SN/A 1119831SN/A rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel); 1129243SN/A 1139566SN/A if (range.interleaved()) { 1149566SN/A if (channels != range.stripes()) 11510143SN/A fatal("%s has %d interleaved address stripes but %d channel(s)\n", 1169566SN/A name(), range.stripes(), channels); 1179566SN/A 11810136SN/A if (addrMapping == Enums::RoRaBaChCo) { 1199831SN/A if (rowBufferSize != range.granularity()) { 12010143SN/A fatal("Interleaving of %s doesn't match RoRaBaChCo " 12110136SN/A "address map\n", name()); 1229566SN/A } 12310136SN/A } else if (addrMapping == Enums::RoRaBaCoCh) { 12410136SN/A if (system()->cacheLineSize() != range.granularity()) { 12510143SN/A fatal("Interleaving of %s doesn't match RoRaBaCoCh " 12610136SN/A "address map\n", name()); 1279669SN/A } 12810136SN/A } else if (addrMapping == Enums::RoCoRaBaCh) { 12910136SN/A if (system()->cacheLineSize() != range.granularity()) 13010143SN/A fatal("Interleaving of %s doesn't match RoCoRaBaCh " 13110136SN/A "address map\n", name()); 1329566SN/A } 1339566SN/A } 1349243SN/A} 1359243SN/A 1369243SN/Avoid 13710146Sandreas.hansson@arm.comDRAMCtrl::init() 13810140SN/A{ 13910140SN/A if (!port.isConnected()) { 14010146Sandreas.hansson@arm.com fatal("DRAMCtrl %s is unconnected!\n", name()); 14110140SN/A } else { 14210140SN/A port.sendRangeChange(); 14310140SN/A } 14410140SN/A} 14510140SN/A 14610140SN/Avoid 14710146Sandreas.hansson@arm.comDRAMCtrl::startup() 1489243SN/A{ 14910143SN/A // update the start tick for the precharge accounting to the 15010143SN/A // current tick 15110143SN/A startTickPrechargeAll = curTick(); 15210143SN/A 1539243SN/A // print the configuration of the controller 1549243SN/A printParams(); 1559243SN/A 1569243SN/A // kick off the refresh 1579567SN/A schedule(refreshEvent, curTick() + tREFI); 1589243SN/A} 1599243SN/A 1609243SN/ATick 16110146Sandreas.hansson@arm.comDRAMCtrl::recvAtomic(PacketPtr pkt) 1629243SN/A{ 1639243SN/A DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr()); 1649243SN/A 1659243SN/A // do the actual memory access and turn the packet into a response 1669243SN/A access(pkt); 1679243SN/A 1689243SN/A Tick latency = 0; 1699243SN/A if (!pkt->memInhibitAsserted() && pkt->hasData()) { 1709243SN/A // this value is not supposed to be accurate, just enough to 1719243SN/A // keep things going, mimic a closed page 1729243SN/A latency = tRP + tRCD + tCL; 1739243SN/A } 1749243SN/A return latency; 1759243SN/A} 1769243SN/A 1779243SN/Abool 17810146Sandreas.hansson@arm.comDRAMCtrl::readQueueFull(unsigned int neededEntries) const 1799243SN/A{ 1809831SN/A DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n", 1819831SN/A readBufferSize, readQueue.size() + respQueue.size(), 1829831SN/A neededEntries); 1839243SN/A 1849831SN/A return 1859831SN/A (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize; 1869243SN/A} 1879243SN/A 1889243SN/Abool 18910146Sandreas.hansson@arm.comDRAMCtrl::writeQueueFull(unsigned int neededEntries) const 1909243SN/A{ 1919831SN/A DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n", 1929831SN/A writeBufferSize, writeQueue.size(), neededEntries); 1939831SN/A return (writeQueue.size() + neededEntries) > writeBufferSize; 1949243SN/A} 1959243SN/A 19610146Sandreas.hansson@arm.comDRAMCtrl::DRAMPacket* 19710146Sandreas.hansson@arm.comDRAMCtrl::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size, 19810143SN/A bool isRead) 1999243SN/A{ 2009669SN/A // decode the address based on the address mapping scheme, with 20110136SN/A // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and 20210136SN/A // channel, respectively 2039243SN/A uint8_t rank; 2049967SN/A uint8_t bank; 2059243SN/A uint16_t row; 2069243SN/A 2079243SN/A // truncate the address to the access granularity 2089831SN/A Addr addr = dramPktAddr / burstSize; 2099243SN/A 2109491SN/A // we have removed the lowest order address bits that denote the 2119831SN/A // position within the column 21210136SN/A if (addrMapping == Enums::RoRaBaChCo) { 2139491SN/A // the lowest order bits denote the column to ensure that 2149491SN/A // sequential cache lines occupy the same row 2159831SN/A addr = addr / columnsPerRowBuffer; 2169243SN/A 2179669SN/A // take out the channel part of the address 2189566SN/A addr = addr / channels; 2199566SN/A 2209669SN/A // after the channel bits, get the bank bits to interleave 2219669SN/A // over the banks 2229669SN/A bank = addr % banksPerRank; 2239669SN/A addr = addr / banksPerRank; 2249669SN/A 2259669SN/A // after the bank, we get the rank bits which thus interleaves 2269669SN/A // over the ranks 2279669SN/A rank = addr % ranksPerChannel; 2289669SN/A addr = addr / ranksPerChannel; 2299669SN/A 2309669SN/A // lastly, get the row bits 2319669SN/A row = addr % rowsPerBank; 2329669SN/A addr = addr / rowsPerBank; 23310136SN/A } else if (addrMapping == Enums::RoRaBaCoCh) { 2349669SN/A // take out the channel part of the address 2359669SN/A addr = addr / channels; 2369669SN/A 2379669SN/A // next, the column 2389831SN/A addr = addr / columnsPerRowBuffer; 2399669SN/A 2409669SN/A // after the column bits, we get the bank bits to interleave 2419491SN/A // over the banks 2429243SN/A bank = addr % banksPerRank; 2439243SN/A addr = addr / banksPerRank; 2449243SN/A 2459491SN/A // after the bank, we get the rank bits which thus interleaves 2469491SN/A // over the ranks 2479243SN/A rank = addr % ranksPerChannel; 2489243SN/A addr = addr / ranksPerChannel; 2499243SN/A 2509491SN/A // lastly, get the row bits 2519243SN/A row = addr % rowsPerBank; 2529243SN/A addr = addr / rowsPerBank; 25310136SN/A } else if (addrMapping == Enums::RoCoRaBaCh) { 2549491SN/A // optimise for closed page mode and utilise maximum 2559491SN/A // parallelism of the DRAM (at the cost of power) 2569491SN/A 2579566SN/A // take out the channel part of the address, not that this has 2589566SN/A // to match with how accesses are interleaved between the 2599566SN/A // controllers in the address mapping 2609566SN/A addr = addr / channels; 2619566SN/A 2629491SN/A // start with the bank bits, as this provides the maximum 2639491SN/A // opportunity for parallelism between requests 2649243SN/A bank = addr % banksPerRank; 2659243SN/A addr = addr / banksPerRank; 2669243SN/A 2679491SN/A // next get the rank bits 2689243SN/A rank = addr % ranksPerChannel; 2699243SN/A addr = addr / ranksPerChannel; 2709243SN/A 2719491SN/A // next the column bits which we do not need to keep track of 2729491SN/A // and simply skip past 2739831SN/A addr = addr / columnsPerRowBuffer; 2749243SN/A 2759491SN/A // lastly, get the row bits 2769243SN/A row = addr % rowsPerBank; 2779243SN/A addr = addr / rowsPerBank; 2789243SN/A } else 2799243SN/A panic("Unknown address mapping policy chosen!"); 2809243SN/A 2819243SN/A assert(rank < ranksPerChannel); 2829243SN/A assert(bank < banksPerRank); 2839243SN/A assert(row < rowsPerBank); 2849243SN/A 2859243SN/A DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n", 2869831SN/A dramPktAddr, rank, bank, row); 2879243SN/A 2889243SN/A // create the corresponding DRAM packet with the entry time and 2899567SN/A // ready time set to the current tick, the latter will be updated 2909567SN/A // later 2919967SN/A uint16_t bank_id = banksPerRank * rank + bank; 2929967SN/A return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr, 2939967SN/A size, banks[rank][bank]); 2949243SN/A} 2959243SN/A 2969243SN/Avoid 29710146Sandreas.hansson@arm.comDRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount) 2989243SN/A{ 2999243SN/A // only add to the read queue here. whenever the request is 3009243SN/A // eventually done, set the readyTime, and call schedule() 3019243SN/A assert(!pkt->isWrite()); 3029243SN/A 3039831SN/A assert(pktCount != 0); 3049831SN/A 3059831SN/A // if the request size is larger than burst size, the pkt is split into 3069831SN/A // multiple DRAM packets 3079831SN/A // Note if the pkt starting address is not aligened to burst size, the 3089831SN/A // address of first DRAM packet is kept unaliged. Subsequent DRAM packets 3099831SN/A // are aligned to burst size boundaries. This is to ensure we accurately 3109831SN/A // check read packets against packets in write queue. 3119243SN/A Addr addr = pkt->getAddr(); 3129831SN/A unsigned pktsServicedByWrQ = 0; 3139831SN/A BurstHelper* burst_helper = NULL; 3149831SN/A for (int cnt = 0; cnt < pktCount; ++cnt) { 3159831SN/A unsigned size = std::min((addr | (burstSize - 1)) + 1, 3169831SN/A pkt->getAddr() + pkt->getSize()) - addr; 3179831SN/A readPktSize[ceilLog2(size)]++; 3189831SN/A readBursts++; 3199243SN/A 3209831SN/A // First check write buffer to see if the data is already at 3219831SN/A // the controller 3229831SN/A bool foundInWrQ = false; 3239833SN/A for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) { 3249832SN/A // check if the read is subsumed in the write entry we are 3259832SN/A // looking at 3269832SN/A if ((*i)->addr <= addr && 3279832SN/A (addr + size) <= ((*i)->addr + (*i)->size)) { 3289831SN/A foundInWrQ = true; 3299831SN/A servicedByWrQ++; 3309831SN/A pktsServicedByWrQ++; 3319831SN/A DPRINTF(DRAM, "Read to addr %lld with size %d serviced by " 3329831SN/A "write queue\n", addr, size); 3339975SN/A bytesReadWrQ += burstSize; 3349831SN/A break; 3359831SN/A } 3369243SN/A } 3379831SN/A 3389831SN/A // If not found in the write q, make a DRAM packet and 3399831SN/A // push it onto the read queue 3409831SN/A if (!foundInWrQ) { 3419831SN/A 3429831SN/A // Make the burst helper for split packets 3439831SN/A if (pktCount > 1 && burst_helper == NULL) { 3449831SN/A DPRINTF(DRAM, "Read to addr %lld translates to %d " 3459831SN/A "dram requests\n", pkt->getAddr(), pktCount); 3469831SN/A burst_helper = new BurstHelper(pktCount); 3479831SN/A } 3489831SN/A 3499966SN/A DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true); 3509831SN/A dram_pkt->burstHelper = burst_helper; 3519831SN/A 3529831SN/A assert(!readQueueFull(1)); 3539831SN/A rdQLenPdf[readQueue.size() + respQueue.size()]++; 3549831SN/A 3559831SN/A DPRINTF(DRAM, "Adding to read queue\n"); 3569831SN/A 3579831SN/A readQueue.push_back(dram_pkt); 3589831SN/A 3599831SN/A // Update stats 3609831SN/A avgRdQLen = readQueue.size() + respQueue.size(); 3619831SN/A } 3629831SN/A 3639831SN/A // Starting address of next dram pkt (aligend to burstSize boundary) 3649831SN/A addr = (addr | (burstSize - 1)) + 1; 3659243SN/A } 3669243SN/A 3679831SN/A // If all packets are serviced by write queue, we send the repsonse back 3689831SN/A if (pktsServicedByWrQ == pktCount) { 3699831SN/A accessAndRespond(pkt, frontendLatency); 3709831SN/A return; 3719831SN/A } 3729243SN/A 3739831SN/A // Update how many split packets are serviced by write queue 3749831SN/A if (burst_helper != NULL) 3759831SN/A burst_helper->burstsServiced = pktsServicedByWrQ; 3769243SN/A 3779567SN/A // If we are not already scheduled to get the read request out of 3789567SN/A // the queue, do so now 3799243SN/A if (!nextReqEvent.scheduled() && !stopReads) { 3809567SN/A DPRINTF(DRAM, "Request scheduled immediately\n"); 3819567SN/A schedule(nextReqEvent, curTick()); 3829243SN/A } 3839243SN/A} 3849243SN/A 3859243SN/Avoid 38610146Sandreas.hansson@arm.comDRAMCtrl::processWriteEvent() 3879243SN/A{ 3889567SN/A assert(!writeQueue.empty()); 3899243SN/A 3909972SN/A DPRINTF(DRAM, "Beginning DRAM Write\n"); 3919243SN/A Tick temp1 M5_VAR_USED = std::max(curTick(), busBusyUntil); 3929243SN/A Tick temp2 M5_VAR_USED = std::max(curTick(), maxBankFreeAt()); 3939243SN/A 3949972SN/A chooseNextWrite(); 3959972SN/A DRAMPacket* dram_pkt = writeQueue.front(); 3969972SN/A // sanity check 3979972SN/A assert(dram_pkt->size <= burstSize); 3989972SN/A doDRAMAccess(dram_pkt); 3999243SN/A 4009972SN/A writeQueue.pop_front(); 4019972SN/A delete dram_pkt; 4029243SN/A 40310140SN/A DPRINTF(DRAM, "Writing, bus busy for %lld ticks, banks busy " 40410140SN/A "for %lld ticks\n", busBusyUntil - temp1, maxBankFreeAt() - temp2); 4059243SN/A 40610147Sandreas.hansson@arm.com // If we emptied the write queue, or got sufficiently below the 40710147Sandreas.hansson@arm.com // threshold (using the minWritesPerSwitch as the hysteresis) and 40810140SN/A // are not draining, or we have reads waiting and have done enough 40910140SN/A // writes, then switch to reads. The retry above could already 41010140SN/A // have caused it to be scheduled, so first check 41110140SN/A if (writeQueue.empty() || 41210147Sandreas.hansson@arm.com (writeQueue.size() + minWritesPerSwitch < writeLowThreshold && 41310147Sandreas.hansson@arm.com !drainManager) || 41410140SN/A (!readQueue.empty() && writesThisTime >= minWritesPerSwitch)) { 4159972SN/A // turn the bus back around for reads again 4169972SN/A busBusyUntil += tWTR; 4179972SN/A stopReads = false; 41810147Sandreas.hansson@arm.com 41910147Sandreas.hansson@arm.com DPRINTF(DRAM, "Switching to reads after %d writes with %d writes " 42010147Sandreas.hansson@arm.com "waiting\n", writesThisTime, writeQueue.size()); 42110147Sandreas.hansson@arm.com 42210147Sandreas.hansson@arm.com wrPerTurnAround.sample(writesThisTime); 42310140SN/A writesThisTime = 0; 4249972SN/A 4259972SN/A if (!nextReqEvent.scheduled()) 4269972SN/A schedule(nextReqEvent, busBusyUntil); 4279972SN/A } else { 4289972SN/A assert(!writeEvent.scheduled()); 4299972SN/A DPRINTF(DRAM, "Next write scheduled at %lld\n", newTime); 4309972SN/A schedule(writeEvent, newTime); 4319972SN/A } 4329243SN/A 4339243SN/A if (retryWrReq) { 4349243SN/A retryWrReq = false; 4359243SN/A port.sendRetry(); 4369243SN/A } 4379243SN/A 4389243SN/A // if there is nothing left in any queue, signal a drain 4399567SN/A if (writeQueue.empty() && readQueue.empty() && 4409567SN/A respQueue.empty () && drainManager) { 4419342SN/A drainManager->signalDrainDone(); 4429342SN/A drainManager = NULL; 4439243SN/A } 4449243SN/A} 4459243SN/A 4469966SN/A 4479243SN/Avoid 44810146Sandreas.hansson@arm.comDRAMCtrl::triggerWrites() 4499243SN/A{ 45010147Sandreas.hansson@arm.com DPRINTF(DRAM, "Switching to writes after %d reads with %d reads " 45110147Sandreas.hansson@arm.com "waiting\n", readsThisTime, readQueue.size()); 45210147Sandreas.hansson@arm.com 4539243SN/A // Flag variable to stop any more read scheduling 4549243SN/A stopReads = true; 4559243SN/A 45610143SN/A Tick write_start_time = std::max(busBusyUntil, curTick()) + tWTR; 4579243SN/A 45810143SN/A DPRINTF(DRAM, "Writes scheduled at %lld\n", write_start_time); 4599243SN/A 46010147Sandreas.hansson@arm.com // there is some danger here as there might still be reads 46110147Sandreas.hansson@arm.com // happening before the switch actually takes place 46210147Sandreas.hansson@arm.com rdPerTurnAround.sample(readsThisTime); 46310147Sandreas.hansson@arm.com readsThisTime = 0; 46410147Sandreas.hansson@arm.com 46510143SN/A assert(write_start_time >= curTick()); 4669243SN/A assert(!writeEvent.scheduled()); 46710143SN/A schedule(writeEvent, write_start_time); 4689243SN/A} 4699243SN/A 4709243SN/Avoid 47110146Sandreas.hansson@arm.comDRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pktCount) 4729243SN/A{ 4739243SN/A // only add to the write queue here. whenever the request is 4749243SN/A // eventually done, set the readyTime, and call schedule() 4759243SN/A assert(pkt->isWrite()); 4769243SN/A 4779831SN/A // if the request size is larger than burst size, the pkt is split into 4789831SN/A // multiple DRAM packets 4799831SN/A Addr addr = pkt->getAddr(); 4809831SN/A for (int cnt = 0; cnt < pktCount; ++cnt) { 4819831SN/A unsigned size = std::min((addr | (burstSize - 1)) + 1, 4829831SN/A pkt->getAddr() + pkt->getSize()) - addr; 4839831SN/A writePktSize[ceilLog2(size)]++; 4849831SN/A writeBursts++; 4859243SN/A 4869832SN/A // see if we can merge with an existing item in the write 4879838SN/A // queue and keep track of whether we have merged or not so we 4889838SN/A // can stop at that point and also avoid enqueueing a new 4899838SN/A // request 4909832SN/A bool merged = false; 4919832SN/A auto w = writeQueue.begin(); 4929243SN/A 4939832SN/A while(!merged && w != writeQueue.end()) { 4949832SN/A // either of the two could be first, if they are the same 4959832SN/A // it does not matter which way we go 4969832SN/A if ((*w)->addr >= addr) { 4979838SN/A // the existing one starts after the new one, figure 4989838SN/A // out where the new one ends with respect to the 4999838SN/A // existing one 5009832SN/A if ((addr + size) >= ((*w)->addr + (*w)->size)) { 5019832SN/A // check if the existing one is completely 5029832SN/A // subsumed in the new one 5039832SN/A DPRINTF(DRAM, "Merging write covering existing burst\n"); 5049832SN/A merged = true; 5059832SN/A // update both the address and the size 5069832SN/A (*w)->addr = addr; 5079832SN/A (*w)->size = size; 5089832SN/A } else if ((addr + size) >= (*w)->addr && 5099832SN/A ((*w)->addr + (*w)->size - addr) <= burstSize) { 5109832SN/A // the new one is just before or partially 5119832SN/A // overlapping with the existing one, and together 5129832SN/A // they fit within a burst 5139832SN/A DPRINTF(DRAM, "Merging write before existing burst\n"); 5149832SN/A merged = true; 5159832SN/A // the existing queue item needs to be adjusted with 5169832SN/A // respect to both address and size 51710047SN/A (*w)->size = (*w)->addr + (*w)->size - addr; 5189832SN/A (*w)->addr = addr; 5199832SN/A } 5209832SN/A } else { 5219838SN/A // the new one starts after the current one, figure 5229838SN/A // out where the existing one ends with respect to the 5239838SN/A // new one 5249832SN/A if (((*w)->addr + (*w)->size) >= (addr + size)) { 5259832SN/A // check if the new one is completely subsumed in the 5269832SN/A // existing one 5279832SN/A DPRINTF(DRAM, "Merging write into existing burst\n"); 5289832SN/A merged = true; 5299832SN/A // no adjustments necessary 5309832SN/A } else if (((*w)->addr + (*w)->size) >= addr && 5319832SN/A (addr + size - (*w)->addr) <= burstSize) { 5329832SN/A // the existing one is just before or partially 5339832SN/A // overlapping with the new one, and together 5349832SN/A // they fit within a burst 5359832SN/A DPRINTF(DRAM, "Merging write after existing burst\n"); 5369832SN/A merged = true; 5379832SN/A // the address is right, and only the size has 5389832SN/A // to be adjusted 5399832SN/A (*w)->size = addr + size - (*w)->addr; 5409832SN/A } 5419832SN/A } 5429832SN/A ++w; 5439832SN/A } 5449243SN/A 5459832SN/A // if the item was not merged we need to create a new write 5469832SN/A // and enqueue it 5479832SN/A if (!merged) { 5489966SN/A DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false); 5499243SN/A 5509832SN/A assert(writeQueue.size() < writeBufferSize); 5519832SN/A wrQLenPdf[writeQueue.size()]++; 5529243SN/A 5539832SN/A DPRINTF(DRAM, "Adding to write queue\n"); 5549831SN/A 5559832SN/A writeQueue.push_back(dram_pkt); 5569831SN/A 5579832SN/A // Update stats 5589832SN/A avgWrQLen = writeQueue.size(); 5599977SN/A } else { 5609977SN/A // keep track of the fact that this burst effectively 5619977SN/A // disappeared as it was merged with an existing one 5629977SN/A mergedWrBursts++; 5639832SN/A } 5649832SN/A 5659831SN/A // Starting address of next dram pkt (aligend to burstSize boundary) 5669831SN/A addr = (addr | (burstSize - 1)) + 1; 5679831SN/A } 5689243SN/A 5699243SN/A // we do not wait for the writes to be send to the actual memory, 5709243SN/A // but instead take responsibility for the consistency here and 5719243SN/A // snoop the write queue for any upcoming reads 5729831SN/A // @todo, if a pkt size is larger than burst size, we might need a 5739831SN/A // different front end latency 5749726SN/A accessAndRespond(pkt, frontendLatency); 5759243SN/A 5769243SN/A // If your write buffer is starting to fill up, drain it! 5779972SN/A if (writeQueue.size() >= writeHighThreshold && !stopReads){ 5789243SN/A triggerWrites(); 5799243SN/A } 5809243SN/A} 5819243SN/A 5829243SN/Avoid 58310146Sandreas.hansson@arm.comDRAMCtrl::printParams() const 5849243SN/A{ 5859243SN/A // Sanity check print of important parameters 5869243SN/A DPRINTF(DRAM, 5879243SN/A "Memory controller %s physical organization\n" \ 5889831SN/A "Number of devices per rank %d\n" \ 5899831SN/A "Device bus width (in bits) %d\n" \ 59010143SN/A "DRAM data bus burst (bytes) %d\n" \ 59110143SN/A "Row buffer size (bytes) %d\n" \ 5929831SN/A "Columns per row buffer %d\n" \ 5939831SN/A "Rows per bank %d\n" \ 5949831SN/A "Banks per rank %d\n" \ 5959831SN/A "Ranks per channel %d\n" \ 59610143SN/A "Total mem capacity (bytes) %u\n", 5979831SN/A name(), devicesPerRank, deviceBusWidth, burstSize, rowBufferSize, 5989831SN/A columnsPerRowBuffer, rowsPerBank, banksPerRank, ranksPerChannel, 5999831SN/A rowBufferSize * rowsPerBank * banksPerRank * ranksPerChannel); 6009243SN/A 6019243SN/A string scheduler = memSchedPolicy == Enums::fcfs ? "FCFS" : "FR-FCFS"; 60210136SN/A string address_mapping = addrMapping == Enums::RoRaBaChCo ? "RoRaBaChCo" : 60310136SN/A (addrMapping == Enums::RoRaBaCoCh ? "RoRaBaCoCh" : "RoCoRaBaCh"); 6049973SN/A string page_policy = pageMgmt == Enums::open ? "OPEN" : 60510144SN/A (pageMgmt == Enums::open_adaptive ? "OPEN (adaptive)" : 60610144SN/A (pageMgmt == Enums::close_adaptive ? "CLOSE (adaptive)" : "CLOSE")); 6079243SN/A 6089243SN/A DPRINTF(DRAM, 6099243SN/A "Memory controller %s characteristics\n" \ 6109243SN/A "Read buffer size %d\n" \ 6119243SN/A "Write buffer size %d\n" \ 61210140SN/A "Write high thresh %d\n" \ 61310140SN/A "Write low thresh %d\n" \ 6149243SN/A "Scheduler %s\n" \ 6159243SN/A "Address mapping %s\n" \ 6169243SN/A "Page policy %s\n", 6179972SN/A name(), readBufferSize, writeBufferSize, writeHighThreshold, 61810140SN/A writeLowThreshold, scheduler, address_mapping, page_policy); 6199243SN/A 6209243SN/A DPRINTF(DRAM, "Memory controller %s timing specs\n" \ 6219567SN/A "tRCD %d ticks\n" \ 6229567SN/A "tCL %d ticks\n" \ 6239567SN/A "tRP %d ticks\n" \ 6249567SN/A "tBURST %d ticks\n" \ 6259567SN/A "tRFC %d ticks\n" \ 6269567SN/A "tREFI %d ticks\n" \ 6279567SN/A "tWTR %d ticks\n" \ 6289567SN/A "tXAW (%d) %d ticks\n", 6299567SN/A name(), tRCD, tCL, tRP, tBURST, tRFC, tREFI, tWTR, 6309567SN/A activationLimit, tXAW); 6319243SN/A} 6329243SN/A 6339243SN/Avoid 63410146Sandreas.hansson@arm.comDRAMCtrl::printQs() const { 6359243SN/A DPRINTF(DRAM, "===READ QUEUE===\n\n"); 6369833SN/A for (auto i = readQueue.begin() ; i != readQueue.end() ; ++i) { 6379243SN/A DPRINTF(DRAM, "Read %lu\n", (*i)->addr); 6389243SN/A } 6399243SN/A DPRINTF(DRAM, "\n===RESP QUEUE===\n\n"); 6409833SN/A for (auto i = respQueue.begin() ; i != respQueue.end() ; ++i) { 6419243SN/A DPRINTF(DRAM, "Response %lu\n", (*i)->addr); 6429243SN/A } 6439243SN/A DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n"); 6449833SN/A for (auto i = writeQueue.begin() ; i != writeQueue.end() ; ++i) { 6459243SN/A DPRINTF(DRAM, "Write %lu\n", (*i)->addr); 6469243SN/A } 6479243SN/A} 6489243SN/A 6499243SN/Abool 65010146Sandreas.hansson@arm.comDRAMCtrl::recvTimingReq(PacketPtr pkt) 6519243SN/A{ 6529349SN/A /// @todo temporary hack to deal with memory corruption issues until 6539349SN/A /// 4-phase transactions are complete 6549349SN/A for (int x = 0; x < pendingDelete.size(); x++) 6559349SN/A delete pendingDelete[x]; 6569349SN/A pendingDelete.clear(); 6579349SN/A 6589243SN/A // This is where we enter from the outside world 6599567SN/A DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n", 6609831SN/A pkt->cmdString(), pkt->getAddr(), pkt->getSize()); 6619243SN/A 6629567SN/A // simply drop inhibited packets for now 6639567SN/A if (pkt->memInhibitAsserted()) { 66410143SN/A DPRINTF(DRAM, "Inhibited packet -- Dropping it now\n"); 6659567SN/A pendingDelete.push_back(pkt); 6669567SN/A return true; 6679567SN/A } 6689243SN/A 6699243SN/A // Calc avg gap between requests 6709243SN/A if (prevArrival != 0) { 6719243SN/A totGap += curTick() - prevArrival; 6729243SN/A } 6739243SN/A prevArrival = curTick(); 6749243SN/A 6759831SN/A 6769831SN/A // Find out how many dram packets a pkt translates to 6779831SN/A // If the burst size is equal or larger than the pkt size, then a pkt 6789831SN/A // translates to only one dram packet. Otherwise, a pkt translates to 6799831SN/A // multiple dram packets 6809243SN/A unsigned size = pkt->getSize(); 6819831SN/A unsigned offset = pkt->getAddr() & (burstSize - 1); 6829831SN/A unsigned int dram_pkt_count = divCeil(offset + size, burstSize); 6839243SN/A 6849243SN/A // check local buffers and do not accept if full 6859243SN/A if (pkt->isRead()) { 6869567SN/A assert(size != 0); 6879831SN/A if (readQueueFull(dram_pkt_count)) { 6889567SN/A DPRINTF(DRAM, "Read queue full, not accepting\n"); 6899243SN/A // remember that we have to retry this port 6909243SN/A retryRdReq = true; 6919243SN/A numRdRetry++; 6929243SN/A return false; 6939243SN/A } else { 6949831SN/A addToReadQueue(pkt, dram_pkt_count); 6959243SN/A readReqs++; 6969977SN/A bytesReadSys += size; 6979243SN/A } 6989243SN/A } else if (pkt->isWrite()) { 6999567SN/A assert(size != 0); 7009831SN/A if (writeQueueFull(dram_pkt_count)) { 7019567SN/A DPRINTF(DRAM, "Write queue full, not accepting\n"); 7029243SN/A // remember that we have to retry this port 7039243SN/A retryWrReq = true; 7049243SN/A numWrRetry++; 7059243SN/A return false; 7069243SN/A } else { 7079831SN/A addToWriteQueue(pkt, dram_pkt_count); 7089243SN/A writeReqs++; 7099977SN/A bytesWrittenSys += size; 7109243SN/A } 7119243SN/A } else { 7129243SN/A DPRINTF(DRAM,"Neither read nor write, ignore timing\n"); 7139243SN/A neitherReadNorWrite++; 7149726SN/A accessAndRespond(pkt, 1); 7159243SN/A } 7169243SN/A 7179243SN/A return true; 7189243SN/A} 7199243SN/A 7209243SN/Avoid 72110146Sandreas.hansson@arm.comDRAMCtrl::processRespondEvent() 7229243SN/A{ 7239243SN/A DPRINTF(DRAM, 7249243SN/A "processRespondEvent(): Some req has reached its readyTime\n"); 7259243SN/A 7269831SN/A DRAMPacket* dram_pkt = respQueue.front(); 7279243SN/A 7289831SN/A if (dram_pkt->burstHelper) { 7299831SN/A // it is a split packet 7309831SN/A dram_pkt->burstHelper->burstsServiced++; 7319831SN/A if (dram_pkt->burstHelper->burstsServiced == 73210143SN/A dram_pkt->burstHelper->burstCount) { 7339831SN/A // we have now serviced all children packets of a system packet 7349831SN/A // so we can now respond to the requester 7359831SN/A // @todo we probably want to have a different front end and back 7369831SN/A // end latency for split packets 7379831SN/A accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency); 7389831SN/A delete dram_pkt->burstHelper; 7399831SN/A dram_pkt->burstHelper = NULL; 7409831SN/A } 7419831SN/A } else { 7429831SN/A // it is not a split packet 7439831SN/A accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency); 7449831SN/A } 7459243SN/A 7469831SN/A delete respQueue.front(); 7479831SN/A respQueue.pop_front(); 7489243SN/A 7499831SN/A if (!respQueue.empty()) { 7509831SN/A assert(respQueue.front()->readyTime >= curTick()); 7519831SN/A assert(!respondEvent.scheduled()); 7529831SN/A schedule(respondEvent, respQueue.front()->readyTime); 7539831SN/A } else { 7549831SN/A // if there is nothing left in any queue, signal a drain 7559831SN/A if (writeQueue.empty() && readQueue.empty() && 7569831SN/A drainManager) { 7579831SN/A drainManager->signalDrainDone(); 7589831SN/A drainManager = NULL; 7599831SN/A } 7609831SN/A } 7619567SN/A 7629831SN/A // We have made a location in the queue available at this point, 7639831SN/A // so if there is a read that was forced to wait, retry now 7649831SN/A if (retryRdReq) { 7659831SN/A retryRdReq = false; 7669831SN/A port.sendRetry(); 7679831SN/A } 7689243SN/A} 7699243SN/A 7709243SN/Avoid 77110146Sandreas.hansson@arm.comDRAMCtrl::chooseNextWrite() 7729243SN/A{ 7739567SN/A // This method does the arbitration between write requests. The 7749567SN/A // chosen packet is simply moved to the head of the write 7759567SN/A // queue. The other methods know that this is the place to 7769567SN/A // look. For example, with FCFS, this method does nothing 7779567SN/A assert(!writeQueue.empty()); 7789243SN/A 7799567SN/A if (writeQueue.size() == 1) { 7809966SN/A DPRINTF(DRAM, "Single write request, nothing to do\n"); 7819243SN/A return; 7829243SN/A } 7839243SN/A 7849243SN/A if (memSchedPolicy == Enums::fcfs) { 7859243SN/A // Do nothing, since the correct request is already head 7869243SN/A } else if (memSchedPolicy == Enums::frfcfs) { 7879974SN/A reorderQueue(writeQueue); 7889243SN/A } else 7899243SN/A panic("No scheduling policy chosen\n"); 7909243SN/A 7919966SN/A DPRINTF(DRAM, "Selected next write request\n"); 7929243SN/A} 7939243SN/A 7949243SN/Abool 79510146Sandreas.hansson@arm.comDRAMCtrl::chooseNextRead() 7969243SN/A{ 7979567SN/A // This method does the arbitration between read requests. The 7989567SN/A // chosen packet is simply moved to the head of the queue. The 7999567SN/A // other methods know that this is the place to look. For example, 8009567SN/A // with FCFS, this method does nothing 8019567SN/A if (readQueue.empty()) { 8029567SN/A DPRINTF(DRAM, "No read request to select\n"); 8039243SN/A return false; 8049243SN/A } 8059243SN/A 8069567SN/A // If there is only one request then there is nothing left to do 8079567SN/A if (readQueue.size() == 1) 8089243SN/A return true; 8099243SN/A 8109243SN/A if (memSchedPolicy == Enums::fcfs) { 8119567SN/A // Do nothing, since the request to serve is already the first 8129567SN/A // one in the read queue 8139243SN/A } else if (memSchedPolicy == Enums::frfcfs) { 8149974SN/A reorderQueue(readQueue); 8159243SN/A } else 8169243SN/A panic("No scheduling policy chosen!\n"); 8179243SN/A 8189567SN/A DPRINTF(DRAM, "Selected next read request\n"); 8199243SN/A return true; 8209243SN/A} 8219243SN/A 8229243SN/Avoid 82310146Sandreas.hansson@arm.comDRAMCtrl::reorderQueue(std::deque<DRAMPacket*>& queue) 8249974SN/A{ 8259974SN/A // Only determine this when needed 8269974SN/A uint64_t earliest_banks = 0; 8279974SN/A 8289974SN/A // Search for row hits first, if no row hit is found then schedule the 8299974SN/A // packet to one of the earliest banks available 8309974SN/A bool found_earliest_pkt = false; 8319974SN/A auto selected_pkt_it = queue.begin(); 8329974SN/A 8339974SN/A for (auto i = queue.begin(); i != queue.end() ; ++i) { 8349974SN/A DRAMPacket* dram_pkt = *i; 8359974SN/A const Bank& bank = dram_pkt->bankRef; 8369974SN/A // Check if it is a row hit 8379974SN/A if (bank.openRow == dram_pkt->row) { 8389974SN/A DPRINTF(DRAM, "Row buffer hit\n"); 8399974SN/A selected_pkt_it = i; 8409974SN/A break; 8419974SN/A } else if (!found_earliest_pkt) { 8429974SN/A // No row hit, go for first ready 8439974SN/A if (earliest_banks == 0) 8449974SN/A earliest_banks = minBankFreeAt(queue); 8459974SN/A 8469974SN/A // Bank is ready or is the first available bank 8479974SN/A if (bank.freeAt <= curTick() || 8489974SN/A bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) { 8499974SN/A // Remember the packet to be scheduled to one of the earliest 8509974SN/A // banks available 8519974SN/A selected_pkt_it = i; 8529974SN/A found_earliest_pkt = true; 8539974SN/A } 8549974SN/A } 8559974SN/A } 8569974SN/A 8579974SN/A DRAMPacket* selected_pkt = *selected_pkt_it; 8589974SN/A queue.erase(selected_pkt_it); 8599974SN/A queue.push_front(selected_pkt); 8609974SN/A} 8619974SN/A 8629974SN/Avoid 86310146Sandreas.hansson@arm.comDRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency) 8649243SN/A{ 8659243SN/A DPRINTF(DRAM, "Responding to Address %lld.. ",pkt->getAddr()); 8669243SN/A 8679243SN/A bool needsResponse = pkt->needsResponse(); 8689243SN/A // do the actual memory access which also turns the packet into a 8699243SN/A // response 8709243SN/A access(pkt); 8719243SN/A 8729243SN/A // turn packet around to go back to requester if response expected 8739243SN/A if (needsResponse) { 8749243SN/A // access already turned the packet into a response 8759243SN/A assert(pkt->isResponse()); 8769243SN/A 8779549SN/A // @todo someone should pay for this 8789549SN/A pkt->busFirstWordDelay = pkt->busLastWordDelay = 0; 8799549SN/A 8809726SN/A // queue the packet in the response queue to be sent out after 8819726SN/A // the static latency has passed 8829726SN/A port.schedTimingResp(pkt, curTick() + static_latency); 8839243SN/A } else { 8849587SN/A // @todo the packet is going to be deleted, and the DRAMPacket 8859587SN/A // is still having a pointer to it 8869587SN/A pendingDelete.push_back(pkt); 8879243SN/A } 8889243SN/A 8899243SN/A DPRINTF(DRAM, "Done\n"); 8909243SN/A 8919243SN/A return; 8929243SN/A} 8939243SN/A 8949243SN/Apair<Tick, Tick> 89510146Sandreas.hansson@arm.comDRAMCtrl::estimateLatency(DRAMPacket* dram_pkt, Tick inTime) 8969243SN/A{ 8979243SN/A // If a request reaches a bank at tick 'inTime', how much time 8989243SN/A // *after* that does it take to finish the request, depending 8999243SN/A // on bank status and page open policy. Note that this method 9009243SN/A // considers only the time taken for the actual read or write 9019243SN/A // to complete, NOT any additional time thereafter for tRAS or 9029243SN/A // tRP. 9039243SN/A Tick accLat = 0; 9049243SN/A Tick bankLat = 0; 9059243SN/A rowHitFlag = false; 9069969SN/A Tick potentialActTick; 9079243SN/A 9089967SN/A const Bank& bank = dram_pkt->bankRef; 90910144SN/A // open-page policy or close_adaptive policy 91010144SN/A if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive || 91110144SN/A pageMgmt == Enums::close_adaptive) { 9129243SN/A if (bank.openRow == dram_pkt->row) { 9139243SN/A // When we have a row-buffer hit, 9149243SN/A // we don't care about tRAS having expired or not, 9159243SN/A // but do care about bank being free for access 9169243SN/A rowHitFlag = true; 9179243SN/A 9189965SN/A // When a series of requests arrive to the same row, 9199965SN/A // DDR systems are capable of streaming data continuously 9209965SN/A // at maximum bandwidth (subject to tCCD). Here, we approximate 9219965SN/A // this condition, and assume that if whenever a bank is already 9229965SN/A // busy and a new request comes in, it can be completed with no 9239965SN/A // penalty beyond waiting for the existing read to complete. 9249965SN/A if (bank.freeAt > inTime) { 9259965SN/A accLat += bank.freeAt - inTime; 9269966SN/A bankLat += 0; 9279965SN/A } else { 9289243SN/A // CAS latency only 9299243SN/A accLat += tCL; 9309243SN/A bankLat += tCL; 9319243SN/A } 9329243SN/A 9339243SN/A } else { 9349243SN/A // Row-buffer miss, need to close existing row 9359243SN/A // once tRAS has expired, then open the new one, 9369243SN/A // then add cas latency. 9379243SN/A Tick freeTime = std::max(bank.tRASDoneAt, bank.freeAt); 9389243SN/A 9399243SN/A if (freeTime > inTime) 9409243SN/A accLat += freeTime - inTime; 9419243SN/A 9429973SN/A // If the there is no open row (open adaptive), then there 9439973SN/A // is no precharge delay, otherwise go with tRP 9449973SN/A Tick precharge_delay = bank.openRow == -1 ? 0 : tRP; 9459973SN/A 9469969SN/A //The bank is free, and you may be able to activate 9479973SN/A potentialActTick = inTime + accLat + precharge_delay; 9489969SN/A if (potentialActTick < bank.actAllowedAt) 9499969SN/A accLat += bank.actAllowedAt - potentialActTick; 9509969SN/A 9519973SN/A accLat += precharge_delay + tRCD + tCL; 9529973SN/A bankLat += precharge_delay + tRCD + tCL; 9539243SN/A } 9549243SN/A } else if (pageMgmt == Enums::close) { 9559243SN/A // With a close page policy, no notion of 9569243SN/A // bank.tRASDoneAt 9579243SN/A if (bank.freeAt > inTime) 9589243SN/A accLat += bank.freeAt - inTime; 9599243SN/A 9609969SN/A //The bank is free, and you may be able to activate 9619969SN/A potentialActTick = inTime + accLat; 9629969SN/A if (potentialActTick < bank.actAllowedAt) 9639969SN/A accLat += bank.actAllowedAt - potentialActTick; 9649969SN/A 9659243SN/A // page already closed, simply open the row, and 9669243SN/A // add cas latency 9679243SN/A accLat += tRCD + tCL; 9689243SN/A bankLat += tRCD + tCL; 9699243SN/A } else 9709243SN/A panic("No page management policy chosen\n"); 9719243SN/A 9729487SN/A DPRINTF(DRAM, "Returning < %lld, %lld > from estimateLatency()\n", 9739487SN/A bankLat, accLat); 9749243SN/A 9759243SN/A return make_pair(bankLat, accLat); 9769243SN/A} 9779243SN/A 9789243SN/Avoid 97910146Sandreas.hansson@arm.comDRAMCtrl::processNextReqEvent() 9809243SN/A{ 9819243SN/A scheduleNextReq(); 9829243SN/A} 9839243SN/A 9849243SN/Avoid 98510146Sandreas.hansson@arm.comDRAMCtrl::recordActivate(Tick act_tick, uint8_t rank, uint8_t bank) 9869488SN/A{ 9879969SN/A assert(0 <= rank && rank < ranksPerChannel); 9889969SN/A assert(actTicks[rank].size() == activationLimit); 9899488SN/A 9909488SN/A DPRINTF(DRAM, "Activate at tick %d\n", act_tick); 9919488SN/A 9929975SN/A // Tracking accesses after all banks are precharged. 9939975SN/A // startTickPrechargeAll: is the tick when all the banks were again 9949975SN/A // precharged. The difference between act_tick and startTickPrechargeAll 9959975SN/A // gives the time for which DRAM doesn't get any accesses after refreshing 9969975SN/A // or after a page is closed in closed-page or open-adaptive-page policy. 9979975SN/A if ((numBanksActive == 0) && (act_tick > startTickPrechargeAll)) { 9989975SN/A prechargeAllTime += act_tick - startTickPrechargeAll; 9999975SN/A } 10009975SN/A 10019975SN/A // No need to update number of active banks for closed-page policy as only 1 10029975SN/A // bank will be activated at any given point, which will be instatntly 10039975SN/A // precharged 100410144SN/A if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive || 100510144SN/A pageMgmt == Enums::close_adaptive) 10069975SN/A ++numBanksActive; 10079975SN/A 10089971SN/A // start by enforcing tRRD 10099971SN/A for(int i = 0; i < banksPerRank; i++) { 10109971SN/A // next activate must not happen before tRRD 10119971SN/A banks[rank][i].actAllowedAt = act_tick + tRRD; 10129971SN/A } 10139971SN/A // tRC should be added to activation tick of the bank currently accessed, 10149971SN/A // where tRC = tRAS + tRP, this is just for a check as actAllowedAt for same 10159971SN/A // bank is already captured by bank.freeAt and bank.tRASDoneAt 10169971SN/A banks[rank][bank].actAllowedAt = act_tick + tRAS + tRP; 10179971SN/A 10189971SN/A // next, we deal with tXAW, if the activation limit is disabled 10199971SN/A // then we are done 10209969SN/A if (actTicks[rank].empty()) 10219824SN/A return; 10229824SN/A 10239488SN/A // sanity check 10249969SN/A if (actTicks[rank].back() && (act_tick - actTicks[rank].back()) < tXAW) { 10259825SN/A // @todo For now, stick with a warning 10269825SN/A warn("Got %d activates in window %d (%d - %d) which is smaller " 10279969SN/A "than %d\n", activationLimit, act_tick - actTicks[rank].back(), 10289969SN/A act_tick, actTicks[rank].back(), tXAW); 10299488SN/A } 10309488SN/A 10319488SN/A // shift the times used for the book keeping, the last element 10329488SN/A // (highest index) is the oldest one and hence the lowest value 10339969SN/A actTicks[rank].pop_back(); 10349488SN/A 10359488SN/A // record an new activation (in the future) 10369969SN/A actTicks[rank].push_front(act_tick); 10379488SN/A 10389488SN/A // cannot activate more than X times in time window tXAW, push the 10399488SN/A // next one (the X + 1'st activate) to be tXAW away from the 10409488SN/A // oldest in our window of X 10419969SN/A if (actTicks[rank].back() && (act_tick - actTicks[rank].back()) < tXAW) { 10429488SN/A DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate no earlier " 10439969SN/A "than %d\n", activationLimit, actTicks[rank].back() + tXAW); 10449488SN/A for(int j = 0; j < banksPerRank; j++) 10459488SN/A // next activate must not happen before end of window 10469969SN/A banks[rank][j].actAllowedAt = actTicks[rank].back() + tXAW; 10479488SN/A } 10489488SN/A} 10499488SN/A 10509488SN/Avoid 105110146Sandreas.hansson@arm.comDRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt) 10529243SN/A{ 10539243SN/A 10549243SN/A DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n", 10559243SN/A dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row); 10569243SN/A 10579243SN/A // estimate the bank and access latency 10589243SN/A pair<Tick, Tick> lat = estimateLatency(dram_pkt, curTick()); 10599243SN/A Tick bankLat = lat.first; 10609243SN/A Tick accessLat = lat.second; 10619963SN/A Tick actTick; 10629243SN/A 10639243SN/A // This request was woken up at this time based on a prior call 10649243SN/A // to estimateLatency(). However, between then and now, both the 10659243SN/A // accessLatency and/or busBusyUntil may have changed. We need 10669243SN/A // to correct for that. 10679243SN/A 10689243SN/A Tick addDelay = (curTick() + accessLat < busBusyUntil) ? 10699243SN/A busBusyUntil - (curTick() + accessLat) : 0; 10709243SN/A 10719967SN/A Bank& bank = dram_pkt->bankRef; 10729243SN/A 10739243SN/A // Update bank state 107410144SN/A if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive || 107510144SN/A pageMgmt == Enums::close_adaptive) { 10769243SN/A bank.freeAt = curTick() + addDelay + accessLat; 10779727SN/A 10789243SN/A // If you activated a new row do to this access, the next access 10799963SN/A // will have to respect tRAS for this bank. 10809488SN/A if (!rowHitFlag) { 10819963SN/A // any waiting for banks account for in freeAt 10829963SN/A actTick = bank.freeAt - tCL - tRCD; 10839963SN/A bank.tRASDoneAt = actTick + tRAS; 10849971SN/A recordActivate(actTick, dram_pkt->rank, dram_pkt->bank); 10859963SN/A 108610142SN/A // if we closed an open row as a result of this access, 108710142SN/A // then sample the number of bytes accessed before 108810142SN/A // resetting it 108910142SN/A if (bank.openRow != -1) 109010142SN/A bytesPerActivate.sample(bank.bytesAccessed); 109110142SN/A 109210142SN/A // update the open row 109310142SN/A bank.openRow = dram_pkt->row; 109410142SN/A 109510142SN/A // start counting anew, this covers both the case when we 109610142SN/A // auto-precharged, and when this access is forced to 109710142SN/A // precharge 10989727SN/A bank.bytesAccessed = 0; 109910141SN/A bank.rowAccesses = 0; 11009488SN/A } 11019973SN/A 110210141SN/A // increment the bytes accessed and the accesses per row 110310141SN/A bank.bytesAccessed += burstSize; 110410141SN/A ++bank.rowAccesses; 110510141SN/A 110610141SN/A // if we reached the max, then issue with an auto-precharge 110710141SN/A bool auto_precharge = bank.rowAccesses == maxAccessesPerRow; 110810141SN/A 110910141SN/A // if we did not hit the limit, we might still want to 111010141SN/A // auto-precharge 111110144SN/A if (!auto_precharge && 111210144SN/A (pageMgmt == Enums::open_adaptive || 111310144SN/A pageMgmt == Enums::close_adaptive)) { 111410144SN/A // a twist on the open and close page policies: 111510144SN/A // 1) open_adaptive page policy does not blindly keep the 11169973SN/A // page open, but close it if there are no row hits, and there 11179973SN/A // are bank conflicts in the queue 111810144SN/A // 2) close_adaptive page policy does not blindly close the 111910144SN/A // page, but closes it only if there are no row hits in the queue. 112010144SN/A // In this case, only force an auto precharge when there 112110144SN/A // are no same page hits in the queue 11229973SN/A bool got_more_hits = false; 11239973SN/A bool got_bank_conflict = false; 11249973SN/A 11259973SN/A // either look at the read queue or write queue 11269973SN/A const deque<DRAMPacket*>& queue = dram_pkt->isRead ? readQueue : 11279973SN/A writeQueue; 11289973SN/A auto p = queue.begin(); 11299973SN/A // make sure we are not considering the packet that we are 11309973SN/A // currently dealing with (which is the head of the queue) 11319973SN/A ++p; 11329973SN/A 113310144SN/A // keep on looking until we have found required condition or 113410144SN/A // reached the end 113510144SN/A while (!(got_more_hits && 113610144SN/A (got_bank_conflict || pageMgmt == Enums::close_adaptive)) && 11379973SN/A p != queue.end()) { 11389973SN/A bool same_rank_bank = (dram_pkt->rank == (*p)->rank) && 11399973SN/A (dram_pkt->bank == (*p)->bank); 11409973SN/A bool same_row = dram_pkt->row == (*p)->row; 11419973SN/A got_more_hits |= same_rank_bank && same_row; 11429973SN/A got_bank_conflict |= same_rank_bank && !same_row; 11439973SN/A ++p; 11449973SN/A } 11459973SN/A 114610144SN/A // auto pre-charge when either 114710144SN/A // 1) open_adaptive policy, we have not got any more hits, and 114810144SN/A // have a bank conflict 114910144SN/A // 2) close_adaptive policy and we have not got any more hits 115010144SN/A auto_precharge = !got_more_hits && 115110144SN/A (got_bank_conflict || pageMgmt == Enums::close_adaptive); 115210141SN/A } 115310141SN/A 115410141SN/A // if this access should use auto-precharge, then we are 115510141SN/A // closing the row 115610141SN/A if (auto_precharge) { 115710141SN/A bank.openRow = -1; 115810141SN/A bank.freeAt = std::max(bank.freeAt, bank.tRASDoneAt) + tRP; 115910141SN/A --numBanksActive; 116010141SN/A if (numBanksActive == 0) { 116110141SN/A startTickPrechargeAll = std::max(startTickPrechargeAll, 116210141SN/A bank.freeAt); 116310141SN/A DPRINTF(DRAM, "All banks precharged at tick: %ld\n", 116410141SN/A startTickPrechargeAll); 11659973SN/A } 116610142SN/A 116710142SN/A // sample the bytes per activate here since we are closing 116810142SN/A // the page 116910142SN/A bytesPerActivate.sample(bank.bytesAccessed); 117010142SN/A 117110141SN/A DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId); 11729973SN/A } 11739973SN/A 11749971SN/A DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt); 11759963SN/A } else if (pageMgmt == Enums::close) { 11769963SN/A actTick = curTick() + addDelay + accessLat - tRCD - tCL; 11779971SN/A recordActivate(actTick, dram_pkt->rank, dram_pkt->bank); 11789963SN/A 11799963SN/A // If the DRAM has a very quick tRAS, bank can be made free 11809963SN/A // after consecutive tCL,tRCD,tRP times. In general, however, 11819963SN/A // an additional wait is required to respect tRAS. 11829963SN/A bank.freeAt = std::max(actTick + tRAS + tRP, 11839963SN/A actTick + tRCD + tCL + tRP); 11849971SN/A DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt); 11859831SN/A bytesPerActivate.sample(burstSize); 11869975SN/A startTickPrechargeAll = std::max(startTickPrechargeAll, bank.freeAt); 11879243SN/A } else 11889243SN/A panic("No page management policy chosen\n"); 11899243SN/A 11909243SN/A // Update request parameters 11919243SN/A dram_pkt->readyTime = curTick() + addDelay + accessLat + tBURST; 11929243SN/A 11939243SN/A 11949243SN/A DPRINTF(DRAM, "Req %lld: curtick is %lld accessLat is %d " \ 11959243SN/A "readytime is %lld busbusyuntil is %lld. " \ 11969243SN/A "Scheduling at readyTime\n", dram_pkt->addr, 11979243SN/A curTick(), accessLat, dram_pkt->readyTime, busBusyUntil); 11989243SN/A 11999243SN/A // Make sure requests are not overlapping on the databus 12009243SN/A assert (dram_pkt->readyTime - busBusyUntil >= tBURST); 12019243SN/A 12029243SN/A // Update bus state 12039243SN/A busBusyUntil = dram_pkt->readyTime; 12049243SN/A 12059243SN/A DPRINTF(DRAM,"Access time is %lld\n", 12069243SN/A dram_pkt->readyTime - dram_pkt->entryTime); 12079243SN/A 12089972SN/A // Update the minimum timing between the requests 12099972SN/A newTime = (busBusyUntil > tRP + tRCD + tCL) ? 12109972SN/A std::max(busBusyUntil - (tRP + tRCD + tCL), curTick()) : curTick(); 12119972SN/A 12129977SN/A // Update the access related stats 12139977SN/A if (dram_pkt->isRead) { 121410147Sandreas.hansson@arm.com ++readsThisTime; 12159977SN/A if (rowHitFlag) 12169977SN/A readRowHits++; 12179977SN/A bytesReadDRAM += burstSize; 12189977SN/A perBankRdBursts[dram_pkt->bankId]++; 12199977SN/A } else { 122010147Sandreas.hansson@arm.com ++writesThisTime; 12219977SN/A if (rowHitFlag) 12229977SN/A writeRowHits++; 12239977SN/A bytesWritten += burstSize; 12249977SN/A perBankWrBursts[dram_pkt->bankId]++; 12259966SN/A 12269977SN/A // At this point, commonality between reads and writes ends. 12279977SN/A // For writes, we are done since we long ago responded to the 12289977SN/A // requestor. 12299966SN/A return; 12309966SN/A } 12319966SN/A 12329977SN/A // Update latency stats 12339243SN/A totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime; 12349243SN/A totBankLat += bankLat; 12359243SN/A totBusLat += tBURST; 12369243SN/A totQLat += dram_pkt->readyTime - dram_pkt->entryTime - bankLat - tBURST; 12379243SN/A 12389243SN/A 12399243SN/A // At this point we're done dealing with the request 12409243SN/A // It will be moved to a separate response queue with a 12419243SN/A // correct readyTime, and eventually be sent back at that 12429243SN/A //time 12439243SN/A moveToRespQ(); 12449243SN/A 12459972SN/A // Schedule the next read event 124610143SN/A if (!nextReqEvent.scheduled() && !stopReads) { 12479567SN/A schedule(nextReqEvent, newTime); 12489243SN/A } else { 12499243SN/A if (newTime < nextReqEvent.when()) 12509567SN/A reschedule(nextReqEvent, newTime); 12519243SN/A } 12529243SN/A} 12539243SN/A 12549243SN/Avoid 125510146Sandreas.hansson@arm.comDRAMCtrl::moveToRespQ() 12569243SN/A{ 12579243SN/A // Remove from read queue 12589567SN/A DRAMPacket* dram_pkt = readQueue.front(); 12599567SN/A readQueue.pop_front(); 12609243SN/A 12619832SN/A // sanity check 12629832SN/A assert(dram_pkt->size <= burstSize); 12639832SN/A 12649243SN/A // Insert into response queue sorted by readyTime 12659243SN/A // It will be sent back to the requestor at its 12669243SN/A // readyTime 12679567SN/A if (respQueue.empty()) { 12689567SN/A respQueue.push_front(dram_pkt); 12699243SN/A assert(!respondEvent.scheduled()); 12709243SN/A assert(dram_pkt->readyTime >= curTick()); 12719567SN/A schedule(respondEvent, dram_pkt->readyTime); 12729243SN/A } else { 12739243SN/A bool done = false; 12749833SN/A auto i = respQueue.begin(); 12759567SN/A while (!done && i != respQueue.end()) { 12769243SN/A if ((*i)->readyTime > dram_pkt->readyTime) { 12779567SN/A respQueue.insert(i, dram_pkt); 12789243SN/A done = true; 12799243SN/A } 12809243SN/A ++i; 12819243SN/A } 12829243SN/A 12839243SN/A if (!done) 12849567SN/A respQueue.push_back(dram_pkt); 12859243SN/A 12869243SN/A assert(respondEvent.scheduled()); 12879243SN/A 12889567SN/A if (respQueue.front()->readyTime < respondEvent.when()) { 12899567SN/A assert(respQueue.front()->readyTime >= curTick()); 12909567SN/A reschedule(respondEvent, respQueue.front()->readyTime); 12919243SN/A } 12929243SN/A } 12939243SN/A} 12949243SN/A 12959243SN/Avoid 129610146Sandreas.hansson@arm.comDRAMCtrl::scheduleNextReq() 12979243SN/A{ 12989243SN/A DPRINTF(DRAM, "Reached scheduleNextReq()\n"); 12999243SN/A 13009567SN/A // Figure out which read request goes next, and move it to the 13019567SN/A // front of the read queue 13029567SN/A if (!chooseNextRead()) { 130310140SN/A // In the case there is no read request to go next, trigger 130410140SN/A // writes if we have passed the low threshold (or if we are 130510140SN/A // draining) 130610140SN/A if (!writeQueue.empty() && !writeEvent.scheduled() && 130710140SN/A (writeQueue.size() > writeLowThreshold || drainManager)) 13089352SN/A triggerWrites(); 13099352SN/A } else { 13109567SN/A doDRAMAccess(readQueue.front()); 13119352SN/A } 13129243SN/A} 13139243SN/A 13149243SN/ATick 131510146Sandreas.hansson@arm.comDRAMCtrl::maxBankFreeAt() const 13169243SN/A{ 13179243SN/A Tick banksFree = 0; 13189243SN/A 13199243SN/A for(int i = 0; i < ranksPerChannel; i++) 13209243SN/A for(int j = 0; j < banksPerRank; j++) 13219243SN/A banksFree = std::max(banks[i][j].freeAt, banksFree); 13229243SN/A 13239243SN/A return banksFree; 13249243SN/A} 13259243SN/A 13269967SN/Auint64_t 132710146Sandreas.hansson@arm.comDRAMCtrl::minBankFreeAt(const deque<DRAMPacket*>& queue) const 13289967SN/A{ 13299967SN/A uint64_t bank_mask = 0; 13309967SN/A Tick freeAt = MaxTick; 13319967SN/A 13329967SN/A // detemrine if we have queued transactions targetting the 13339967SN/A // bank in question 13349967SN/A vector<bool> got_waiting(ranksPerChannel * banksPerRank, false); 13359967SN/A for (auto p = queue.begin(); p != queue.end(); ++p) { 13369967SN/A got_waiting[(*p)->bankId] = true; 13379967SN/A } 13389967SN/A 13399967SN/A for (int i = 0; i < ranksPerChannel; i++) { 13409967SN/A for (int j = 0; j < banksPerRank; j++) { 13419967SN/A // if we have waiting requests for the bank, and it is 13429967SN/A // amongst the first available, update the mask 13439967SN/A if (got_waiting[i * banksPerRank + j] && 13449967SN/A banks[i][j].freeAt <= freeAt) { 13459967SN/A // reset bank mask if new minimum is found 13469967SN/A if (banks[i][j].freeAt < freeAt) 13479967SN/A bank_mask = 0; 13489967SN/A // set the bit corresponding to the available bank 13499967SN/A uint8_t bit_index = i * ranksPerChannel + j; 13509967SN/A replaceBits(bank_mask, bit_index, bit_index, 1); 13519967SN/A freeAt = banks[i][j].freeAt; 13529967SN/A } 13539967SN/A } 13549967SN/A } 13559967SN/A return bank_mask; 13569967SN/A} 13579967SN/A 13589243SN/Avoid 135910146Sandreas.hansson@arm.comDRAMCtrl::processRefreshEvent() 13609243SN/A{ 13619243SN/A DPRINTF(DRAM, "Refreshing at tick %ld\n", curTick()); 13629243SN/A 13639243SN/A Tick banksFree = std::max(curTick(), maxBankFreeAt()) + tRFC; 13649243SN/A 13659243SN/A for(int i = 0; i < ranksPerChannel; i++) 13669975SN/A for(int j = 0; j < banksPerRank; j++) { 13679243SN/A banks[i][j].freeAt = banksFree; 13689975SN/A banks[i][j].openRow = -1; 13699975SN/A } 13709975SN/A 13719975SN/A // updating startTickPrechargeAll, isprechargeAll 13729975SN/A numBanksActive = 0; 13739975SN/A startTickPrechargeAll = banksFree; 13749243SN/A 13759567SN/A schedule(refreshEvent, curTick() + tREFI); 13769243SN/A} 13779243SN/A 13789243SN/Avoid 137910146Sandreas.hansson@arm.comDRAMCtrl::regStats() 13809243SN/A{ 13819243SN/A using namespace Stats; 13829243SN/A 13839243SN/A AbstractMemory::regStats(); 13849243SN/A 13859243SN/A readReqs 13869243SN/A .name(name() + ".readReqs") 13879977SN/A .desc("Number of read requests accepted"); 13889243SN/A 13899243SN/A writeReqs 13909243SN/A .name(name() + ".writeReqs") 13919977SN/A .desc("Number of write requests accepted"); 13929831SN/A 13939831SN/A readBursts 13949831SN/A .name(name() + ".readBursts") 13959977SN/A .desc("Number of DRAM read bursts, " 13969977SN/A "including those serviced by the write queue"); 13979831SN/A 13989831SN/A writeBursts 13999831SN/A .name(name() + ".writeBursts") 14009977SN/A .desc("Number of DRAM write bursts, " 14019977SN/A "including those merged in the write queue"); 14029243SN/A 14039243SN/A servicedByWrQ 14049243SN/A .name(name() + ".servicedByWrQ") 14059977SN/A .desc("Number of DRAM read bursts serviced by the write queue"); 14069977SN/A 14079977SN/A mergedWrBursts 14089977SN/A .name(name() + ".mergedWrBursts") 14099977SN/A .desc("Number of DRAM write bursts merged with an existing one"); 14109243SN/A 14119243SN/A neitherReadNorWrite 14129977SN/A .name(name() + ".neitherReadNorWriteReqs") 14139977SN/A .desc("Number of requests that are neither read nor write"); 14149243SN/A 14159977SN/A perBankRdBursts 14169243SN/A .init(banksPerRank * ranksPerChannel) 14179977SN/A .name(name() + ".perBankRdBursts") 14189977SN/A .desc("Per bank write bursts"); 14199243SN/A 14209977SN/A perBankWrBursts 14219243SN/A .init(banksPerRank * ranksPerChannel) 14229977SN/A .name(name() + ".perBankWrBursts") 14239977SN/A .desc("Per bank write bursts"); 14249243SN/A 14259243SN/A avgRdQLen 14269243SN/A .name(name() + ".avgRdQLen") 14279977SN/A .desc("Average read queue length when enqueuing") 14289243SN/A .precision(2); 14299243SN/A 14309243SN/A avgWrQLen 14319243SN/A .name(name() + ".avgWrQLen") 14329977SN/A .desc("Average write queue length when enqueuing") 14339243SN/A .precision(2); 14349243SN/A 14359243SN/A totQLat 14369243SN/A .name(name() + ".totQLat") 14379977SN/A .desc("Total ticks spent queuing"); 14389243SN/A 14399243SN/A totBankLat 14409243SN/A .name(name() + ".totBankLat") 14419977SN/A .desc("Total ticks spent accessing banks"); 14429243SN/A 14439243SN/A totBusLat 14449243SN/A .name(name() + ".totBusLat") 14459977SN/A .desc("Total ticks spent in databus transfers"); 14469243SN/A 14479243SN/A totMemAccLat 14489243SN/A .name(name() + ".totMemAccLat") 14499977SN/A .desc("Total ticks spent from burst creation until serviced " 14509977SN/A "by the DRAM"); 14519243SN/A 14529243SN/A avgQLat 14539243SN/A .name(name() + ".avgQLat") 14549977SN/A .desc("Average queueing delay per DRAM burst") 14559243SN/A .precision(2); 14569243SN/A 14579831SN/A avgQLat = totQLat / (readBursts - servicedByWrQ); 14589243SN/A 14599243SN/A avgBankLat 14609243SN/A .name(name() + ".avgBankLat") 14619977SN/A .desc("Average bank access latency per DRAM burst") 14629243SN/A .precision(2); 14639243SN/A 14649831SN/A avgBankLat = totBankLat / (readBursts - servicedByWrQ); 14659243SN/A 14669243SN/A avgBusLat 14679243SN/A .name(name() + ".avgBusLat") 14689977SN/A .desc("Average bus latency per DRAM burst") 14699243SN/A .precision(2); 14709243SN/A 14719831SN/A avgBusLat = totBusLat / (readBursts - servicedByWrQ); 14729243SN/A 14739243SN/A avgMemAccLat 14749243SN/A .name(name() + ".avgMemAccLat") 14759977SN/A .desc("Average memory access latency per DRAM burst") 14769243SN/A .precision(2); 14779243SN/A 14789831SN/A avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ); 14799243SN/A 14809243SN/A numRdRetry 14819243SN/A .name(name() + ".numRdRetry") 14829977SN/A .desc("Number of times read queue was full causing retry"); 14839243SN/A 14849243SN/A numWrRetry 14859243SN/A .name(name() + ".numWrRetry") 14869977SN/A .desc("Number of times write queue was full causing retry"); 14879243SN/A 14889243SN/A readRowHits 14899243SN/A .name(name() + ".readRowHits") 14909243SN/A .desc("Number of row buffer hits during reads"); 14919243SN/A 14929243SN/A writeRowHits 14939243SN/A .name(name() + ".writeRowHits") 14949243SN/A .desc("Number of row buffer hits during writes"); 14959243SN/A 14969243SN/A readRowHitRate 14979243SN/A .name(name() + ".readRowHitRate") 14989243SN/A .desc("Row buffer hit rate for reads") 14999243SN/A .precision(2); 15009243SN/A 15019831SN/A readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100; 15029243SN/A 15039243SN/A writeRowHitRate 15049243SN/A .name(name() + ".writeRowHitRate") 15059243SN/A .desc("Row buffer hit rate for writes") 15069243SN/A .precision(2); 15079243SN/A 15089977SN/A writeRowHitRate = (writeRowHits / (writeBursts - mergedWrBursts)) * 100; 15099243SN/A 15109243SN/A readPktSize 15119831SN/A .init(ceilLog2(burstSize) + 1) 15129243SN/A .name(name() + ".readPktSize") 15139977SN/A .desc("Read request sizes (log2)"); 15149243SN/A 15159243SN/A writePktSize 15169831SN/A .init(ceilLog2(burstSize) + 1) 15179243SN/A .name(name() + ".writePktSize") 15189977SN/A .desc("Write request sizes (log2)"); 15199243SN/A 15209243SN/A rdQLenPdf 15219567SN/A .init(readBufferSize) 15229243SN/A .name(name() + ".rdQLenPdf") 15239243SN/A .desc("What read queue length does an incoming req see"); 15249243SN/A 15259243SN/A wrQLenPdf 15269567SN/A .init(writeBufferSize) 15279243SN/A .name(name() + ".wrQLenPdf") 15289243SN/A .desc("What write queue length does an incoming req see"); 15299243SN/A 15309727SN/A bytesPerActivate 153110141SN/A .init(maxAccessesPerRow) 15329727SN/A .name(name() + ".bytesPerActivate") 15339727SN/A .desc("Bytes accessed per row activation") 15349727SN/A .flags(nozero); 15359243SN/A 153610147Sandreas.hansson@arm.com rdPerTurnAround 153710147Sandreas.hansson@arm.com .init(readBufferSize) 153810147Sandreas.hansson@arm.com .name(name() + ".rdPerTurnAround") 153910147Sandreas.hansson@arm.com .desc("Reads before turning the bus around for writes") 154010147Sandreas.hansson@arm.com .flags(nozero); 154110147Sandreas.hansson@arm.com 154210147Sandreas.hansson@arm.com wrPerTurnAround 154310147Sandreas.hansson@arm.com .init(writeBufferSize) 154410147Sandreas.hansson@arm.com .name(name() + ".wrPerTurnAround") 154510147Sandreas.hansson@arm.com .desc("Writes before turning the bus around for reads") 154610147Sandreas.hansson@arm.com .flags(nozero); 154710147Sandreas.hansson@arm.com 15489975SN/A bytesReadDRAM 15499975SN/A .name(name() + ".bytesReadDRAM") 15509975SN/A .desc("Total number of bytes read from DRAM"); 15519975SN/A 15529975SN/A bytesReadWrQ 15539975SN/A .name(name() + ".bytesReadWrQ") 15549975SN/A .desc("Total number of bytes read from write queue"); 15559243SN/A 15569243SN/A bytesWritten 15579243SN/A .name(name() + ".bytesWritten") 15589977SN/A .desc("Total number of bytes written to DRAM"); 15599243SN/A 15609977SN/A bytesReadSys 15619977SN/A .name(name() + ".bytesReadSys") 15629977SN/A .desc("Total read bytes from the system interface side"); 15639243SN/A 15649977SN/A bytesWrittenSys 15659977SN/A .name(name() + ".bytesWrittenSys") 15669977SN/A .desc("Total written bytes from the system interface side"); 15679243SN/A 15689243SN/A avgRdBW 15699243SN/A .name(name() + ".avgRdBW") 15709977SN/A .desc("Average DRAM read bandwidth in MiByte/s") 15719243SN/A .precision(2); 15729243SN/A 15739977SN/A avgRdBW = (bytesReadDRAM / 1000000) / simSeconds; 15749243SN/A 15759243SN/A avgWrBW 15769243SN/A .name(name() + ".avgWrBW") 15779977SN/A .desc("Average achieved write bandwidth in MiByte/s") 15789243SN/A .precision(2); 15799243SN/A 15809243SN/A avgWrBW = (bytesWritten / 1000000) / simSeconds; 15819243SN/A 15829977SN/A avgRdBWSys 15839977SN/A .name(name() + ".avgRdBWSys") 15849977SN/A .desc("Average system read bandwidth in MiByte/s") 15859243SN/A .precision(2); 15869243SN/A 15879977SN/A avgRdBWSys = (bytesReadSys / 1000000) / simSeconds; 15889243SN/A 15899977SN/A avgWrBWSys 15909977SN/A .name(name() + ".avgWrBWSys") 15919977SN/A .desc("Average system write bandwidth in MiByte/s") 15929243SN/A .precision(2); 15939243SN/A 15949977SN/A avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds; 15959243SN/A 15969243SN/A peakBW 15979243SN/A .name(name() + ".peakBW") 15989977SN/A .desc("Theoretical peak bandwidth in MiByte/s") 15999243SN/A .precision(2); 16009243SN/A 16019831SN/A peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000; 16029243SN/A 16039243SN/A busUtil 16049243SN/A .name(name() + ".busUtil") 16059243SN/A .desc("Data bus utilization in percentage") 16069243SN/A .precision(2); 16079243SN/A 16089243SN/A busUtil = (avgRdBW + avgWrBW) / peakBW * 100; 16099243SN/A 16109243SN/A totGap 16119243SN/A .name(name() + ".totGap") 16129243SN/A .desc("Total gap between requests"); 16139243SN/A 16149243SN/A avgGap 16159243SN/A .name(name() + ".avgGap") 16169243SN/A .desc("Average gap between requests") 16179243SN/A .precision(2); 16189243SN/A 16199243SN/A avgGap = totGap / (readReqs + writeReqs); 16209975SN/A 16219975SN/A // Stats for DRAM Power calculation based on Micron datasheet 16229975SN/A busUtilRead 16239975SN/A .name(name() + ".busUtilRead") 16249975SN/A .desc("Data bus utilization in percentage for reads") 16259975SN/A .precision(2); 16269975SN/A 16279975SN/A busUtilRead = avgRdBW / peakBW * 100; 16289975SN/A 16299975SN/A busUtilWrite 16309975SN/A .name(name() + ".busUtilWrite") 16319975SN/A .desc("Data bus utilization in percentage for writes") 16329975SN/A .precision(2); 16339975SN/A 16349975SN/A busUtilWrite = avgWrBW / peakBW * 100; 16359975SN/A 16369975SN/A pageHitRate 16379975SN/A .name(name() + ".pageHitRate") 16389975SN/A .desc("Row buffer hit rate, read and write combined") 16399975SN/A .precision(2); 16409975SN/A 16419977SN/A pageHitRate = (writeRowHits + readRowHits) / 16429977SN/A (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100; 16439975SN/A 16449975SN/A prechargeAllPercent 16459975SN/A .name(name() + ".prechargeAllPercent") 16469975SN/A .desc("Percentage of time for which DRAM has all the banks in " 16479975SN/A "precharge state") 16489975SN/A .precision(2); 16499975SN/A 16509975SN/A prechargeAllPercent = prechargeAllTime / simTicks * 100; 16519243SN/A} 16529243SN/A 16539243SN/Avoid 165410146Sandreas.hansson@arm.comDRAMCtrl::recvFunctional(PacketPtr pkt) 16559243SN/A{ 16569243SN/A // rely on the abstract memory 16579243SN/A functionalAccess(pkt); 16589243SN/A} 16599243SN/A 16609294SN/ABaseSlavePort& 166110146Sandreas.hansson@arm.comDRAMCtrl::getSlavePort(const string &if_name, PortID idx) 16629243SN/A{ 16639243SN/A if (if_name != "port") { 16649243SN/A return MemObject::getSlavePort(if_name, idx); 16659243SN/A } else { 16669243SN/A return port; 16679243SN/A } 16689243SN/A} 16699243SN/A 16709243SN/Aunsigned int 167110146Sandreas.hansson@arm.comDRAMCtrl::drain(DrainManager *dm) 16729243SN/A{ 16739342SN/A unsigned int count = port.drain(dm); 16749243SN/A 16759243SN/A // if there is anything in any of our internal queues, keep track 16769243SN/A // of that as well 16779567SN/A if (!(writeQueue.empty() && readQueue.empty() && 16789567SN/A respQueue.empty())) { 16799352SN/A DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d," 16809567SN/A " resp: %d\n", writeQueue.size(), readQueue.size(), 16819567SN/A respQueue.size()); 16829243SN/A ++count; 16839342SN/A drainManager = dm; 16849352SN/A // the only part that is not drained automatically over time 16859352SN/A // is the write queue, thus trigger writes if there are any 16869352SN/A // waiting and no reads waiting, otherwise wait until the 16879352SN/A // reads are done 16889567SN/A if (readQueue.empty() && !writeQueue.empty() && 16899352SN/A !writeEvent.scheduled()) 16909352SN/A triggerWrites(); 16919243SN/A } 16929243SN/A 16939243SN/A if (count) 16949342SN/A setDrainState(Drainable::Draining); 16959243SN/A else 16969342SN/A setDrainState(Drainable::Drained); 16979243SN/A return count; 16989243SN/A} 16999243SN/A 170010146Sandreas.hansson@arm.comDRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _memory) 17019243SN/A : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this), 17029243SN/A memory(_memory) 17039243SN/A{ } 17049243SN/A 17059243SN/AAddrRangeList 170610146Sandreas.hansson@arm.comDRAMCtrl::MemoryPort::getAddrRanges() const 17079243SN/A{ 17089243SN/A AddrRangeList ranges; 17099243SN/A ranges.push_back(memory.getAddrRange()); 17109243SN/A return ranges; 17119243SN/A} 17129243SN/A 17139243SN/Avoid 171410146Sandreas.hansson@arm.comDRAMCtrl::MemoryPort::recvFunctional(PacketPtr pkt) 17159243SN/A{ 17169243SN/A pkt->pushLabel(memory.name()); 17179243SN/A 17189243SN/A if (!queue.checkFunctional(pkt)) { 17199243SN/A // Default implementation of SimpleTimingPort::recvFunctional() 17209243SN/A // calls recvAtomic() and throws away the latency; we can save a 17219243SN/A // little here by just not calculating the latency. 17229243SN/A memory.recvFunctional(pkt); 17239243SN/A } 17249243SN/A 17259243SN/A pkt->popLabel(); 17269243SN/A} 17279243SN/A 17289243SN/ATick 172910146Sandreas.hansson@arm.comDRAMCtrl::MemoryPort::recvAtomic(PacketPtr pkt) 17309243SN/A{ 17319243SN/A return memory.recvAtomic(pkt); 17329243SN/A} 17339243SN/A 17349243SN/Abool 173510146Sandreas.hansson@arm.comDRAMCtrl::MemoryPort::recvTimingReq(PacketPtr pkt) 17369243SN/A{ 17379243SN/A // pass it to the memory controller 17389243SN/A return memory.recvTimingReq(pkt); 17399243SN/A} 17409243SN/A 174110146Sandreas.hansson@arm.comDRAMCtrl* 174210146Sandreas.hansson@arm.comDRAMCtrlParams::create() 17439243SN/A{ 174410146Sandreas.hansson@arm.com return new DRAMCtrl(this); 17459243SN/A} 1746