/*
 * Copyright (c) 2010-2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 *          Ani Udipi
 *          Neha Agarwal
 *          Omar Naji
 *          Wendy Elsasser
 *          Radhika Jagtap
 */

#include "mem/dram_ctrl.hh"

#include "base/bitfield.hh"
#include "base/trace.hh"
#include "debug/DRAM.hh"
#include "debug/DRAMPower.hh"
#include "debug/DRAMState.hh"
#include "debug/Drain.hh"
| #include "debug/QOS.hh"
#include "sim/system.hh"

using namespace std;
using namespace Data;

DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) :
    AbstractMemory(p),
|     QoS::MemCtrl(p),
    port(name() + ".port", *this), isTimingMode(false),
    retryRdReq(false), retryWrReq(false),
    busState(READ),
    busStateNext(READ),
| |
    nextReqEvent([this]{ processNextReqEvent(); }, name()),
    respondEvent([this]{ processRespondEvent(); }, name()),
    deviceSize(p->device_size),
    deviceBusWidth(p->device_bus_width), burstLength(p->burst_length),
    deviceRowBufferSize(p->device_rowbuffer_size),
    devicesPerRank(p->devices_per_rank),
    burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8),
    rowBufferSize(devicesPerRank * deviceRowBufferSize),
    columnsPerRowBuffer(rowBufferSize / burstSize),
    columnsPerStripe(range.interleaved() ?
                     range.granularity() / burstSize : 1),
    ranksPerChannel(p->ranks_per_channel),
    bankGroupsPerRank(p->bank_groups_per_rank),
    bankGroupArch(p->bank_groups_per_rank > 0),
    banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
    readBufferSize(p->read_buffer_size),
    writeBufferSize(p->write_buffer_size),
    writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
    writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
    minWritesPerSwitch(p->min_writes_per_switch),
    writesThisTime(0), readsThisTime(0),
    tCK(p->tCK), tRTW(p->tRTW), tCS(p->tCS), tBURST(p->tBURST),
    tCCD_L_WR(p->tCCD_L_WR),
    tCCD_L(p->tCCD_L), tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS),
    tWR(p->tWR), tRTP(p->tRTP), tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD),
    tRRD_L(p->tRRD_L), tXAW(p->tXAW), tXP(p->tXP), tXS(p->tXS),
    activationLimit(p->activation_limit), rankToRankDly(tCS + tBURST),
    wrToRdDly(tCL + tBURST + p->tWTR), rdToWrDly(tRTW + tBURST),
    memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
    pageMgmt(p->page_policy),
    maxAccessesPerRow(p->max_accesses_per_row),
    frontendLatency(p->static_frontend_latency),
    backendLatency(p->static_backend_latency),
    nextBurstAt(0), prevArrival(0),
    nextReqTime(0), activeRank(0), timeStampOffset(0),
    lastStatsResetTick(0)
{
    // sanity check the ranks since we rely on bit slicing for the
    // address decoding
    fatal_if(!isPowerOf2(ranksPerChannel), "DRAM rank count of %d is not "
             "allowed, must be a power of two\n", ranksPerChannel);

    fatal_if(!isPowerOf2(burstSize), "DRAM burst size %d is not allowed, "
             "must be a power of two\n", burstSize);
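As a quick audit of the derived geometry in the initialiser list, here is a standalone sketch with illustrative DDR3-like numbers; every value is an assumption for the example, not taken from this change:

#include <cassert>
#include <cstdint>

int main()
{
    // assumed toy configuration: eight x8 devices, burst length 8,
    // 1 KiB row buffer per device
    const uint64_t devicesPerRank = 8;
    const uint64_t deviceBusWidth = 8;         // bits per device
    const uint64_t burstLength = 8;            // beats per burst
    const uint64_t deviceRowBufferSize = 1024; // bytes per device

    // same arithmetic as the initialiser list above
    const uint64_t burstSize =
        (devicesPerRank * burstLength * deviceBusWidth) / 8; // 64 bytes
    const uint64_t rowBufferSize =
        devicesPerRank * deviceRowBufferSize;                // 8 KiB
    const uint64_t columnsPerRowBuffer = rowBufferSize / burstSize;

    assert(burstSize == 64);
    assert(rowBufferSize == 8192);
    assert(columnsPerRowBuffer == 128);
    return 0;
}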
|     readQueue.resize(p->qos_priorities);
|     writeQueue.resize(p->qos_priorities);

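These resize calls are the heart of the patch: the single read and write deques become one deque per QoS priority level. A minimal sketch of the resulting shape, with DRAMPacket stubbed and the priority count assumed:

#include <deque>
#include <vector>

struct DRAMPacket { int qv; int qosValue() const { return qv; } };
using DRAMPacketQueue = std::deque<DRAMPacket*>;

int main()
{
    const std::size_t qos_priorities = 8;  // assumed config value
    std::vector<DRAMPacketQueue> readQueue, writeQueue;
    readQueue.resize(qos_priorities);
    writeQueue.resize(qos_priorities);

    // enqueue into the per-priority queue, as the patched code does
    DRAMPacket pkt{3};
    readQueue[pkt.qosValue()].push_back(&pkt);
    return 0;
}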
    for (int i = 0; i < ranksPerChannel; i++) {
        Rank* rank = new Rank(*this, p, i);
        ranks.push_back(rank);
    }

    // perform a basic check of the write thresholds
    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p->write_low_thresh_perc,
              p->write_high_thresh_perc);

    // determine the rows per bank by looking at the total capacity
    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());

    // determine the actual DRAM capacity from the DRAM config in Mbytes
    uint64_t deviceCapacity = deviceSize / (1024 * 1024) * devicesPerRank *
        ranksPerChannel;

    // if the actual DRAM size does not match the memory capacity in the
    // system, warn!
    if (deviceCapacity != capacity / (1024 * 1024))
        warn("DRAM device capacity (%d Mbytes) does not match the "
             "address range assigned (%d Mbytes)\n", deviceCapacity,
             capacity / (1024 * 1024));

    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
            AbstractMemory::size());

    DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n",
            rowBufferSize, columnsPerRowBuffer);

    rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel);

    // some basic sanity checks
    if (tREFI <= tRP || tREFI <= tRFC) {
        fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n",
              tREFI, tRP, tRFC);
    }

    // basic bank group architecture checks ->
    if (bankGroupArch) {
        // must have at least one bank per bank group
        if (bankGroupsPerRank > banksPerRank) {
            fatal("banks per rank (%d) must be equal to or larger than "
                  "bank groups per rank (%d)\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // must have the same number of banks in each bank group
        if ((banksPerRank % bankGroupsPerRank) != 0) {
            fatal("Banks per rank (%d) must be evenly divisible by bank "
                  "groups per rank (%d) for equal banks per bank group\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // tCCD_L should be greater than the minimal back-to-back burst delay
        if (tCCD_L <= tBURST) {
            fatal("tCCD_L (%d) should be larger than tBURST (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tCCD_L, tBURST, bankGroupsPerRank);
        }
        // tCCD_L_WR should be greater than the minimal back-to-back burst
        // delay
        if (tCCD_L_WR <= tBURST) {
            fatal("tCCD_L_WR (%d) should be larger than tBURST (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tCCD_L_WR, tBURST, bankGroupsPerRank);
        }
        // tRRD_L must be at least the minimal same-bank-group ACT-to-ACT
        // delay; some datasheets might specify it equal to tRRD
        if (tRRD_L < tRRD) {
            fatal("tRRD_L (%d) should be larger than tRRD (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tRRD_L, tRRD, bankGroupsPerRank);
        }
    }

}

void
DRAMCtrl::init()
{
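Continuing the illustrative geometry, the rows-per-bank formula in the constructor body works out as follows (capacity and bank counts assumed for the example):

#include <cassert>
#include <cstdint>

int main()
{
    const uint64_t capacity = 2ULL << 30;   // 2 GiB channel, assumed
    const uint64_t rowBufferSize = 8192;    // from the earlier sketch
    const uint64_t banksPerRank = 8, ranksPerChannel = 2;

    // same formula as in the constructor body above
    const uint64_t rowsPerBank =
        capacity / (rowBufferSize * banksPerRank * ranksPerChannel);
    assert(rowsPerBank == 16384);
    return 0;
}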
    AbstractMemory::init();
|     MemCtrl::init();

    if (!port.isConnected()) {
        fatal("DRAMCtrl %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }

    // a few sanity checks on the interleaving, saved for here to
    // ensure that the system pointer is initialised
    if (range.interleaved()) {
        if (channels != range.stripes())
            fatal("%s has %d interleaved address stripes but %d channel(s)\n",
                  name(), range.stripes(), channels);

        if (addrMapping == Enums::RoRaBaChCo) {
            if (rowBufferSize != range.granularity()) {
                fatal("Channel interleaving of %s doesn't match RoRaBaChCo "
                      "address map\n", name());
            }
        } else if (addrMapping == Enums::RoRaBaCoCh ||
                   addrMapping == Enums::RoCoRaBaCh) {
            // for the interleavings with channel bits in the bottom,
            // if the system uses a channel striping granularity that
            // is larger than the DRAM burst size, then map the
            // sequential accesses within a stripe to a number of
            // columns in the DRAM, effectively placing some of the
            // lower-order column bits as the least-significant bits
            // of the address (above the ones denoting the burst size)
            assert(columnsPerStripe >= 1);

            // channel striping has to be done at a granularity that
            // is equal to or larger than a cache line
            if (system()->cacheLineSize() > range.granularity()) {
                fatal("Channel interleaving of %s must be at least as large "
                      "as the cache line size\n", name());
            }

            // ...and equal to or smaller than the row-buffer size
            if (rowBufferSize < range.granularity()) {
                fatal("Channel interleaving of %s must be at most as large "
                      "as the row-buffer size\n", name());
            }
            // this is essentially the check above, so just to be sure
            assert(columnsPerStripe <= columnsPerRowBuffer);
        }
    }
}

void
DRAMCtrl::startup()
{
    // remember the memory system mode of operation
    isTimingMode = system()->isTimingMode();

    if (isTimingMode) {
        // timestamp offset should be in clock cycles for DRAMPower
        timeStampOffset = divCeil(curTick(), tCK);

        // update the start tick for the precharge accounting to the
        // current tick
        for (auto r : ranks) {
            r->startup(curTick() + tREFI - tRP);
        }

        // shift the bus busy time sufficiently far ahead that we never
        // have to worry about negative values when computing the time for
        // the next request, this will add an insignificant bubble at the
        // start of simulation
        nextBurstAt = curTick() + tRP + tRCD;
    }
}

Tick
DRAMCtrl::recvAtomic(PacketPtr pkt)
{
    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    // do the actual memory access and turn the packet into a response
    access(pkt);

    Tick latency = 0;
    if (pkt->hasData()) {
        // this value is not supposed to be accurate, just enough to
        // keep things going, mimicking a closed page
        latency = tRP + tRCD + tCL;
    }
    return latency;
}

bool
DRAMCtrl::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n",
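The closed-page estimate in recvAtomic is just precharge + activate + CAS; with assumed DDR3-1600-style timings in picoseconds it comes to roughly 41 ns:

#include <cassert>
#include <cstdint>

using Tick = uint64_t;  // gem5 tick, 1 ps resolution assumed

int main()
{
    const Tick tRP = 13750, tRCD = 13750, tCL = 13750;  // assumed timings
    // closed-page estimate used by recvAtomic for packets with data
    const Tick latency = tRP + tRCD + tCL;
    assert(latency == 41250);  // 41.25 ns
    return 0;
}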
            readBufferSize, readQueue.size() + respQueue.size(),
|             readBufferSize, totalReadQueueSize + respQueue.size(),
            neededEntries);

    return
        (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize;
|     auto rdsize_new = totalReadQueueSize + respQueue.size() + neededEntries;
|     return rdsize_new > readBufferSize;
}

bool
DRAMCtrl::writeQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, writeQueue.size(), neededEntries);
    return (writeQueue.size() + neededEntries) > writeBufferSize;
|             writeBufferSize, totalWriteQueueSize, neededEntries);
| 
|     auto wrsize_new = (totalWriteQueueSize + neededEntries);
|     return wrsize_new > writeBufferSize;
}

DRAMCtrl::DRAMPacket*
DRAMCtrl::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size,
                     bool isRead)
{
    // decode the address based on the address mapping scheme, with
    // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and
    // channel, respectively
    uint8_t rank;
    uint8_t bank;
    // use a 64-bit unsigned during the computations as the row is
    // always the top bits, and check before creating the DRAMPacket
    uint64_t row;

    // truncate the address to a DRAM burst, which makes it unique to
    // a specific column, row, bank, rank and channel
    Addr addr = dramPktAddr / burstSize;

    // we have removed the lowest order address bits that denote the
    // position within the column
    if (addrMapping == Enums::RoRaBaChCo) {
        // the lowest order bits denote the column to ensure that
        // sequential cache lines occupy the same row
        addr = addr / columnsPerRowBuffer;

        // take out the channel part of the address
        addr = addr / channels;

        // after the channel bits, get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits, no need to remove them from addr
        row = addr % rowsPerBank;
    } else if (addrMapping == Enums::RoRaBaCoCh) {
        // take out the lower-order column bits
        addr = addr / columnsPerStripe;

        // take out the channel part of the address
        addr = addr / channels;

        // next, the higher-order column bits
        addr = addr / (columnsPerRowBuffer / columnsPerStripe);

        // after the column bits, we get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits, no need to remove them from addr
        row = addr % rowsPerBank;
    } else if (addrMapping == Enums::RoCoRaBaCh) {
        // optimise for closed page mode and utilise maximum
        // parallelism of the DRAM (at the cost of power)

        // take out the lower-order column bits
        addr = addr / columnsPerStripe;

        // take out the channel part of the address, note that this has
        // to match with how accesses are interleaved between the
        // controllers in the address mapping
        addr = addr / channels;

        // start with the bank bits, as this provides the maximum
        // opportunity for parallelism between requests
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // next get the rank bits
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // next, the higher-order column bits
        addr = addr / (columnsPerRowBuffer / columnsPerStripe);

        // lastly, get the row bits, no need to remove them from addr
        row = addr % rowsPerBank;
    } else
        panic("Unknown address mapping policy chosen!");

    assert(rank < ranksPerChannel);
    assert(bank < banksPerRank);
    assert(row < rowsPerBank);
    assert(row < Bank::NO_ROW);

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            dramPktAddr, rank, bank, row);

    // create the corresponding DRAM packet with the entry time and
    // ready time set to the current tick, the latter will be updated
    // later
    uint16_t bank_id = banksPerRank * rank + bank;
    return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
                          size, ranks[rank]->banks[bank], *ranks[rank]);
}

void
DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pktCount != 0);

    // if the request size is larger than the burst size, the pkt is split
    // into multiple DRAM packets.
    // Note: if the pkt starting address is not aligned to the burst size,
    // the address of the first DRAM packet is kept unaligned. Subsequent
    // DRAM packets are aligned to burst size boundaries. This is to ensure
    // we accurately check read packets against packets in the write queue.
    Addr addr = pkt->getAddr();
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
        readPktSize[ceilLog2(size)]++;
        readBursts++;
|         masterReadAccesses[pkt->masterId()]++;
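A worked decode helps check the divide/modulo chain in decodeAddr. This sketch follows the RoCoRaBaCh branch with the toy geometry used earlier; all parameters are assumptions:

#include <cassert>
#include <cstdint>

int main()
{
    // toy geometry, consistent with the earlier sketches (assumed)
    const uint64_t burstSize = 64, channels = 1, banksPerRank = 8,
                   ranksPerChannel = 2, columnsPerStripe = 1,
                   columnsPerRowBuffer = 128, rowsPerBank = 16384;

    uint64_t addr = 0x12345678 / burstSize; // drop intra-burst offset
    addr /= columnsPerStripe;               // lower-order column bits
    addr /= channels;                       // channel bits
    const uint64_t bank = addr % banksPerRank;    // bank bits first
    addr /= banksPerRank;
    const uint64_t rank = addr % ranksPerChannel; // then rank bits
    addr /= ranksPerChannel;
    addr /= (columnsPerRowBuffer / columnsPerStripe); // high column bits
    const uint64_t row = addr % rowsPerBank;

    assert(bank == 1 && rank == 1 && row == 2330);
    return 0;
}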

        // First check write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        Addr burst_addr = burstAlign(addr);
        // if the burst address is not present then there is no need
        // to look any further
        if (isInWriteQueue.find(burst_addr) != isInWriteQueue.end()) {
            for (const auto& p : writeQueue) {
                // check if the read is subsumed in the write queue
                // packet we are looking at
                if (p->addr <= addr && (addr + size) <= (p->addr + p->size)) {
                    foundInWrQ = true;
                    servicedByWrQ++;
                    pktsServicedByWrQ++;
                    DPRINTF(DRAM, "Read to addr %lld with size %d serviced by "
                            "write queue\n", addr, size);
                    bytesReadWrQ += burstSize;
                    break;
|             for (const auto& vec : writeQueue) {
|                 for (const auto& p : vec) {
|                     // check if the read is subsumed in the write queue
|                     // packet we are looking at
|                     if (p->addr <= addr &&
|                         ((addr + size) <= (p->addr + p->size))) {
| 
|                         foundInWrQ = true;
|                         servicedByWrQ++;
|                         pktsServicedByWrQ++;
|                         DPRINTF(DRAM,
|                                 "Read to addr %lld with size %d serviced by "
|                                 "write queue\n",
|                                 addr, size);
|                         bytesReadWrQ += burstSize;
|                         break;
|                     }
                }
            }
        }

        // If not found in the write q, make a DRAM packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pktCount > 1 && burst_helper == NULL) {
                DPRINTF(DRAM, "Read to addr %lld translates to %d "
                        "dram requests\n", pkt->getAddr(), pktCount);
                burst_helper = new BurstHelper(pktCount);
            }

            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true);
            dram_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            rdQLenPdf[readQueue.size() + respQueue.size()]++;
|             rdQLenPdf[totalReadQueueSize + respQueue.size()]++;

            DPRINTF(DRAM, "Adding to read queue\n");

            readQueue.push_back(dram_pkt);
|             readQueue[dram_pkt->qosValue()].push_back(dram_pkt);

            // increment read entries of the rank
| |
            ++dram_pkt->rankRef.readEntries;

|             // log packet
|             logRequest(MemCtrl::READ, pkt->masterId(), pkt->qosValue(),
|                        dram_pkt->addr, 1);
| 
            // Update stats
            avgRdQLen = readQueue.size() + respQueue.size();
|             avgRdQLen = totalReadQueueSize + respQueue.size();
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // If all packets are serviced by the write queue, we send the response
    // back
    if (pktsServicedByWrQ == pktCount) {
        accessAndRespond(pkt, frontendLatency);
        return;
    }

    // Update how many split packets are serviced by the write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
DRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than the burst size, the pkt is split
    // into multiple DRAM packets
    Addr addr = pkt->getAddr();
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
        writePktSize[ceilLog2(size)]++;
        writeBursts++;
|         masterWriteAccesses[pkt->masterId()]++;
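The (addr | (burstSize - 1)) + 1 idiom used in both queueing paths rounds up to the next burst boundary; a quick check with assumed values:

#include <algorithm>
#include <cassert>
#include <cstdint>

int main()
{
    const uint64_t burstSize = 64;           // power of two, assumed
    uint64_t addr = 0x1030;                  // unaligned start
    const uint64_t reqEnd = addr + 100;      // 100-byte request, assumed

    // first chunk runs from addr to the end of its burst (or request)
    uint64_t size = std::min((addr | (burstSize - 1)) + 1, reqEnd) - addr;
    assert(size == 16);                      // bytes 0x1030..0x103f

    // the next DRAM packet starts at the following burst boundary
    addr = (addr | (burstSize - 1)) + 1;
    assert(addr == 0x1040);
    return 0;
}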
        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not
        bool merged = isInWriteQueue.find(burstAlign(addr)) !=
            isInWriteQueue.end();

        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false);

            assert(writeQueue.size() < writeBufferSize);
            wrQLenPdf[writeQueue.size()]++;
|             assert(totalWriteQueueSize < writeBufferSize);
|             wrQLenPdf[totalWriteQueueSize]++;

            DPRINTF(DRAM, "Adding to write queue\n");

            writeQueue.push_back(dram_pkt);
|             writeQueue[dram_pkt->qosValue()].push_back(dram_pkt);
            isInWriteQueue.insert(burstAlign(addr));
            assert(writeQueue.size() == isInWriteQueue.size());
| |

|             // log packet
|             logRequest(MemCtrl::WRITE, pkt->masterId(), pkt->qosValue(),
|                        dram_pkt->addr, 1);
| 
|             assert(totalWriteQueueSize == isInWriteQueue.size());
| 
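The merge test above only compares burst-aligned addresses; a small sketch of why a second write to the same burst is absorbed (addresses and burst size assumed):

#include <cassert>
#include <cstdint>
#include <unordered_set>

using Addr = uint64_t;

int main()
{
    const Addr burstSize = 64;               // assumed
    auto burstAlign = [&](Addr a) { return a & ~(burstSize - 1); };

    std::unordered_set<Addr> isInWriteQueue;
    isInWriteQueue.insert(burstAlign(0x1040));

    // a later write to any byte of the same burst merges rather than
    // enqueuing a second DRAM packet
    bool merged = isInWriteQueue.find(burstAlign(0x1077)) !=
        isInWriteQueue.end();
    assert(merged);
    return 0;
}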
            // Update stats
            avgWrQLen = writeQueue.size();
|             avgWrQLen = totalWriteQueueSize;

            // increment write entries of the rank
            ++dram_pkt->rankRef.writeEntries;
        } else {
            DPRINTF(DRAM, "Merging write burst with existing queue entry\n");

            // keep track of the fact that this burst effectively
            // disappeared as it was merged with an existing one
            mergedWrBursts++;
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency);

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
DRAMCtrl::printQs() const {
| DRAMCtrl::printQs() const
| {
| #if TRACING_ON
    DPRINTF(DRAM, "===READ QUEUE===\n\n");
    for (auto i = readQueue.begin() ; i != readQueue.end() ; ++i) {
        DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
|     for (const auto& queue : readQueue) {
|         for (const auto& packet : queue) {
|             DPRINTF(DRAM, "Read %lu\n", packet->addr);
|         }
    }
| 
    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
    for (auto i = respQueue.begin() ; i != respQueue.end() ; ++i) {
        DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
|     for (const auto& packet : respQueue) {
|         DPRINTF(DRAM, "Response %lu\n", packet->addr);
    }
| 
    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
    for (auto i = writeQueue.begin() ; i != writeQueue.end() ; ++i) {
        DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
|     for (const auto& queue : writeQueue) {
|         for (const auto& packet : queue) {
|             DPRINTF(DRAM, "Write %lu\n", packet->addr);
|         }
    }
| #endif // TRACING_ON
}

bool
DRAMCtrl::recvTimingReq(PacketPtr pkt)
{
    // This is where we enter from the outside world
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "Should only see read and writes at memory controller\n");

    // Calc avg gap between requests
    if (prevArrival != 0) {
        totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();


    // Find out how many dram packets a pkt translates to
    // If the burst size is equal to or larger than the pkt size, then a pkt
    // translates to only one dram packet. Otherwise, a pkt translates to
    // multiple dram packets
    unsigned size = pkt->getSize();
    unsigned offset = pkt->getAddr() & (burstSize - 1);
    unsigned int dram_pkt_count = divCeil(offset + size, burstSize);

|     // run the QoS scheduler and assign a QoS priority value to the packet
|     qosSchedule( { &readQueue, &writeQueue }, burstSize, pkt);
| 
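The packet-to-burst count is a ceiling division over the offset-adjusted size; restated with the same assumed numbers as the earlier split example:

#include <cassert>
#include <cstdint>

int main()
{
    const uint64_t burstSize = 64;                  // assumed
    const uint64_t addr = 0x1030, size = 100;       // assumed request
    const uint64_t offset = addr & (burstSize - 1); // 48
    // divCeil(offset + size, burstSize) in gem5 terms
    const uint64_t dram_pkt_count =
        (offset + size + burstSize - 1) / burstSize;
    assert(dram_pkt_count == 3);  // 0x1030..0x1093 spans three 64B bursts
    return 0;
}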
    // check local buffers and do not accept if full
    if (pkt->isRead()) {
        assert(size != 0);
        if (readQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            numRdRetry++;
            return false;
        } else {
            addToReadQueue(pkt, dram_pkt_count);
            readReqs++;
            bytesReadSys += size;
        }
    } else {
        assert(pkt->isWrite());
        assert(size != 0);
        if (writeQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt, dram_pkt_count);
            writeReqs++;
            bytesWrittenSys += size;
        }
    }

    return true;
}

void
DRAMCtrl::processRespondEvent()
{
    DPRINTF(DRAM,
            "processRespondEvent(): Some req has reached its readyTime\n");

    DRAMPacket* dram_pkt = respQueue.front();

    // if a read has reached its ready-time, decrement the number of reads
    // At this point the packet has been handled and there is a possibility
    // to switch to low-power mode if no other packet is available
    --dram_pkt->rankRef.readEntries;
    DPRINTF(DRAM, "number of read entries for rank %d is %d\n",
            dram_pkt->rank, dram_pkt->rankRef.readEntries);

    // counter should at least indicate one outstanding request
    // for this read
    assert(dram_pkt->rankRef.outstandingEvents > 0);
    // read response received, decrement count
    --dram_pkt->rankRef.outstandingEvents;

    // at this moment we should not have transitioned to a low-power state
    assert((dram_pkt->rankRef.pwrState != PWR_SREF) &&
           (dram_pkt->rankRef.pwrState != PWR_PRE_PDN) &&
           (dram_pkt->rankRef.pwrState != PWR_ACT_PDN));

    // track if this is the last packet before idling
    // and that there are no outstanding commands to this rank
    if (dram_pkt->rankRef.isQueueEmpty() &&
        dram_pkt->rankRef.outstandingEvents == 0) {
        // verify that there are no events scheduled
        assert(!dram_pkt->rankRef.activateEvent.scheduled());
        assert(!dram_pkt->rankRef.prechargeEvent.scheduled());

        // if coming from the active state, schedule a power event to
        // active power-down, else go to precharge power-down
        DPRINTF(DRAMState, "Rank %d sleep at tick %d; current power state is "
                "%d\n", dram_pkt->rank, curTick(), dram_pkt->rankRef.pwrState);

        // default to ACT power-down unless already in IDLE state
        // could be in IDLE if PRE issued before data returned
        PowerState next_pwr_state = PWR_ACT_PDN;
        if (dram_pkt->rankRef.pwrState == PWR_IDLE) {
            next_pwr_state = PWR_PRE_PDN;
        }

        dram_pkt->rankRef.powerDownSleep(next_pwr_state, curTick());
    }

    if (dram_pkt->burstHelper) {
        // it is a split packet
        dram_pkt->burstHelper->burstsServiced++;
        if (dram_pkt->burstHelper->burstsServiced ==
            dram_pkt->burstHelper->burstCount) {
            // we have now serviced all children packets of a system packet
            // so we can now respond to the requester
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
            delete dram_pkt->burstHelper;
            dram_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
    }

    delete respQueue.front();
    respQueue.pop_front();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (drainState() == DrainState::Draining &&
            writeQueue.empty() && readQueue.empty() && allRanksDrained()) {
|             !totalWriteQueueSize && !totalReadQueueSize && allRanksDrained()) {

            DPRINTF(Drain, "DRAM controller done draining\n");
            signalDrainDone();
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetryReq();
    }
}

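The idle-rank power-down choice in processRespondEvent reduces to a single condition; a standalone restatement (enum names mirror the code above, everything else is assumed):

#include <cassert>

enum PowerState { PWR_IDLE, PWR_ACT, PWR_ACT_PDN, PWR_PRE_PDN, PWR_SREF };

// default to active power-down unless all banks are already precharged
// (IDLE), in which case precharge power-down applies
PowerState nextSleepState(PowerState current)
{
    return current == PWR_IDLE ? PWR_PRE_PDN : PWR_ACT_PDN;
}

int main()
{
    assert(nextSleepState(PWR_IDLE) == PWR_PRE_PDN);
    assert(nextSleepState(PWR_ACT) == PWR_ACT_PDN);
    return 0;
}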
739bool 740DRAMCtrl::chooseNext(std::deque<DRAMPacket*>& queue, Tick extra_col_delay)
| 771DRAMCtrl::DRAMPacketQueue::iterator 772DRAMCtrl::chooseNext(DRAMPacketQueue& queue, Tick extra_col_delay)
|
741{
| 773{
|
742 // This method does the arbitration between requests. The chosen 743 // packet is simply moved to the head of the queue. The other 744 // methods know that this is the place to look. For example, with 745 // FCFS, this method does nothing 746 assert(!queue.empty());
| 774 // This method does the arbitration between requests.
|
747
| 775
|
748 // bool to indicate if a packet to an available rank is found 749 bool found_packet = false; 750 if (queue.size() == 1) { 751 DRAMPacket* dram_pkt = queue.front(); 752 // available rank corresponds to state refresh idle 753 if (ranks[dram_pkt->rank]->inRefIdleState()) { 754 found_packet = true; 755 DPRINTF(DRAM, "Single request, going to a free rank\n"); 756 } else { 757 DPRINTF(DRAM, "Single request, going to a busy rank\n"); 758 } 759 return found_packet; 760 }
| 776 DRAMCtrl::DRAMPacketQueue::iterator ret = queue.end();
|
761
| 777
|
762 if (memSchedPolicy == Enums::fcfs) { 763 // check if there is a packet going to a free rank 764 for (auto i = queue.begin(); i != queue.end() ; ++i) { 765 DRAMPacket* dram_pkt = *i;
| 778 if (!queue.empty()) { 779 if (queue.size() == 1) { 780 // available rank corresponds to state refresh idle 781 DRAMPacket* dram_pkt = *(queue.begin());
|
766 if (ranks[dram_pkt->rank]->inRefIdleState()) {
| 782 if (ranks[dram_pkt->rank]->inRefIdleState()) {
|
767 queue.erase(i); 768 queue.push_front(dram_pkt); 769 found_packet = true; 770 break;
| 783 ret = queue.begin(); 784 DPRINTF(DRAM, "Single request, going to a free rank\n"); 785 } else { 786 DPRINTF(DRAM, "Single request, going to a busy rank\n");
|
771 }
| 787 }
|
| 788 } else if (memSchedPolicy == Enums::fcfs) { 789 // check if there is a packet going to a free rank 790 for (auto i = queue.begin(); i != queue.end(); ++i) { 791 DRAMPacket* dram_pkt = *i; 792 if (ranks[dram_pkt->rank]->inRefIdleState()) { 793 ret = i; 794 break; 795 } 796 } 797 } else if (memSchedPolicy == Enums::frfcfs) { 798 ret = chooseNextFRFCFS(queue, extra_col_delay); 799 } else { 800 panic("No scheduling policy chosen\n");
        }
    }
    return ret;
}

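// chooseNextFRFCFS implements the first-ready, first-come first-served
// policy. The selection order below is, from most to least preferred:
// 1) a row hit whose column command can issue seamlessly,
// 2) a packet to a closed row whose PRE/ACT can be hidden behind the
//    current burst,
// 3) a row hit to an already prepped row that is not seamless,
// 4) the packet whose bank can be prepared the earliest.
// Within each class, the oldest packet wins.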
DRAMCtrl::DRAMPacketQueue::iterator
DRAMCtrl::chooseNextFRFCFS(DRAMPacketQueue& queue, Tick extra_col_delay)
{
    // Only determine this if needed
    vector<uint32_t> earliest_banks(ranksPerChannel, 0);

    // Has minBankPrep been called to populate earliest_banks?
    bool filled_earliest_banks = false;
    // can the PRE/ACT sequence be done without impacting utilization?
    bool hidden_bank_prep = false;

    // search for seamless row hits first, if no seamless row hit is
    // found then determine if there are other packets that can be issued
    // without incurring additional bus delay due to bank timing
    // Will select closed rows first to enable more open row possibilities
    // in future selections
    bool found_hidden_bank = false;

    // remember if we found a row hit, not seamless, but bank prepped
    // and ready
    bool found_prepped_pkt = false;

    // if we have no row hit, prepped or not, and no seamless packet,
    // just go for the earliest possible
    bool found_earliest_pkt = false;

    auto selected_pkt_it = queue.end();

    // time we need to issue a column command to be seamless
    const Tick min_col_at = std::max(nextBurstAt + extra_col_delay, curTick());

    for (auto i = queue.begin(); i != queue.end(); ++i) {
        DRAMPacket* dram_pkt = *i;
        const Bank& bank = dram_pkt->bankRef;
        const Tick col_allowed_at = dram_pkt->isRead() ? bank.rdAllowedAt :
                bank.wrAllowedAt;

        DPRINTF(DRAM, "%s checking packet in bank %d\n",
                __func__, dram_pkt->bankRef.bank);

        // check if rank is not doing a refresh and thus is available, if not,
        // jump to the next packet
        if (dram_pkt->rankRef.inRefIdleState()) {

            DPRINTF(DRAM,
                    "%s bank %d - Rank %d available\n", __func__,
                    dram_pkt->bankRef.bank, dram_pkt->rankRef.rank);

            // check if it is a row hit
            if (bank.openRow == dram_pkt->row) {
                // no additional rank-to-rank or same bank-group
                // delays, or we switched read/write and might as well
                // go for the row hit
                if (col_allowed_at <= min_col_at) {
                    // FCFS within the hits, giving priority to
                    // commands that can issue seamlessly, without
                    // additional delay, such as same rank accesses
                    // and/or different bank-group accesses
                    DPRINTF(DRAM, "%s Seamless row buffer hit\n", __func__);
                    selected_pkt_it = i;
                    // no need to look through the remaining queue entries
                    break;
                } else if (!found_hidden_bank && !found_prepped_pkt) {
                    // if we did not find a packet to a closed row that can
                    // issue the bank commands without incurring delay, and
                    // did not yet find a packet to a prepped row, remember
                    // the current one
                    selected_pkt_it = i;
                    found_prepped_pkt = true;
                    DPRINTF(DRAM, "%s Prepped row buffer hit\n", __func__);
                }
            } else if (!found_earliest_pkt) {
                // if we have not initialised the bank status, do it
                // now, and only once per scheduling decision
                if (!filled_earliest_banks) {
                    // determine entries with earliest bank delay
                    std::tie(earliest_banks, hidden_bank_prep) =
                        minBankPrep(queue, min_col_at);
                    filled_earliest_banks = true;
                }

                // bank is amongst first available banks
                // minBankPrep will give priority to packets that can
                // issue seamlessly
                if (bits(earliest_banks[dram_pkt->rank],
                         dram_pkt->bank, dram_pkt->bank)) {
                    found_earliest_pkt = true;
                    found_hidden_bank = hidden_bank_prep;

                    // give priority to packets that can issue
                    // bank commands 'behind the scenes'; any
                    // additional delay will be due to col-to-col
                    // command requirements
                    if (hidden_bank_prep || !found_prepped_pkt)
                        selected_pkt_it = i;
                }
            }
        } else {
            DPRINTF(DRAM, "%s bank %d - Rank %d not available\n", __func__,
                    dram_pkt->bankRef.bank, dram_pkt->rankRef.rank);
        }
    }

    if (selected_pkt_it == queue.end()) {
        DPRINTF(DRAM, "%s no available ranks found\n", __func__);
    }

    return selected_pkt_it;
}

void
DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
    DPRINTF(DRAM, "Responding to Address %lld.. ", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    access(pkt);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());
        // response_time consumes the static latency and is charged also
        // with headerDelay that takes into account the delay provided by
        // the xbar and also the payloadDelay that takes into account the
        // number of data beats.
        Tick response_time = curTick() + static_latency + pkt->headerDelay +
                             pkt->payloadDelay;
        // Here we reset the timing of the packet before sending it out.
        pkt->headerDelay = pkt->payloadDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, response_time, true);
    } else {
        // @todo the packet is going to be deleted, and the DRAMPacket
        // still holds a pointer to it
        pendingDelete.reset(pkt);
    }

    DPRINTF(DRAM, "Done\n");

    return;
}

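// For illustration of the constraints enforced in activateBank below
// (hypothetical values, not from any particular device): with
// activationLimit = 4 and tXAW = 40ns, the 5th ACT to a rank may issue
// no earlier than 40ns after the 1st; in addition, back-to-back ACTs
// are spaced by tRRD_L within a bank group and by the shorter tRRD
// otherwise.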
void
DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref,
                       Tick act_tick, uint32_t row)
{
    assert(rank_ref.actTicks.size() == activationLimit);

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    // update the open row
    assert(bank_ref.openRow == Bank::NO_ROW);
    bank_ref.openRow = row;

    // start counting anew, this covers both the case when we
    // auto-precharged, and when this access is forced to
    // precharge
    bank_ref.bytesAccessed = 0;
    bank_ref.rowAccesses = 0;

    ++rank_ref.numBanksActive;
    assert(rank_ref.numBanksActive <= banksPerRank);

    DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got %d active\n",
            bank_ref.bank, rank_ref.rank, act_tick,
            ranks[rank_ref.rank]->numBanksActive);

    rank_ref.cmdList.push_back(Command(MemCommand::ACT, bank_ref.bank,
                               act_tick));

    DPRINTF(DRAMPower, "%llu,ACT,%d,%d\n", divCeil(act_tick, tCK) -
            timeStampOffset, bank_ref.bank, rank_ref.rank);

    // The next access has to respect tRAS for this bank
    bank_ref.preAllowedAt = act_tick + tRAS;

    // Respect the row-to-column command delay for both read and write cmds
    bank_ref.rdAllowedAt = std::max(act_tick + tRCD, bank_ref.rdAllowedAt);
    bank_ref.wrAllowedAt = std::max(act_tick + tRCD, bank_ref.wrAllowedAt);

    // start by enforcing tRRD
    for (int i = 0; i < banksPerRank; i++) {
        // next activate to any bank in this rank must not happen
        // before tRRD
        if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) {
            // bank group architecture requires longer delays between
            // ACT commands within the same bank group. Use tRRD_L
            // in this case
            rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD_L,
                                             rank_ref.banks[i].actAllowedAt);
        } else {
            // use shorter tRRD value when either
            // 1) bank group architecture is not supported
            // 2) bank is in a different bank group
            rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD,
                                             rank_ref.banks[i].actAllowedAt);
        }
    }

    // next, we deal with tXAW; if the activation limit is disabled
    // then actTicks is empty and we skip this bookkeeping
    if (!rank_ref.actTicks.empty()) {
        // sanity check
        if (rank_ref.actTicks.back() &&
            (act_tick - rank_ref.actTicks.back()) < tXAW) {
            panic("Got %d activates in window %d (%llu - %llu) which "
                  "is smaller than %llu\n", activationLimit, act_tick -
                  rank_ref.actTicks.back(), act_tick,
                  rank_ref.actTicks.back(), tXAW);
        }

        // shift the times used for the book keeping, the last element
        // (highest index) is the oldest one and hence the lowest value
        rank_ref.actTicks.pop_back();

        // record a new activation (in the future)
        rank_ref.actTicks.push_front(act_tick);

        // cannot activate more than X times in time window tXAW, push the
        // next one (the X + 1'st activate) to be tXAW away from the
        // oldest in our window of X
        if (rank_ref.actTicks.back() &&
            (act_tick - rank_ref.actTicks.back()) < tXAW) {
            DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "
                    "no earlier than %llu\n", activationLimit,
                    rank_ref.actTicks.back() + tXAW);
            for (int j = 0; j < banksPerRank; j++)
                // next activate must not happen before end of window
                rank_ref.banks[j].actAllowedAt =
                    std::max(rank_ref.actTicks.back() + tXAW,
                             rank_ref.banks[j].actAllowedAt);
        }
    }

    // at the point when this activate takes place, make sure we
    // transition to the active power state
    if (!rank_ref.activateEvent.scheduled())
        schedule(rank_ref.activateEvent, act_tick);
    else if (rank_ref.activateEvent.when() > act_tick)
        // move it sooner in time
        reschedule(rank_ref.activateEvent, act_tick);
}

void
DRAMCtrl::prechargeBank(Rank& rank_ref, Bank& bank, Tick pre_at, bool trace)
{
    // make sure the bank has an open row
    assert(bank.openRow != Bank::NO_ROW);

    // sample the bytes per activate here since we are closing
    // the page
    bytesPerActivate.sample(bank.bytesAccessed);

    bank.openRow = Bank::NO_ROW;

    // no precharge allowed before this one
    bank.preAllowedAt = pre_at;

    Tick pre_done_at = pre_at + tRP;

    bank.actAllowedAt = std::max(bank.actAllowedAt, pre_done_at);

    assert(rank_ref.numBanksActive != 0);
    --rank_ref.numBanksActive;

    DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got "
            "%d active\n", bank.bank, rank_ref.rank, pre_at,
            rank_ref.numBanksActive);

    if (trace) {

        rank_ref.cmdList.push_back(Command(MemCommand::PRE, bank.bank,
                                   pre_at));
        DPRINTF(DRAMPower, "%llu,PRE,%d,%d\n", divCeil(pre_at, tCK) -
                timeStampOffset, bank.bank, rank_ref.rank);
    }
    // if we look at the current number of active banks we might be
    // tempted to think the DRAM is now idle, however this can be
    // undone by an activate that is scheduled to happen before we
    // would have reached the idle state, so schedule an event and
    // rather check once we actually make it to the point in time when
    // the (last) precharge takes place
    if (!rank_ref.prechargeEvent.scheduled()) {
        schedule(rank_ref.prechargeEvent, pre_done_at);
        // New event, increment count
        ++rank_ref.outstandingEvents;
    } else if (rank_ref.prechargeEvent.when() < pre_done_at) {
        reschedule(rank_ref.prechargeEvent, pre_done_at);
    }
}

void
DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt)
{
    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    // get the rank
    Rank& rank = dram_pkt->rankRef;

    // are we in or transitioning to a low-power state and have not scheduled
    // a power-up event?
    // if so, wake up from power down to issue RD/WR burst
    if (rank.inLowPowerState) {
        assert(rank.pwrState != PWR_SREF);
        rank.scheduleWakeUpEvent(tXP);
    }

    // get the bank
    Bank& bank = dram_pkt->bankRef;

    // for the state we need to track if it is a row hit or not
    bool row_hit = true;

    // Determine the access latency and update the bank state
    if (bank.openRow == dram_pkt->row) {
        // nothing to do
    } else {
        row_hit = false;

        // If there is a page open, precharge it.
        if (bank.openRow != Bank::NO_ROW) {
            prechargeBank(rank, bank, std::max(bank.preAllowedAt, curTick()));
        }

        // next we need to account for the delay in activating the
        // page
        Tick act_tick = std::max(bank.actAllowedAt, curTick());

        // Record the activation and deal with all the global timing
        // constraints caused by a new activation (tRRD and tXAW)
        activateBank(rank, bank, act_tick, dram_pkt->row);
    }

    // respect any constraints on the command (e.g. tRCD or tCCD)
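    // A small worked example of the burst scheduling that follows
    // (arbitrary tick values): with col_allowed_at = 900,
    // nextBurstAt = 1000 and curTick() = 950, the column command issues
    // at cmd_at = max(900, 1000, 950) = 1000 and the data is ready at
    // cmd_at + tCL + tBURST.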
    const Tick col_allowed_at = dram_pkt->isRead() ?
            bank.rdAllowedAt : bank.wrAllowedAt;

    // we need to wait until the bus is available before we can issue
    // the command; need minimum of tBURST between commands
    Tick cmd_at = std::max({col_allowed_at, nextBurstAt, curTick()});

    // update the packet ready time
    dram_pkt->readyTime = cmd_at + tCL + tBURST;

    // update the time for the next read/write burst for each
    // bank (add a max with tCCD/tCCD_L/tCCD_L_WR here)
    Tick dly_to_rd_cmd;
    Tick dly_to_wr_cmd;
    for (int j = 0; j < ranksPerChannel; j++) {
        for (int i = 0; i < banksPerRank; i++) {
            // next burst to same bank group in this rank must not happen
            // before tCCD_L. Different bank group timing requirement is
            // tBURST; Add tCS for different ranks
            if (dram_pkt->rank == j) {
                if (bankGroupArch &&
                    (bank.bankgr == ranks[j]->banks[i].bankgr)) {
                    // bank group architecture requires longer delays between
                    // RD/WR burst commands to the same bank group.
                    // tCCD_L is default requirement for same BG timing
                    // tCCD_L_WR is required for write-to-write
                    // Need to also take bus turnaround delays into account
                    dly_to_rd_cmd = dram_pkt->isRead() ?
                        tCCD_L : std::max(tCCD_L, wrToRdDly);
                    dly_to_wr_cmd = dram_pkt->isRead() ?
                        std::max(tCCD_L, rdToWrDly) : tCCD_L_WR;
                } else {
                    // tBURST is default requirement for diff BG timing
                    // Need to also take bus turnaround delays into account
                    dly_to_rd_cmd = dram_pkt->isRead() ? tBURST : wrToRdDly;
                    dly_to_wr_cmd = dram_pkt->isRead() ? rdToWrDly : tBURST;
                }
            } else {
                // different rank is by default in a different bank group and
                // doesn't require longer tCCD or additional RTW, WTR delays
                // Need to account for rank-to-rank switching with tCS
                dly_to_wr_cmd = rankToRankDly;
                dly_to_rd_cmd = rankToRankDly;
            }
            ranks[j]->banks[i].rdAllowedAt = std::max(cmd_at + dly_to_rd_cmd,
                                             ranks[j]->banks[i].rdAllowedAt);
            ranks[j]->banks[i].wrAllowedAt = std::max(cmd_at + dly_to_wr_cmd,
                                             ranks[j]->banks[i].wrAllowedAt);
        }
    }

    // Save rank of current access
    activeRank = dram_pkt->rank;

    // If this is a write, we also need to respect the write recovery
    // time before a precharge, in the case of a read, respect the
    // read to precharge constraint
    bank.preAllowedAt = std::max(bank.preAllowedAt,
                                 dram_pkt->isRead() ? cmd_at + tRTP :
                                 dram_pkt->readyTime + tWR);

    // increment the bytes accessed and the accesses per row
    bank.bytesAccessed += burstSize;
    ++bank.rowAccesses;

    // if we reached the max, then issue with an auto-precharge
    bool auto_precharge = pageMgmt == Enums::close ||
                          bank.rowAccesses == maxAccessesPerRow;

    // if we did not hit the limit, we might still want to
    // auto-precharge
    if (!auto_precharge &&
        (pageMgmt == Enums::open_adaptive ||
         pageMgmt == Enums::close_adaptive)) {
        // a twist on the open and close page policies:
        // 1) open_adaptive page policy does not blindly keep the
        // page open, but closes it if there are no row hits and there
        // are bank conflicts in the queue
        // 2) close_adaptive page policy does not blindly close the
        // page, but closes it only if there are no row hits in the queue.
        // In this case, only force an auto precharge when there
        // are no same page hits in the queue
        bool got_more_hits = false;
        bool got_bank_conflict = false;

        // either look at the read queue or write queue
        const std::vector<DRAMPacketQueue>& queue =
                dram_pkt->isRead() ? readQueue : writeQueue;

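        // For example (hypothetical queue contents): with an open row R
        // in bank B, open_adaptive keeps R open if another hit to R is
        // queued, and closes it only when no hit is queued but a packet
        // to a different row of B is; close_adaptive closes R whenever
        // no further hit to R is queued.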
        for (uint8_t i = 0; i < numPriorities(); ++i) {
            auto p = queue[i].begin();
            // keep on looking until we find a hit or reach the end of
            // the queue
            // 1) if a hit is found, then both open and close adaptive
            //    policies keep the page open
            // 2) if no hit is found, got_bank_conflict is set to true if a
            //    bank conflict request is waiting in the queue
            // 3) make sure we are not considering the packet that we are
            //    currently dealing with
            while (!got_more_hits && p != queue[i].end()) {
                if (dram_pkt != (*p)) {
                    bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
                                          (dram_pkt->bank == (*p)->bank);

                    bool same_row = dram_pkt->row == (*p)->row;
                    got_more_hits |= same_rank_bank && same_row;
                    got_bank_conflict |= same_rank_bank && !same_row;
                }
                ++p;
            }

            if (got_more_hits)
                break;
        }

        // auto pre-charge when either
        // 1) open_adaptive policy, we have not got any more hits, and
        //    have a bank conflict
        // 2) close_adaptive policy and we have not got any more hits
        auto_precharge = !got_more_hits &&
            (got_bank_conflict || pageMgmt == Enums::close_adaptive);
    }

    // DRAMPower trace command to be written
    std::string mem_cmd = dram_pkt->isRead() ? "RD" : "WR";

    // MemCommand required for DRAMPower library
    MemCommand::cmds command = (mem_cmd == "RD") ? MemCommand::RD :
                                                   MemCommand::WR;

    // Update bus state to reflect when previous command was issued
    nextBurstAt = cmd_at + tBURST;

    DPRINTF(DRAM, "Access to %lld, ready at %lld next burst at %lld.\n",
            dram_pkt->addr, dram_pkt->readyTime, nextBurstAt);

    dram_pkt->rankRef.cmdList.push_back(Command(command, dram_pkt->bank,
                                        cmd_at));

    DPRINTF(DRAMPower, "%llu,%s,%d,%d\n", divCeil(cmd_at, tCK) -
            timeStampOffset, mem_cmd, dram_pkt->bank, dram_pkt->rank);

    // if this access should use auto-precharge, then we are
    // closing the row after the read/write burst
    if (auto_precharge) {
        // if auto-precharge, push a PRE command at the correct tick to the
        // list used by the DRAMPower library to calculate power
        prechargeBank(rank, bank, std::max(curTick(), bank.preAllowedAt));

        DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
    }

    // Update the minimum timing between the requests, this is a
    // conservative estimate of when we have to schedule the next
    // request to not introduce any unnecessary bubbles. In most cases
    // we will wake up sooner than we have to.
    nextReqTime = nextBurstAt - (tRP + tRCD);

    // Update the stats and schedule the next request
    if (dram_pkt->isRead()) {
        ++readsThisTime;
        if (row_hit)
            readRowHits++;
        bytesReadDRAM += burstSize;
        perBankRdBursts[dram_pkt->bankId]++;

        // Update latency stats
        totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
        masterReadTotalLat[dram_pkt->masterId()] +=
            dram_pkt->readyTime - dram_pkt->entryTime;

        totBusLat += tBURST;
        totQLat += cmd_at - dram_pkt->entryTime;
        masterReadBytes[dram_pkt->masterId()] += dram_pkt->size;
    } else {
        ++writesThisTime;
        if (row_hit)
            writeRowHits++;
        bytesWritten += burstSize;
        perBankWrBursts[dram_pkt->bankId]++;
        masterWriteBytes[dram_pkt->masterId()] += dram_pkt->size;
        masterWriteTotalLat[dram_pkt->masterId()] +=
            dram_pkt->readyTime - dram_pkt->entryTime;
    }
}

void
DRAMCtrl::processNextReqEvent()
{
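    // A sketch of the flow through this state machine (thresholds are
    // config-derived): the bus serves reads until the write queue fills
    // past writeHighThreshold, or the read queue empties while writes
    // exceed writeLowThreshold; it then serves writes until the write
    // queue drops back below the low threshold, with minWritesPerSwitch
    // providing hysteresis against rapid turnarounds.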
    // transition is handled by QoS algorithm if enabled
    if (turnPolicy) {
        // select bus state - only done if QoS algorithms are in use
        busStateNext = selectNextBusState();
    }

    // detect bus state change
    bool switched_cmd_type = (busState != busStateNext);
    // record stats
    recordTurnaroundStats();

    DPRINTF(DRAM, "QoS Turnarounds selected state %s %s\n",
            (busState == MemCtrl::READ) ? "READ" : "WRITE",
            switched_cmd_type ? "[turnaround triggered]" : "");

    if (switched_cmd_type) {
        if (busState == READ) {
            DPRINTF(DRAM,
                    "Switching to writes after %d reads with %d reads "
                    "waiting\n", readsThisTime, totalReadQueueSize);
            rdPerTurnAround.sample(readsThisTime);
            readsThisTime = 0;
        } else {
            DPRINTF(DRAM,
                    "Switching to reads after %d writes with %d writes "
                    "waiting\n", writesThisTime, totalWriteQueueSize);
            wrPerTurnAround.sample(writesThisTime);
            writesThisTime = 0;
        }
    }

    // updates current state
    busState = busStateNext;

    // check ranks for refresh/wakeup - uses busStateNext, so done after
    // the turnaround decisions
    int busyRanks = 0;
    for (auto r : ranks) {
        if (!r->inRefIdleState()) {
            if (r->pwrState != PWR_SREF) {
                // rank is busy refreshing
                DPRINTF(DRAMState, "Rank %d is not available\n", r->rank);
                busyRanks++;

                // let the rank know that if it was waiting to drain, it
                // is now done and ready to proceed
                r->checkDrainDone();
            }

            // check if we were in self-refresh and haven't started
            // to transition out
            if ((r->pwrState == PWR_SREF) && r->inLowPowerState) {
                DPRINTF(DRAMState, "Rank %d is in self-refresh\n", r->rank);
                // if we have commands queued to this rank and we don't have
                // a minimum number of active commands enqueued,
                // exit self-refresh
                if (r->forceSelfRefreshExit()) {
                    DPRINTF(DRAMState, "rank %d was in self refresh and"
                            " should wake up\n", r->rank);
                    // wake up from self-refresh
                    r->scheduleWakeUpEvent(tXS);
                    // things are brought back into action once a refresh is
                    // performed after self-refresh
                    // continue with selection for other ranks
                }
            }
        }
    }

    if (busyRanks == ranksPerChannel) {
        // if all ranks are refreshing wait for them to finish
        // and stall this state machine without taking any further
        // action, and do not schedule a new nextReqEvent
        return;
    }

    // when we get here it is either a read or a write
    if (busState == READ) {

        // track if we should switch or not
        bool switch_to_writes = false;

        if (totalReadQueueSize == 0) {
            // In the case there is no read request to go next,
            // trigger writes if we have passed the low threshold (or
            // if we are draining)
            if (totalWriteQueueSize != 0 &&
                (drainState() == DrainState::Draining ||
                 totalWriteQueueSize > writeLowThreshold)) {

                DPRINTF(DRAM, "Switching to writes due to read queue empty\n");
                switch_to_writes = true;
            } else {
                // check if we are drained
                // not done draining until in PWR_IDLE state
                // ensuring all banks are closed and
                // have exited low power states
                if (drainState() == DrainState::Draining &&
                    respQueue.empty() && allRanksDrained()) {

                    DPRINTF(Drain, "DRAM controller done draining\n");
                    signalDrainDone();
                }

                // nothing to do, not even any point in scheduling an
                // event for the next request
                return;
            }
        } else {

            bool read_found = false;
            DRAMPacketQueue::iterator to_read;
            uint8_t prio = numPriorities();

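            // The read queues are scanned from the highest QoS priority
            // level down to the lowest; e.g. with four priority levels
            // the scan order is 3, 2, 1, 0, and the first level that
            // yields a servable packet wins.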
            for (auto queue = readQueue.rbegin();
                 queue != readQueue.rend(); ++queue) {

                prio--;

                DPRINTF(QOS,
                        "DRAM controller checking READ queue [%d] priority [%d elements]\n",
                        prio, queue->size());

                // Figure out which read request goes next
                // If we are changing command type, incorporate the minimum
                // bus turnaround delay, which will be tCS (different-rank
                // case)
                to_read = chooseNext((*queue), switched_cmd_type ? tCS : 0);

                if (to_read != queue->end()) {
                    // candidate read found
                    read_found = true;
                    break;
                }
            }

            // if no read to an available rank is found then return
            // at this point. There could be writes to the available ranks
            // which are above the required threshold. However, to
            // avoid adding more complexity to the code, return and wait
            // for a refresh event to kick things into action again.
            if (!read_found) {
                DPRINTF(DRAM, "No Reads Found - exiting\n");
                return;
            }

            auto dram_pkt = *to_read;

            assert(dram_pkt->rankRef.inRefIdleState());

            doDRAMAccess(dram_pkt);

            // every packet in the respQueue will generate an event,
            // increment the count
            ++dram_pkt->rankRef.outstandingEvents;
            // sanity check
            assert(dram_pkt->size <= burstSize);
            assert(dram_pkt->readyTime >= curTick());

            // log the response
            logResponse(MemCtrl::READ, (*to_read)->masterId(),
                        dram_pkt->qosValue(), dram_pkt->getAddr(), 1,
                        dram_pkt->readyTime - dram_pkt->entryTime);

            // Insert into response queue. It will be sent back to the
            // requester at its readyTime
            if (respQueue.empty()) {
                assert(!respondEvent.scheduled());
                schedule(respondEvent, dram_pkt->readyTime);
            } else {
                assert(respQueue.back()->readyTime <= dram_pkt->readyTime);
                assert(respondEvent.scheduled());
            }

            respQueue.push_back(dram_pkt);

            // we have so many writes that we have to transition
            if (totalWriteQueueSize > writeHighThreshold) {
                switch_to_writes = true;
            }

            // remove the request from the queue - the iterator is no
            // longer valid
            readQueue[dram_pkt->qosValue()].erase(to_read);
        }

        // switching to writes, either because the read queue is empty
        // and the writes have passed the low threshold (or we are
        // draining), or because the writes hit the high threshold
        if (switch_to_writes) {
            // transition to writing
            busStateNext = WRITE;
        }
    } else {

        bool write_found = false;
        DRAMPacketQueue::iterator to_write;
        uint8_t prio = numPriorities();

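        // As for reads, scan the write queues from the highest QoS
        // priority level down to the lowest.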
        for (auto queue = writeQueue.rbegin();
             queue != writeQueue.rend(); ++queue) {

            prio--;

            DPRINTF(QOS,
                    "DRAM controller checking WRITE queue [%d] priority [%d elements]\n",
                    prio, queue->size());

            // If we are changing command type, incorporate the minimum
            // bus turnaround delay
            to_write = chooseNext((*queue),
                                  switched_cmd_type ? std::min(tRTW, tCS) : 0);

            if (to_write != queue->end()) {
                write_found = true;
                break;
            }
        }

        // if no write to a rank that is available to service requests
        // (i.e. a rank in refresh idle state) is found, then return.
        // There could be reads to the available ranks. However, to
        // avoid adding more complexity to the code, return at this point and
        // wait for a refresh event to kick things into action again.
        if (!write_found) {
            DPRINTF(DRAM, "No Writes Found - exiting\n");
            return;
        }

        auto dram_pkt = *to_write;

        assert(dram_pkt->rankRef.inRefIdleState());
        // sanity check
        assert(dram_pkt->size <= burstSize);

        doDRAMAccess(dram_pkt);

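        // A sketch of the event coalescing applied below (hypothetical
        // readyTimes): three writes completing at ticks 10, 20 and 30
        // share a single writeDoneEvent, which ends up scheduled at
        // tick 30.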
        // removed write from queue, decrement count
        --dram_pkt->rankRef.writeEntries;

        // Schedule write done event to decrement event count
        // after the readyTime has been reached
        // Only schedule latest write event to minimize events
        // required; only need to ensure that final event scheduled covers
        // the time that writes are outstanding and bus is active
        // to hold off power-down entry events
        if (!dram_pkt->rankRef.writeDoneEvent.scheduled()) {
            schedule(dram_pkt->rankRef.writeDoneEvent, dram_pkt->readyTime);
            // New event, increment count
            ++dram_pkt->rankRef.outstandingEvents;

        } else if (dram_pkt->rankRef.writeDoneEvent.when() <
                   dram_pkt->readyTime) {

            reschedule(dram_pkt->rankRef.writeDoneEvent, dram_pkt->readyTime);
        }

        isInWriteQueue.erase(burstAlign(dram_pkt->addr));

        // log the response
        logResponse(MemCtrl::WRITE, dram_pkt->masterId(),
                    dram_pkt->qosValue(), dram_pkt->getAddr(), 1,
                    dram_pkt->readyTime - dram_pkt->entryTime);

        // remove the request from the queue - the iterator is no longer valid
        writeQueue[dram_pkt->qosValue()].erase(to_write);

        delete dram_pkt;

        // If we emptied the write queue, or got sufficiently below the
        // threshold (using the minWritesPerSwitch as the hysteresis) and
        // are not draining, or we have reads waiting and have done enough
        // writes, then switch to reads.
        bool below_threshold =
            totalWriteQueueSize + minWritesPerSwitch < writeLowThreshold;

        if (totalWriteQueueSize == 0 ||
            (below_threshold && drainState() != DrainState::Draining) ||
            (totalReadQueueSize && writesThisTime >= minWritesPerSwitch)) {

1517 if (retryWrReq && writeQueue.size() < writeBufferSize) {
1518 retryWrReq = false; 1519 port.sendRetryReq(); 1520 } 1521} 1522 1523pair<vector<uint32_t>, bool>
1524DRAMCtrl::minBankPrep(const deque<DRAMPacket*>& queue,
1525 Tick min_col_at) const 1526 { 1527 Tick min_act_at = MaxTick; 1528 vector<uint32_t> bank_mask(ranksPerChannel, 0); 1529 1530 // latest Tick for which ACT can occur without incurring additional 1531 // delay on the data bus 1532 const Tick hidden_act_max = std::max(min_col_at - tRCD, curTick()); 1533 1534 // Flag condition when burst can issue back-to-back with previous burst 1535 bool found_seamless_bank = false; 1536 1537 // Flag condition when bank can be opened without incurring additional 1538 // delay on the data bus 1539 bool hidden_bank_prep = false; 1540 1541 // determine if we have queued transactions targeting the 1542 // bank in question 1543 vector<bool> got_waiting(ranksPerChannel * banksPerRank, false); 1544 for (const auto& p : queue) { 1545 if (p->rankRef.inRefIdleState()) 1546 got_waiting[p->bankId] = true; 1547 } 1548 1549 // Find command with optimal bank timing 1550 // Will prioritize commands that can issue seamlessly. 1551 for (int i = 0; i < ranksPerChannel; i++) { 1552 for (int j = 0; j < banksPerRank; j++) { 1553 uint16_t bank_id = i * banksPerRank + j; 1554 1555 // if we have waiting requests for the bank, and it is 1556 // amongst the first available, update the mask 1557 if (got_waiting[bank_id]) { 1558 // make sure this rank is not currently refreshing. 1559 assert(ranks[i]->inRefIdleState()); 1560 // simplistic approximation of when the bank can issue 1561 // an activate, ignoring any rank-to-rank switching 1562 // cost in this calculation 1563 Tick act_at = ranks[i]->banks[j].openRow == Bank::NO_ROW ? 1564 std::max(ranks[i]->banks[j].actAllowedAt, curTick()) : 1565 std::max(ranks[i]->banks[j].preAllowedAt, curTick()) + tRP; 1566 1567 // When is the earliest the R/W burst can issue? 1568 const Tick col_allowed_at = (busState == READ) ?
1569 ranks[i]->banks[j].rdAllowedAt : 1570 ranks[i]->banks[j].wrAllowedAt; 1571 Tick col_at = std::max(col_allowed_at, act_at + tRCD); 1572 1573 // bank can issue burst back-to-back (seamlessly) with 1574 // previous burst 1575 bool new_seamless_bank = col_at <= min_col_at; 1576 1577 // if we found a new seamless bank or we have no 1578 // seamless banks, and got a bank with an earlier 1579 // activate time, it should be added to the bit mask 1580 if (new_seamless_bank || 1581 (!found_seamless_bank && act_at <= min_act_at)) { 1582 // if we did not have a seamless bank before, and 1583 // we do now, reset the bank mask, also reset it 1584 // if we have not yet found a seamless bank and 1585 // the activate time is smaller than what we have 1586 // seen so far 1587 if (!found_seamless_bank && 1588 (new_seamless_bank || act_at < min_act_at)) { 1589 std::fill(bank_mask.begin(), bank_mask.end(), 0); 1590 } 1591 1592 found_seamless_bank |= new_seamless_bank; 1593 1594 // ACT can occur 'behind the scenes' 1595 hidden_bank_prep = act_at <= hidden_act_max; 1596 1597 // set the bit corresponding to the available bank 1598 replaceBits(bank_mask[i], j, j, 1); 1599 min_act_at = act_at; 1600 } 1601 } 1602 } 1603 } 1604 1605 return make_pair(bank_mask, hidden_bank_prep); 1606} 1607 1608DRAMCtrl::Rank::Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p, int rank) 1609 : EventManager(&_memory), memory(_memory), 1610 pwrStateTrans(PWR_IDLE), pwrStatePostRefresh(PWR_IDLE), 1611 pwrStateTick(0), refreshDueAt(0), pwrState(PWR_IDLE), 1612 refreshState(REF_IDLE), inLowPowerState(false), rank(rank), 1613 readEntries(0), writeEntries(0), outstandingEvents(0), 1614 wakeUpAllowedAt(0), power(_p, false), banks(_p->banks_per_rank), 1615 numBanksActive(0), actTicks(_p->activation_limit, 0), 1616 writeDoneEvent([this]{ processWriteDoneEvent(); }, name()), 1617 activateEvent([this]{ processActivateEvent(); }, name()), 1618 prechargeEvent([this]{ processPrechargeEvent(); }, name()), 1619 refreshEvent([this]{ processRefreshEvent(); }, name()), 1620 powerEvent([this]{ processPowerEvent(); }, name()), 1621 wakeUpEvent([this]{ processWakeUpEvent(); }, name()) 1622{ 1623 for (int b = 0; b < _p->banks_per_rank; b++) { 1624 banks[b].bank = b; 1625 // GDDR addressing of banks to BG is linear. 1626 // Here we assume that all DRAM generations address bank groups as 1627 // follows: 1628 if (_p->bank_groups_per_rank > 0) { 1629 // Simply assign lower bits to bank group in order to 1630 // rotate across bank groups as banks are incremented 1631 // e.g. 
with 4 banks per bank group and 16 banks total: 1632 // banks 0,4,8,12 are in bank group 0 1633 // banks 1,5,9,13 are in bank group 1 1634 // banks 2,6,10,14 are in bank group 2 1635 // banks 3,7,11,15 are in bank group 3 1636 banks[b].bankgr = b % _p->bank_groups_per_rank; 1637 } else { 1638 // No bank groups; simply assign to bank number 1639 banks[b].bankgr = b; 1640 } 1641 } 1642} 1643 1644void 1645DRAMCtrl::Rank::startup(Tick ref_tick) 1646{ 1647 assert(ref_tick > curTick()); 1648 1649 pwrStateTick = curTick(); 1650 1651 // kick off the refresh, and give ourselves enough time to 1652 // precharge 1653 schedule(refreshEvent, ref_tick); 1654} 1655 1656void 1657DRAMCtrl::Rank::suspend() 1658{ 1659 deschedule(refreshEvent); 1660 1661 // Update the stats 1662 updatePowerStats(); 1663 1664 // don't automatically transition back to LP state after next REF 1665 pwrStatePostRefresh = PWR_IDLE; 1666} 1667 1668bool 1669DRAMCtrl::Rank::isQueueEmpty() const 1670{ 1671 // check commands in Q based on current bus direction 1672 bool no_queued_cmds = ((memory.busStateNext == READ) && (readEntries == 0)) 1673 || ((memory.busStateNext == WRITE) && 1674 (writeEntries == 0)); 1675 return no_queued_cmds; 1676} 1677 1678void 1679DRAMCtrl::Rank::checkDrainDone() 1680{ 1681 // if this rank was waiting to drain it is now able to proceed to 1682 // precharge 1683 if (refreshState == REF_DRAIN) { 1684 DPRINTF(DRAM, "Refresh drain done, now precharging\n"); 1685 1686 refreshState = REF_PD_EXIT; 1687 1688 // hand control back to the refresh event loop 1689 schedule(refreshEvent, curTick()); 1690 } 1691} 1692 1693void 1694DRAMCtrl::Rank::flushCmdList() 1695{ 1696 // at the moment sort the list of commands and update the counters 1697 // for the DRAMPower library when doing a refresh 1698 sort(cmdList.begin(), cmdList.end(), DRAMCtrl::sortTime); 1699 1700 auto next_iter = cmdList.begin(); 1701 // push commands to DRAMPower 1702 for ( ; next_iter != cmdList.end() ; ++next_iter) { 1703 Command cmd = *next_iter; 1704 if (cmd.timeStamp <= curTick()) { 1705 // Move all commands at or before curTick to DRAMPower 1706 power.powerlib.doCommand(cmd.type, cmd.bank, 1707 divCeil(cmd.timeStamp, memory.tCK) - 1708 memory.timeStampOffset); 1709 } else { 1710 // done - found all commands at or before curTick() 1711 // next_iter references the 1st command after curTick 1712 break; 1713 } 1714 } 1715 // reset cmdList to only contain commands after curTick 1716 // if there are no commands after curTick, updated cmdList will be empty 1717 // in this case, next_iter is cmdList.end() 1718 cmdList.assign(next_iter, cmdList.end()); 1719} 1720 1721void 1722DRAMCtrl::Rank::processActivateEvent() 1723{ 1724 // we should transition to the active state as soon as any bank is active 1725 if (pwrState != PWR_ACT) 1726 // note that at this point numBanksActive could be back at 1727 // zero again due to a precharge scheduled in the future 1728 schedulePowerEvent(PWR_ACT, curTick()); 1729} 1730 1731void 1732DRAMCtrl::Rank::processPrechargeEvent() 1733{ 1734 // counter should at least indicate one outstanding request 1735 // for this precharge 1736 assert(outstandingEvents > 0); 1737 // precharge complete, decrement count 1738 --outstandingEvents; 1739 1740 // if we reached zero, then special conditions apply as we track 1741 // if all banks are precharged for the power models 1742 if (numBanksActive == 0) { 1743 // no reads to this rank in the Q and no pending 1744 // RD/WR or refresh commands 1745 if (isQueueEmpty() && outstandingEvents == 0) {
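            // Note: the sleep below models fast-exit precharge power-down,
            // i.e. the PDN_F_PRE command that powerDownSleep() pushes to
            // DRAMPower; slow-exit power-down is not modelled here.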
1746 // should still be in ACT state since bank still open 1747 assert(pwrState == PWR_ACT); 1748 1749 // All banks closed - switch to precharge power down state. 1750 DPRINTF(DRAMState, "Rank %d sleep at tick %d\n", 1751 rank, curTick()); 1752 powerDownSleep(PWR_PRE_PDN, curTick()); 1753 } else { 1754 // we should transition to the idle state when the last bank 1755 // is precharged 1756 schedulePowerEvent(PWR_IDLE, curTick()); 1757 } 1758 } 1759} 1760 1761void 1762DRAMCtrl::Rank::processWriteDoneEvent() 1763{ 1764 // counter should at least indicate one outstanding request 1765 // for this write 1766 assert(outstandingEvents > 0); 1767 // Write transfer on bus has completed 1768 // decrement per rank counter 1769 --outstandingEvents; 1770} 1771 1772void 1773DRAMCtrl::Rank::processRefreshEvent() 1774{ 1775 // when first preparing the refresh, remember when it was due 1776 if ((refreshState == REF_IDLE) || (refreshState == REF_SREF_EXIT)) { 1777 // remember when the refresh is due 1778 refreshDueAt = curTick(); 1779 1780 // proceed to drain 1781 refreshState = REF_DRAIN; 1782 1783 // make nonzero while refresh is pending to ensure 1784 // power down and self-refresh are not entered 1785 ++outstandingEvents; 1786 1787 DPRINTF(DRAM, "Refresh due\n"); 1788 } 1789 1790 // let any scheduled read or write to the same rank go ahead, 1791 // after which it will 1792 // hand control back to this event loop 1793 if (refreshState == REF_DRAIN) { 1794 // if a request is at the moment being handled and this request is 1795 // accessing the current rank then wait for it to finish 1796 if ((rank == memory.activeRank) 1797 && (memory.nextReqEvent.scheduled())) { 1798 // hand control over to the request loop until it is 1799 // evaluated next 1800 DPRINTF(DRAM, "Refresh awaiting draining\n"); 1801 1802 return; 1803 } else { 1804 refreshState = REF_PD_EXIT; 1805 } 1806 } 1807 1808 // at this point, ensure that rank is not in a power-down state 1809 if (refreshState == REF_PD_EXIT) { 1810 // if rank was sleeping and we haven't started exit process, 1811 // wake-up for refresh 1812 if (inLowPowerState) { 1813 DPRINTF(DRAM, "Wake Up for refresh\n"); 1814 // save state and return after refresh completes 1815 scheduleWakeUpEvent(memory.tXP); 1816 return; 1817 } else { 1818 refreshState = REF_PRE; 1819 } 1820 } 1821 1822 // at this point, ensure that all banks are precharged 1823 if (refreshState == REF_PRE) { 1824 // precharge any active bank 1825 if (numBanksActive != 0) { 1826 // at the moment, we use a precharge all even if there is 1827 // only a single bank open 1828 DPRINTF(DRAM, "Precharging all\n"); 1829 1830 // first determine when we can precharge 1831 Tick pre_at = curTick(); 1832 1833 for (auto &b : banks) { 1834 // respect both causality and any existing bank 1835 // constraints, some banks could already have a 1836 // (auto) precharge scheduled 1837 pre_at = std::max(b.preAllowedAt, pre_at); 1838 } 1839 1840 // make sure all banks per rank are precharged, and for those that 1841 // already are, update their availability 1842 Tick act_allowed_at = pre_at + memory.tRP; 1843 1844 for (auto &b : banks) { 1845 if (b.openRow != Bank::NO_ROW) { 1846 memory.prechargeBank(*this, b, pre_at, false); 1847 } else { 1848 b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at); 1849 b.preAllowedAt = std::max(b.preAllowedAt, pre_at); 1850 } 1851 } 1852 1853 // precharge all banks in rank 1854 cmdList.push_back(Command(MemCommand::PREA, 0, pre_at)); 1855 1856 DPRINTF(DRAMPower, "%llu,PREA,0,%d\n", 1857
divCeil(pre_at, memory.tCK) - 1858 memory.timeStampOffset, rank); 1859 } else if ((pwrState == PWR_IDLE) && (outstandingEvents == 1)) { 1860 // Banks are closed, have transitioned to IDLE state, and 1861 // no outstanding ACT,RD/WR,Auto-PRE sequence scheduled 1862 DPRINTF(DRAM, "All banks already precharged, starting refresh\n"); 1863 1864 // go ahead and kick the power state machine into gear since 1865 // we are already idle 1866 schedulePowerEvent(PWR_REF, curTick()); 1867 } else { 1868 // banks are closed but pwrState has not transitioned to IDLE, 1869 // or an ACT,RD/WR,Auto-PRE sequence is still outstanding; 1870 // should have outstanding precharge event in this case 1871 assert(prechargeEvent.scheduled()); 1872 // will start refresh when pwrState transitions to IDLE 1873 } 1874 1875 assert(numBanksActive == 0); 1876 1877 // wait for all banks to be precharged, at which point the 1878 // power state machine will transition to the idle state, and 1879 // automatically move to a refresh, at that point it will also 1880 // call this method to get the refresh event loop going again 1881 return; 1882 } 1883 1884 // last but not least we perform the actual refresh 1885 if (refreshState == REF_START) { 1886 // should never get here with any banks active 1887 assert(numBanksActive == 0); 1888 assert(pwrState == PWR_REF); 1889 1890 Tick ref_done_at = curTick() + memory.tRFC; 1891 1892 for (auto &b : banks) { 1893 b.actAllowedAt = ref_done_at; 1894 } 1895 1896 // at the moment this affects all ranks 1897 cmdList.push_back(Command(MemCommand::REF, 0, curTick())); 1898 1899 // Update the stats 1900 updatePowerStats(); 1901 1902 DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), memory.tCK) - 1903 memory.timeStampOffset, rank); 1904 1905 // Update for next refresh 1906 refreshDueAt += memory.tREFI; 1907 1908 // make sure we did not wait so long that we cannot make up 1909 // for it 1910 if (refreshDueAt < ref_done_at) { 1911 fatal("Refresh was delayed so long we cannot catch up\n"); 1912 } 1913 1914 // Run the refresh and schedule event to transition power states 1915 // when refresh completes 1916 refreshState = REF_RUN; 1917 schedule(refreshEvent, ref_done_at); 1918 return; 1919 } 1920 1921 if (refreshState == REF_RUN) { 1922 // should never get here with any banks active 1923 assert(numBanksActive == 0); 1924 assert(pwrState == PWR_REF); 1925 1926 assert(!powerEvent.scheduled()); 1927 1928 if ((memory.drainState() == DrainState::Draining) || 1929 (memory.drainState() == DrainState::Drained)) { 1930 // if draining, do not re-enter low-power mode. 1931 // simply go to IDLE and wait 1932 schedulePowerEvent(PWR_IDLE, curTick()); 1933 } else { 1934 // At the moment, we sleep when the refresh ends and wait to be 1935 // woken up again if previously in a low-power state. 1936 if (pwrStatePostRefresh != PWR_IDLE) { 1937 // power state should be refresh (PWR_REF) 1938 assert(pwrState == PWR_REF); 1939 DPRINTF(DRAMState, "Rank %d sleeping after refresh and was in " 1940 "power state %d before refreshing\n", rank, 1941 pwrStatePostRefresh); 1942 powerDownSleep(pwrState, curTick()); 1943 1944 // Force PRE power-down if there are no outstanding commands 1945 // in Q after refresh.
1946 } else if (isQueueEmpty()) { 1947 // still have refresh event outstanding but there should 1948 // be no other events outstanding 1949 assert(outstandingEvents == 1); 1950 DPRINTF(DRAMState, "Rank %d sleeping after refresh but was NOT" 1951 " in a low power state before refreshing\n", rank); 1952 powerDownSleep(PWR_PRE_PDN, curTick()); 1953 1954 } else { 1955 // move to the idle power state once the refresh is done; this 1956 // will also move the refresh state machine to the refresh 1957 // idle state 1958 schedulePowerEvent(PWR_IDLE, curTick()); 1959 } 1960 } 1961 1962 // At this point, we have completed the current refresh. 1963 // In the SREF bypass case, we do not get to this state in the 1964 // refresh STM and therefore can always schedule the next event. 1965 // Compensate for the delay in actually performing the refresh 1966 // when scheduling the next one 1967 schedule(refreshEvent, refreshDueAt - memory.tRP); 1968 1969 DPRINTF(DRAMState, "Refresh done at %llu and next refresh" 1970 " at %llu\n", curTick(), refreshDueAt); 1971 } 1972} 1973 1974void 1975DRAMCtrl::Rank::schedulePowerEvent(PowerState pwr_state, Tick tick) 1976{ 1977 // respect causality 1978 assert(tick >= curTick()); 1979 1980 if (!powerEvent.scheduled()) { 1981 DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n", 1982 tick, pwr_state); 1983 1984 // insert the new transition 1985 pwrStateTrans = pwr_state; 1986 1987 schedule(powerEvent, tick); 1988 } else { 1989 panic("Scheduled power event at %llu to state %d, " 1990 "with scheduled event at %llu to %d\n", tick, pwr_state, 1991 powerEvent.when(), pwrStateTrans); 1992 } 1993} 1994 1995void 1996DRAMCtrl::Rank::powerDownSleep(PowerState pwr_state, Tick tick) 1997{ 1998 // if the target is the active power-down state, schedule the transition 1999 // now. In reality tCKE is needed to enter active power-down; this is 2000 // neglected here and could be added in the future. 2001 if (pwr_state == PWR_ACT_PDN) { 2002 schedulePowerEvent(pwr_state, tick); 2003 // push command to DRAMPower 2004 cmdList.push_back(Command(MemCommand::PDN_F_ACT, 0, tick)); 2005 DPRINTF(DRAMPower, "%llu,PDN_F_ACT,0,%d\n", divCeil(tick, 2006 memory.tCK) - memory.timeStampOffset, rank); 2007 } else if (pwr_state == PWR_PRE_PDN) { 2008 // if the target is the precharge power-down state, schedule the 2009 // transition now. In reality tCKE is needed to enter precharge 2010 // power-down; this is neglected here. 2011 schedulePowerEvent(pwr_state, tick); 2012 // push command to DRAMPower 2013 cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick)); 2014 DPRINTF(DRAMPower, "%llu,PDN_F_PRE,0,%d\n", divCeil(tick, 2015 memory.tCK) - memory.timeStampOffset, rank); 2016 } else if (pwr_state == PWR_REF) { 2017 // if a refresh just occurred 2018 // transition to PRE_PDN now that all banks are closed 2019 // precharge power down requires tCKE to enter. For simplicity 2020 // this is not considered. 2021 schedulePowerEvent(PWR_PRE_PDN, tick); 2022 // push command to DRAMPower 2023 cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick)); 2024 DPRINTF(DRAMPower, "%llu,PDN_F_PRE,0,%d\n", divCeil(tick, 2025 memory.tCK) - memory.timeStampOffset, rank); 2026 } else if (pwr_state == PWR_SREF) { 2027 // should only enter SREF after PRE-PD wakeup to do a refresh 2028 assert(pwrStatePostRefresh == PWR_PRE_PDN); 2029 // self refresh requires time tCKESR to enter. For simplicity, 2030 // this is not considered.
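        // Sketch of the modelled SREF round trip: SREN is pushed to
        // DRAMPower here, scheduleWakeUpEvent() later pushes the matching
        // SREX, and processPowerEvent() keeps the rank out of REF_IDLE for
        // tXS after the exit (via the REF_SREF_EXIT state).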
2031 schedulePowerEvent(PWR_SREF, tick); 2032 // push Command to DRAMPower 2033 cmdList.push_back(Command(MemCommand::SREN, 0, tick)); 2034 DPRINTF(DRAMPower, "%llu,SREN,0,%d\n", divCeil(tick, 2035 memory.tCK) - memory.timeStampOffset, rank); 2036 } 2037 // Ensure that we don't power-down and back up in same tick 2038 // Once we commit to PD entry, do it and wait for at least 1tCK 2039 // This could be replaced with tCKE if/when that is added to the model 2040 wakeUpAllowedAt = tick + memory.tCK; 2041 2042 // Transitioning to a low power state, set flag 2043 inLowPowerState = true; 2044} 2045 2046void 2047DRAMCtrl::Rank::scheduleWakeUpEvent(Tick exit_delay) 2048{ 2049 Tick wake_up_tick = std::max(curTick(), wakeUpAllowedAt); 2050 2051 DPRINTF(DRAMState, "Scheduling wake-up for rank %d at tick %d\n", 2052 rank, wake_up_tick); 2053 2054 // if waking for refresh, hold previous state 2055 // else reset state back to IDLE 2056 if (refreshState == REF_PD_EXIT) { 2057 pwrStatePostRefresh = pwrState; 2058 } else { 2059 // don't automatically transition back to LP state after next REF 2060 pwrStatePostRefresh = PWR_IDLE; 2061 } 2062 2063 // schedule wake-up with event to ensure entry has completed before 2064 // we try to wake-up 2065 schedule(wakeUpEvent, wake_up_tick); 2066 2067 for (auto &b : banks) { 2068 // respect both causality and any existing bank 2069 // constraints, some banks could already have a 2070 // (auto) precharge scheduled 2071 b.wrAllowedAt = std::max(wake_up_tick + exit_delay, b.wrAllowedAt); 2072 b.rdAllowedAt = std::max(wake_up_tick + exit_delay, b.rdAllowedAt); 2073 b.preAllowedAt = std::max(wake_up_tick + exit_delay, b.preAllowedAt); 2074 b.actAllowedAt = std::max(wake_up_tick + exit_delay, b.actAllowedAt); 2075 } 2076 // Transitioning out of low power state, clear flag 2077 inLowPowerState = false; 2078 2079 // push to DRAMPower 2080 // use pwrStateTrans for cases where we have a power event scheduled 2081 // to enter low power that has not yet been processed 2082 if (pwrStateTrans == PWR_ACT_PDN) { 2083 cmdList.push_back(Command(MemCommand::PUP_ACT, 0, wake_up_tick)); 2084 DPRINTF(DRAMPower, "%llu,PUP_ACT,0,%d\n", divCeil(wake_up_tick, 2085 memory.tCK) - memory.timeStampOffset, rank); 2086 2087 } else if (pwrStateTrans == PWR_PRE_PDN) { 2088 cmdList.push_back(Command(MemCommand::PUP_PRE, 0, wake_up_tick)); 2089 DPRINTF(DRAMPower, "%llu,PUP_PRE,0,%d\n", divCeil(wake_up_tick, 2090 memory.tCK) - memory.timeStampOffset, rank); 2091 } else if (pwrStateTrans == PWR_SREF) { 2092 cmdList.push_back(Command(MemCommand::SREX, 0, wake_up_tick)); 2093 DPRINTF(DRAMPower, "%llu,SREX,0,%d\n", divCeil(wake_up_tick, 2094 memory.tCK) - memory.timeStampOffset, rank); 2095 } 2096} 2097 2098void 2099DRAMCtrl::Rank::processWakeUpEvent() 2100{ 2101 // Should be in a power-down or self-refresh state 2102 assert((pwrState == PWR_ACT_PDN) || (pwrState == PWR_PRE_PDN) || 2103 (pwrState == PWR_SREF)); 2104 2105 // Check current state to determine transition state 2106 if (pwrState == PWR_ACT_PDN) { 2107 // banks still open, transition to PWR_ACT 2108 schedulePowerEvent(PWR_ACT, curTick()); 2109 } else { 2110 // transitioning from a precharge power-down or self-refresh state 2111 // banks are closed - transition to PWR_IDLE 2112 schedulePowerEvent(PWR_IDLE, curTick()); 2113 } 2114} 2115 2116void 2117DRAMCtrl::Rank::processPowerEvent() 2118{ 2119 assert(curTick() >= pwrStateTick); 2120 // remember where we were, and for how long 2121 Tick duration = curTick() - pwrStateTick; 2122 PowerState prev_state 
= pwrState; 2123 2124 // update the accounting 2125 pwrStateTime[prev_state] += duration; 2126 2127 // track the total idle time 2128 if ((prev_state == PWR_PRE_PDN) || (prev_state == PWR_ACT_PDN) || 2129 (prev_state == PWR_SREF)) { 2130 totalIdleTime += duration; 2131 } 2132 2133 pwrState = pwrStateTrans; 2134 pwrStateTick = curTick(); 2135 2136 // if rank was refreshing, make sure to start scheduling requests again 2137 if (prev_state == PWR_REF) { 2138 // bus IDLED prior to REF 2139 // counter should be one for refresh command only 2140 assert(outstandingEvents == 1); 2141 // REF complete, decrement count and go back to IDLE 2142 --outstandingEvents; 2143 refreshState = REF_IDLE; 2144 2145 DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration); 2146 // if moving back to power-down after refresh 2147 if (pwrState != PWR_IDLE) { 2148 assert(pwrState == PWR_PRE_PDN); 2149 DPRINTF(DRAMState, "Switching to power down state after refreshing" 2150 " rank %d at %llu tick\n", rank, curTick()); 2151 } 2152 2153 // completed refresh event, ensure next request is scheduled 2154 if (!memory.nextReqEvent.scheduled()) { 2155 DPRINTF(DRAM, "Scheduling next request after refreshing" 2156 " rank %d\n", rank); 2157 schedule(memory.nextReqEvent, curTick()); 2158 } 2159 } 2160 2161 if ((pwrState == PWR_ACT) && (refreshState == REF_PD_EXIT)) { 2162 // have exited ACT PD 2163 assert(prev_state == PWR_ACT_PDN); 2164 2165 // go back to REF event and close banks 2166 refreshState = REF_PRE; 2167 schedule(refreshEvent, curTick()); 2168 } else if (pwrState == PWR_IDLE) { 2169 DPRINTF(DRAMState, "All banks precharged\n"); 2170 if (prev_state == PWR_SREF) { 2171 // set refresh state to REF_SREF_EXIT, ensuring inRefIdleState 2172 // continues to return false during tXS after SREF exit 2173 // Schedule a refresh which kicks things back into action 2174 // when it finishes 2175 refreshState = REF_SREF_EXIT; 2176 schedule(refreshEvent, curTick() + memory.tXS); 2177 } else { 2178 // if we have a pending refresh, and are now moving to 2179 // the idle state, directly transition to, or schedule refresh 2180 if ((refreshState == REF_PRE) || (refreshState == REF_PD_EXIT)) { 2181 // ensure refresh is restarted only after final PRE command. 2182 // do not restart refresh if controller is in an intermediate 2183 // state, after PRE_PDN exit, when banks are IDLE but an 2184 // ACT is scheduled.
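            // Two paths lead into the branch below: straight after the
            // final PRE (refreshState == REF_PRE, so we can move directly
            // to PWR_REF), or after precharge power-down exit
            // (REF_PD_EXIT, where tXP must elapse before PWR_REF).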
2185 if (!activateEvent.scheduled()) { 2186 // there should be nothing waiting at this point 2187 assert(!powerEvent.scheduled()); 2188 if (refreshState == REF_PD_EXIT) { 2189 // exiting PRE PD, will be in IDLE until tXP expires 2190 // and then should transition to PWR_REF state 2191 assert(prev_state == PWR_PRE_PDN); 2192 schedulePowerEvent(PWR_REF, curTick() + memory.tXP); 2193 } else if (refreshState == REF_PRE) { 2194 // can directly move to PWR_REF state and proceed below 2195 pwrState = PWR_REF; 2196 } 2197 } else { 2198 // must have PRE scheduled to transition back to IDLE 2199 // and re-kick off refresh 2200 assert(prechargeEvent.scheduled()); 2201 } 2202 } 2203 } 2204 } 2205 2206 // transition to the refresh state and re-start refresh process 2207 // refresh state machine will schedule the next power state transition 2208 if (pwrState == PWR_REF) { 2209 // completed final PRE for refresh or exiting power-down 2210 assert(refreshState == REF_PRE || refreshState == REF_PD_EXIT); 2211 2212 // exited PRE PD for refresh, with no pending commands 2213 // bypass auto-refresh and go straight to SREF, where memory 2214 // will issue refresh immediately upon entry 2215 if (pwrStatePostRefresh == PWR_PRE_PDN && isQueueEmpty() && 2216 (memory.drainState() != DrainState::Draining) && 2217 (memory.drainState() != DrainState::Drained)) { 2218 DPRINTF(DRAMState, "Rank %d bypassing refresh and transitioning " 2219 "to self refresh at %llu tick\n", rank, curTick()); 2220 powerDownSleep(PWR_SREF, curTick()); 2221 2222 // Since refresh was bypassed, remove event by decrementing count 2223 assert(outstandingEvents == 1); 2224 --outstandingEvents; 2225 2226 // reset state back to IDLE temporarily until SREF is entered 2227 pwrState = PWR_IDLE; 2228 2229 // Not bypassing refresh for SREF entry 2230 } else { 2231 DPRINTF(DRAMState, "Refreshing\n"); 2232 2233 // there should be nothing waiting at this point 2234 assert(!powerEvent.scheduled()); 2235 2236 // kick the refresh event loop into action again, and that 2237 // in turn will schedule a transition to the idle power 2238 // state once the refresh is done 2239 schedule(refreshEvent, curTick()); 2240 2241 // Banks transitioned to IDLE, start REF 2242 refreshState = REF_START; 2243 } 2244 } 2245 2246} 2247 2248void 2249DRAMCtrl::Rank::updatePowerStats() 2250{ 2251 // All commands up to refresh have completed 2252 // flush cmdList to DRAMPower 2253 flushCmdList(); 2254 2255 // Call the function that calculates window energy at intermediate update 2256 // events such as refresh and stats dump, as well as at simulation exit. 2257 // The window starts at the last time calcWindowEnergy was called 2258 // and extends up to the current time.
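    // Worked example of the tick-to-cycle conversion (assuming a 1 ps
    // tick and tCK = 1250 ticks, i.e. an 800 MHz DRAM clock): at
    // curTick() == 2000000, divCeil(2000000, 1250) - timeStampOffset
    // gives cycle 1600 minus the cycle offset recorded at startup.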
2259 power.powerlib.calcWindowEnergy(divCeil(curTick(), memory.tCK) - 2260 memory.timeStampOffset); 2261 2262 // Get the energy from DRAMPower 2263 Data::MemoryPowerModel::Energy energy = power.powerlib.getEnergy(); 2264 2265 // The energy components inside the power lib are calculated over 2266 // the window so accumulate into the corresponding gem5 stat 2267 actEnergy += energy.act_energy * memory.devicesPerRank; 2268 preEnergy += energy.pre_energy * memory.devicesPerRank; 2269 readEnergy += energy.read_energy * memory.devicesPerRank; 2270 writeEnergy += energy.write_energy * memory.devicesPerRank; 2271 refreshEnergy += energy.ref_energy * memory.devicesPerRank; 2272 actBackEnergy += energy.act_stdby_energy * memory.devicesPerRank; 2273 preBackEnergy += energy.pre_stdby_energy * memory.devicesPerRank; 2274 actPowerDownEnergy += energy.f_act_pd_energy * memory.devicesPerRank; 2275 prePowerDownEnergy += energy.f_pre_pd_energy * memory.devicesPerRank; 2276 selfRefreshEnergy += energy.sref_energy * memory.devicesPerRank; 2277 2278 // Accumulate window energy into the total energy. 2279 totalEnergy += energy.window_energy * memory.devicesPerRank; 2280 // Average power must not be accumulated but calculated over the time 2281 // since the last stats reset. SimClock::Frequency is the number of 2282 // ticks per second, so: 2283 // power (mW) = (energy (pJ) / time (ticks)) * (SimClock::Frequency / 1e9) 2284 2285 averagePower = (totalEnergy.value() / 2286 (curTick() - memory.lastStatsResetTick)) * 2287 (SimClock::Frequency / 1000000000.0); 2288 2289} 2290 2291void 2292DRAMCtrl::Rank::computeStats() 2293{ 2294 DPRINTF(DRAM,"Computing stats due to a dump callback\n"); 2295 2296 // Update the stats 2297 updatePowerStats(); 2298 2299 // final update of power state times 2300 pwrStateTime[pwrState] += (curTick() - pwrStateTick); 2301 pwrStateTick = curTick(); 2302 2303} 2304 2305void 2306DRAMCtrl::Rank::resetStats() { 2307 // The only way to clear the counters in DRAMPower is to call the 2308 // calcWindowEnergy function, as that then calls clearCounters. The 2309 // clearCounters method itself is private.
2310 power.powerlib.calcWindowEnergy(divCeil(curTick(), memory.tCK) - 2311 memory.timeStampOffset); 2312 2313} 2314 2315void 2316DRAMCtrl::Rank::regStats() 2317{ 2318 pwrStateTime 2319 .init(6) 2320 .name(name() + ".memoryStateTime") 2321 .desc("Time in different power states"); 2322 pwrStateTime.subname(0, "IDLE"); 2323 pwrStateTime.subname(1, "REF"); 2324 pwrStateTime.subname(2, "SREF"); 2325 pwrStateTime.subname(3, "PRE_PDN"); 2326 pwrStateTime.subname(4, "ACT"); 2327 pwrStateTime.subname(5, "ACT_PDN"); 2328 2329 actEnergy 2330 .name(name() + ".actEnergy") 2331 .desc("Energy for activate commands per rank (pJ)"); 2332 2333 preEnergy 2334 .name(name() + ".preEnergy") 2335 .desc("Energy for precharge commands per rank (pJ)"); 2336 2337 readEnergy 2338 .name(name() + ".readEnergy") 2339 .desc("Energy for read commands per rank (pJ)"); 2340 2341 writeEnergy 2342 .name(name() + ".writeEnergy") 2343 .desc("Energy for write commands per rank (pJ)"); 2344 2345 refreshEnergy 2346 .name(name() + ".refreshEnergy") 2347 .desc("Energy for refresh commands per rank (pJ)"); 2348 2349 actBackEnergy 2350 .name(name() + ".actBackEnergy") 2351 .desc("Energy for active background per rank (pJ)"); 2352 2353 preBackEnergy 2354 .name(name() + ".preBackEnergy") 2355 .desc("Energy for precharge background per rank (pJ)"); 2356 2357 actPowerDownEnergy 2358 .name(name() + ".actPowerDownEnergy") 2359 .desc("Energy for active power-down per rank (pJ)"); 2360 2361 prePowerDownEnergy 2362 .name(name() + ".prePowerDownEnergy") 2363 .desc("Energy for precharge power-down per rank (pJ)"); 2364 2365 selfRefreshEnergy 2366 .name(name() + ".selfRefreshEnergy") 2367 .desc("Energy for self refresh per rank (pJ)"); 2368 2369 totalEnergy 2370 .name(name() + ".totalEnergy") 2371 .desc("Total energy per rank (pJ)"); 2372 2373 averagePower 2374 .name(name() + ".averagePower") 2375 .desc("Core power per rank (mW)"); 2376 2377 totalIdleTime 2378 .name(name() + ".totalIdleTime") 2379 .desc("Total idle time per DRAM rank"); 2380 2381 Stats::registerDumpCallback(new RankDumpCallback(this)); 2382 Stats::registerResetCallback(new RankResetCallback(this)); 2383} 2384void 2385DRAMCtrl::regStats() 2386{ 2387 using namespace Stats; 2388
| 1639 Tick min_col_at) const 1640{ 1641 Tick min_act_at = MaxTick; 1642 vector<uint32_t> bank_mask(ranksPerChannel, 0); 1643 1644 // latest Tick for which ACT can occur without incurring additoinal 1645 // delay on the data bus 1646 const Tick hidden_act_max = std::max(min_col_at - tRCD, curTick()); 1647 1648 // Flag condition when burst can issue back-to-back with previous burst 1649 bool found_seamless_bank = false; 1650 1651 // Flag condition when bank can be opened without incurring additional 1652 // delay on the data bus 1653 bool hidden_bank_prep = false; 1654 1655 // determine if we have queued transactions targetting the 1656 // bank in question 1657 vector<bool> got_waiting(ranksPerChannel * banksPerRank, false); 1658 for (const auto& p : queue) { 1659 if (p->rankRef.inRefIdleState()) 1660 got_waiting[p->bankId] = true; 1661 } 1662 1663 // Find command with optimal bank timing 1664 // Will prioritize commands that can issue seamlessly. 1665 for (int i = 0; i < ranksPerChannel; i++) { 1666 for (int j = 0; j < banksPerRank; j++) { 1667 uint16_t bank_id = i * banksPerRank + j; 1668 1669 // if we have waiting requests for the bank, and it is 1670 // amongst the first available, update the mask 1671 if (got_waiting[bank_id]) { 1672 // make sure this rank is not currently refreshing. 1673 assert(ranks[i]->inRefIdleState()); 1674 // simplistic approximation of when the bank can issue 1675 // an activate, ignoring any rank-to-rank switching 1676 // cost in this calculation 1677 Tick act_at = ranks[i]->banks[j].openRow == Bank::NO_ROW ? 1678 std::max(ranks[i]->banks[j].actAllowedAt, curTick()) : 1679 std::max(ranks[i]->banks[j].preAllowedAt, curTick()) + tRP; 1680 1681 // When is the earliest the R/W burst can issue? 1682 const Tick col_allowed_at = (busState == READ) ? 
1683 ranks[i]->banks[j].rdAllowedAt : 1684 ranks[i]->banks[j].wrAllowedAt; 1685 Tick col_at = std::max(col_allowed_at, act_at + tRCD); 1686 1687 // bank can issue burst back-to-back (seamlessly) with 1688 // previous burst 1689 bool new_seamless_bank = col_at <= min_col_at; 1690 1691 // if we found a new seamless bank or we have no 1692 // seamless banks, and got a bank with an earlier 1693 // activate time, it should be added to the bit mask 1694 if (new_seamless_bank || 1695 (!found_seamless_bank && act_at <= min_act_at)) { 1696 // if we did not have a seamless bank before, and 1697 // we do now, reset the bank mask, also reset it 1698 // if we have not yet found a seamless bank and 1699 // the activate time is smaller than what we have 1700 // seen so far 1701 if (!found_seamless_bank && 1702 (new_seamless_bank || act_at < min_act_at)) { 1703 std::fill(bank_mask.begin(), bank_mask.end(), 0); 1704 } 1705 1706 found_seamless_bank |= new_seamless_bank; 1707 1708 // ACT can occur 'behind the scenes' 1709 hidden_bank_prep = act_at <= hidden_act_max; 1710 1711 // set the bit corresponding to the available bank 1712 replaceBits(bank_mask[i], j, j, 1); 1713 min_act_at = act_at; 1714 } 1715 } 1716 } 1717 } 1718 1719 return make_pair(bank_mask, hidden_bank_prep); 1720} 1721 1722DRAMCtrl::Rank::Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p, int rank) 1723 : EventManager(&_memory), memory(_memory), 1724 pwrStateTrans(PWR_IDLE), pwrStatePostRefresh(PWR_IDLE), 1725 pwrStateTick(0), refreshDueAt(0), pwrState(PWR_IDLE), 1726 refreshState(REF_IDLE), inLowPowerState(false), rank(rank), 1727 readEntries(0), writeEntries(0), outstandingEvents(0), 1728 wakeUpAllowedAt(0), power(_p, false), banks(_p->banks_per_rank), 1729 numBanksActive(0), actTicks(_p->activation_limit, 0), 1730 writeDoneEvent([this]{ processWriteDoneEvent(); }, name()), 1731 activateEvent([this]{ processActivateEvent(); }, name()), 1732 prechargeEvent([this]{ processPrechargeEvent(); }, name()), 1733 refreshEvent([this]{ processRefreshEvent(); }, name()), 1734 powerEvent([this]{ processPowerEvent(); }, name()), 1735 wakeUpEvent([this]{ processWakeUpEvent(); }, name()) 1736{ 1737 for (int b = 0; b < _p->banks_per_rank; b++) { 1738 banks[b].bank = b; 1739 // GDDR addressing of banks to BG is linear. 1740 // Here we assume that all DRAM generations address bank groups as 1741 // follows: 1742 if (_p->bank_groups_per_rank > 0) { 1743 // Simply assign lower bits to bank group in order to 1744 // rotate across bank groups as banks are incremented 1745 // e.g. 
with 4 banks per bank group and 16 banks total: 1746 // banks 0,4,8,12 are in bank group 0 1747 // banks 1,5,9,13 are in bank group 1 1748 // banks 2,6,10,14 are in bank group 2 1749 // banks 3,7,11,15 are in bank group 3 1750 banks[b].bankgr = b % _p->bank_groups_per_rank; 1751 } else { 1752 // No bank groups; simply assign to bank number 1753 banks[b].bankgr = b; 1754 } 1755 } 1756} 1757 1758void 1759DRAMCtrl::Rank::startup(Tick ref_tick) 1760{ 1761 assert(ref_tick > curTick()); 1762 1763 pwrStateTick = curTick(); 1764 1765 // kick off the refresh, and give ourselves enough time to 1766 // precharge 1767 schedule(refreshEvent, ref_tick); 1768} 1769 1770void 1771DRAMCtrl::Rank::suspend() 1772{ 1773 deschedule(refreshEvent); 1774 1775 // Update the stats 1776 updatePowerStats(); 1777 1778 // don't automatically transition back to LP state after next REF 1779 pwrStatePostRefresh = PWR_IDLE; 1780} 1781 1782bool 1783DRAMCtrl::Rank::isQueueEmpty() const 1784{ 1785 // check commmands in Q based on current bus direction 1786 bool no_queued_cmds = ((memory.busStateNext == READ) && (readEntries == 0)) 1787 || ((memory.busStateNext == WRITE) && 1788 (writeEntries == 0)); 1789 return no_queued_cmds; 1790} 1791 1792void 1793DRAMCtrl::Rank::checkDrainDone() 1794{ 1795 // if this rank was waiting to drain it is now able to proceed to 1796 // precharge 1797 if (refreshState == REF_DRAIN) { 1798 DPRINTF(DRAM, "Refresh drain done, now precharging\n"); 1799 1800 refreshState = REF_PD_EXIT; 1801 1802 // hand control back to the refresh event loop 1803 schedule(refreshEvent, curTick()); 1804 } 1805} 1806 1807void 1808DRAMCtrl::Rank::flushCmdList() 1809{ 1810 // at the moment sort the list of commands and update the counters 1811 // for DRAMPower libray when doing a refresh 1812 sort(cmdList.begin(), cmdList.end(), DRAMCtrl::sortTime); 1813 1814 auto next_iter = cmdList.begin(); 1815 // push to commands to DRAMPower 1816 for ( ; next_iter != cmdList.end() ; ++next_iter) { 1817 Command cmd = *next_iter; 1818 if (cmd.timeStamp <= curTick()) { 1819 // Move all commands at or before curTick to DRAMPower 1820 power.powerlib.doCommand(cmd.type, cmd.bank, 1821 divCeil(cmd.timeStamp, memory.tCK) - 1822 memory.timeStampOffset); 1823 } else { 1824 // done - found all commands at or before curTick() 1825 // next_iter references the 1st command after curTick 1826 break; 1827 } 1828 } 1829 // reset cmdList to only contain commands after curTick 1830 // if there are no commands after curTick, updated cmdList will be empty 1831 // in this case, next_iter is cmdList.end() 1832 cmdList.assign(next_iter, cmdList.end()); 1833} 1834 1835void 1836DRAMCtrl::Rank::processActivateEvent() 1837{ 1838 // we should transition to the active state as soon as any bank is active 1839 if (pwrState != PWR_ACT) 1840 // note that at this point numBanksActive could be back at 1841 // zero again due to a precharge scheduled in the future 1842 schedulePowerEvent(PWR_ACT, curTick()); 1843} 1844 1845void 1846DRAMCtrl::Rank::processPrechargeEvent() 1847{ 1848 // counter should at least indicate one outstanding request 1849 // for this precharge 1850 assert(outstandingEvents > 0); 1851 // precharge complete, decrement count 1852 --outstandingEvents; 1853 1854 // if we reached zero, then special conditions apply as we track 1855 // if all banks are precharged for the power models 1856 if (numBanksActive == 0) { 1857 // no reads to this rank in the Q and no pending 1858 // RD/WR or refresh commands 1859 if (isQueueEmpty() && outstandingEvents == 0) { 
1860 // should still be in ACT state since bank still open 1861 assert(pwrState == PWR_ACT); 1862 1863 // All banks closed - switch to precharge power down state. 1864 DPRINTF(DRAMState, "Rank %d sleep at tick %d\n", 1865 rank, curTick()); 1866 powerDownSleep(PWR_PRE_PDN, curTick()); 1867 } else { 1868 // we should transition to the idle state when the last bank 1869 // is precharged 1870 schedulePowerEvent(PWR_IDLE, curTick()); 1871 } 1872 } 1873} 1874 1875void 1876DRAMCtrl::Rank::processWriteDoneEvent() 1877{ 1878 // counter should at least indicate one outstanding request 1879 // for this write 1880 assert(outstandingEvents > 0); 1881 // Write transfer on bus has completed 1882 // decrement per rank counter 1883 --outstandingEvents; 1884} 1885 1886void 1887DRAMCtrl::Rank::processRefreshEvent() 1888{ 1889 // when first preparing the refresh, remember when it was due 1890 if ((refreshState == REF_IDLE) || (refreshState == REF_SREF_EXIT)) { 1891 // remember when the refresh is due 1892 refreshDueAt = curTick(); 1893 1894 // proceed to drain 1895 refreshState = REF_DRAIN; 1896 1897 // make nonzero while refresh is pending to ensure 1898 // power down and self-refresh are not entered 1899 ++outstandingEvents; 1900 1901 DPRINTF(DRAM, "Refresh due\n"); 1902 } 1903 1904 // let any scheduled read or write to the same rank go ahead, 1905 // after which it will 1906 // hand control back to this event loop 1907 if (refreshState == REF_DRAIN) { 1908 // if a request is at the moment being handled and this request is 1909 // accessing the current rank then wait for it to finish 1910 if ((rank == memory.activeRank) 1911 && (memory.nextReqEvent.scheduled())) { 1912 // hand control over to the request loop until it is 1913 // evaluated next 1914 DPRINTF(DRAM, "Refresh awaiting draining\n"); 1915 1916 return; 1917 } else { 1918 refreshState = REF_PD_EXIT; 1919 } 1920 } 1921 1922 // at this point, ensure that rank is not in a power-down state 1923 if (refreshState == REF_PD_EXIT) { 1924 // if rank was sleeping and we have't started exit process, 1925 // wake-up for refresh 1926 if (inLowPowerState) { 1927 DPRINTF(DRAM, "Wake Up for refresh\n"); 1928 // save state and return after refresh completes 1929 scheduleWakeUpEvent(memory.tXP); 1930 return; 1931 } else { 1932 refreshState = REF_PRE; 1933 } 1934 } 1935 1936 // at this point, ensure that all banks are precharged 1937 if (refreshState == REF_PRE) { 1938 // precharge any active bank 1939 if (numBanksActive != 0) { 1940 // at the moment, we use a precharge all even if there is 1941 // only a single bank open 1942 DPRINTF(DRAM, "Precharging all\n"); 1943 1944 // first determine when we can precharge 1945 Tick pre_at = curTick(); 1946 1947 for (auto &b : banks) { 1948 // respect both causality and any existing bank 1949 // constraints, some banks could already have a 1950 // (auto) precharge scheduled 1951 pre_at = std::max(b.preAllowedAt, pre_at); 1952 } 1953 1954 // make sure all banks per rank are precharged, and for those that 1955 // already are, update their availability 1956 Tick act_allowed_at = pre_at + memory.tRP; 1957 1958 for (auto &b : banks) { 1959 if (b.openRow != Bank::NO_ROW) { 1960 memory.prechargeBank(*this, b, pre_at, false); 1961 } else { 1962 b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at); 1963 b.preAllowedAt = std::max(b.preAllowedAt, pre_at); 1964 } 1965 } 1966 1967 // precharge all banks in rank 1968 cmdList.push_back(Command(MemCommand::PREA, 0, pre_at)); 1969 1970 DPRINTF(DRAMPower, "%llu,PREA,0,%d\n", 1971 
divCeil(pre_at, memory.tCK) - 1972 memory.timeStampOffset, rank); 1973 } else if ((pwrState == PWR_IDLE) && (outstandingEvents == 1)) { 1974 // Banks are closed, have transitioned to IDLE state, and 1975 // no outstanding ACT,RD/WR,Auto-PRE sequence scheduled 1976 DPRINTF(DRAM, "All banks already precharged, starting refresh\n"); 1977 1978 // go ahead and kick the power state machine into gear since 1979 // we are already idle 1980 schedulePowerEvent(PWR_REF, curTick()); 1981 } else { 1982 // banks state is closed but haven't transitioned pwrState to IDLE 1983 // or have outstanding ACT,RD/WR,Auto-PRE sequence scheduled 1984 // should have outstanding precharge event in this case 1985 assert(prechargeEvent.scheduled()); 1986 // will start refresh when pwrState transitions to IDLE 1987 } 1988 1989 assert(numBanksActive == 0); 1990 1991 // wait for all banks to be precharged, at which point the 1992 // power state machine will transition to the idle state, and 1993 // automatically move to a refresh, at that point it will also 1994 // call this method to get the refresh event loop going again 1995 return; 1996 } 1997 1998 // last but not least we perform the actual refresh 1999 if (refreshState == REF_START) { 2000 // should never get here with any banks active 2001 assert(numBanksActive == 0); 2002 assert(pwrState == PWR_REF); 2003 2004 Tick ref_done_at = curTick() + memory.tRFC; 2005 2006 for (auto &b : banks) { 2007 b.actAllowedAt = ref_done_at; 2008 } 2009 2010 // at the moment this affects all ranks 2011 cmdList.push_back(Command(MemCommand::REF, 0, curTick())); 2012 2013 // Update the stats 2014 updatePowerStats(); 2015 2016 DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), memory.tCK) - 2017 memory.timeStampOffset, rank); 2018 2019 // Update for next refresh 2020 refreshDueAt += memory.tREFI; 2021 2022 // make sure we did not wait so long that we cannot make up 2023 // for it 2024 if (refreshDueAt < ref_done_at) { 2025 fatal("Refresh was delayed so long we cannot catch up\n"); 2026 } 2027 2028 // Run the refresh and schedule event to transition power states 2029 // when refresh completes 2030 refreshState = REF_RUN; 2031 schedule(refreshEvent, ref_done_at); 2032 return; 2033 } 2034 2035 if (refreshState == REF_RUN) { 2036 // should never get here with any banks active 2037 assert(numBanksActive == 0); 2038 assert(pwrState == PWR_REF); 2039 2040 assert(!powerEvent.scheduled()); 2041 2042 if ((memory.drainState() == DrainState::Draining) || 2043 (memory.drainState() == DrainState::Drained)) { 2044 // if draining, do not re-enter low-power mode. 2045 // simply go to IDLE and wait 2046 schedulePowerEvent(PWR_IDLE, curTick()); 2047 } else { 2048 // At the moment, we sleep when the refresh ends and wait to be 2049 // woken up again if previously in a low-power state. 2050 if (pwrStatePostRefresh != PWR_IDLE) { 2051 // power State should be power Refresh 2052 assert(pwrState == PWR_REF); 2053 DPRINTF(DRAMState, "Rank %d sleeping after refresh and was in " 2054 "power state %d before refreshing\n", rank, 2055 pwrStatePostRefresh); 2056 powerDownSleep(pwrState, curTick()); 2057 2058 // Force PRE power-down if there are no outstanding commands 2059 // in Q after refresh. 
2060 } else if (isQueueEmpty()) { 2061 // still have refresh event outstanding but there should 2062 // be no other events outstanding 2063 assert(outstandingEvents == 1); 2064 DPRINTF(DRAMState, "Rank %d sleeping after refresh but was NOT" 2065 " in a low power state before refreshing\n", rank); 2066 powerDownSleep(PWR_PRE_PDN, curTick()); 2067 2068 } else { 2069 // move to the idle power state once the refresh is done, this 2070 // will also move the refresh state machine to the refresh 2071 // idle state 2072 schedulePowerEvent(PWR_IDLE, curTick()); 2073 } 2074 } 2075 2076 // At this point, we have completed the current refresh. 2077 // In the SREF bypass case, we do not get to this state in the 2078 // refresh STM and therefore can always schedule next event. 2079 // Compensate for the delay in actually performing the refresh 2080 // when scheduling the next one 2081 schedule(refreshEvent, refreshDueAt - memory.tRP); 2082 2083 DPRINTF(DRAMState, "Refresh done at %llu and next refresh" 2084 " at %llu\n", curTick(), refreshDueAt); 2085 } 2086} 2087 2088void 2089DRAMCtrl::Rank::schedulePowerEvent(PowerState pwr_state, Tick tick) 2090{ 2091 // respect causality 2092 assert(tick >= curTick()); 2093 2094 if (!powerEvent.scheduled()) { 2095 DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n", 2096 tick, pwr_state); 2097 2098 // insert the new transition 2099 pwrStateTrans = pwr_state; 2100 2101 schedule(powerEvent, tick); 2102 } else { 2103 panic("Scheduled power event at %llu to state %d, " 2104 "with scheduled event at %llu to %d\n", tick, pwr_state, 2105 powerEvent.when(), pwrStateTrans); 2106 } 2107} 2108 2109void 2110DRAMCtrl::Rank::powerDownSleep(PowerState pwr_state, Tick tick) 2111{ 2112 // if low power state is active low, schedule to active low power state. 2113 // in reality tCKE is needed to enter active low power. This is neglected 2114 // here and could be added in the future. 2115 if (pwr_state == PWR_ACT_PDN) { 2116 schedulePowerEvent(pwr_state, tick); 2117 // push command to DRAMPower 2118 cmdList.push_back(Command(MemCommand::PDN_F_ACT, 0, tick)); 2119 DPRINTF(DRAMPower, "%llu,PDN_F_ACT,0,%d\n", divCeil(tick, 2120 memory.tCK) - memory.timeStampOffset, rank); 2121 } else if (pwr_state == PWR_PRE_PDN) { 2122 // if low power state is precharge low, schedule to precharge low 2123 // power state. In reality tCKE is needed to enter active low power. 2124 // This is neglected here. 2125 schedulePowerEvent(pwr_state, tick); 2126 //push Command to DRAMPower 2127 cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick)); 2128 DPRINTF(DRAMPower, "%llu,PDN_F_PRE,0,%d\n", divCeil(tick, 2129 memory.tCK) - memory.timeStampOffset, rank); 2130 } else if (pwr_state == PWR_REF) { 2131 // if a refresh just occurred 2132 // transition to PRE_PDN now that all banks are closed 2133 // precharge power down requires tCKE to enter. For simplicity 2134 // this is not considered. 2135 schedulePowerEvent(PWR_PRE_PDN, tick); 2136 //push Command to DRAMPower 2137 cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick)); 2138 DPRINTF(DRAMPower, "%llu,PDN_F_PRE,0,%d\n", divCeil(tick, 2139 memory.tCK) - memory.timeStampOffset, rank); 2140 } else if (pwr_state == PWR_SREF) { 2141 // should only enter SREF after PRE-PD wakeup to do a refresh 2142 assert(pwrStatePostRefresh == PWR_PRE_PDN); 2143 // self refresh requires time tCKESR to enter. For simplicity, 2144 // this is not considered. 
2145 schedulePowerEvent(PWR_SREF, tick); 2146 // push Command to DRAMPower 2147 cmdList.push_back(Command(MemCommand::SREN, 0, tick)); 2148 DPRINTF(DRAMPower, "%llu,SREN,0,%d\n", divCeil(tick, 2149 memory.tCK) - memory.timeStampOffset, rank); 2150 } 2151 // Ensure that we don't power-down and back up in same tick 2152 // Once we commit to PD entry, do it and wait for at least 1tCK 2153 // This could be replaced with tCKE if/when that is added to the model 2154 wakeUpAllowedAt = tick + memory.tCK; 2155 2156 // Transitioning to a low power state, set flag 2157 inLowPowerState = true; 2158} 2159 2160void 2161DRAMCtrl::Rank::scheduleWakeUpEvent(Tick exit_delay) 2162{ 2163 Tick wake_up_tick = std::max(curTick(), wakeUpAllowedAt); 2164 2165 DPRINTF(DRAMState, "Scheduling wake-up for rank %d at tick %d\n", 2166 rank, wake_up_tick); 2167 2168 // if waking for refresh, hold previous state 2169 // else reset state back to IDLE 2170 if (refreshState == REF_PD_EXIT) { 2171 pwrStatePostRefresh = pwrState; 2172 } else { 2173 // don't automatically transition back to LP state after next REF 2174 pwrStatePostRefresh = PWR_IDLE; 2175 } 2176 2177 // schedule wake-up with event to ensure entry has completed before 2178 // we try to wake-up 2179 schedule(wakeUpEvent, wake_up_tick); 2180 2181 for (auto &b : banks) { 2182 // respect both causality and any existing bank 2183 // constraints, some banks could already have a 2184 // (auto) precharge scheduled 2185 b.wrAllowedAt = std::max(wake_up_tick + exit_delay, b.wrAllowedAt); 2186 b.rdAllowedAt = std::max(wake_up_tick + exit_delay, b.rdAllowedAt); 2187 b.preAllowedAt = std::max(wake_up_tick + exit_delay, b.preAllowedAt); 2188 b.actAllowedAt = std::max(wake_up_tick + exit_delay, b.actAllowedAt); 2189 } 2190 // Transitioning out of low power state, clear flag 2191 inLowPowerState = false; 2192 2193 // push to DRAMPower 2194 // use pwrStateTrans for cases where we have a power event scheduled 2195 // to enter low power that has not yet been processed 2196 if (pwrStateTrans == PWR_ACT_PDN) { 2197 cmdList.push_back(Command(MemCommand::PUP_ACT, 0, wake_up_tick)); 2198 DPRINTF(DRAMPower, "%llu,PUP_ACT,0,%d\n", divCeil(wake_up_tick, 2199 memory.tCK) - memory.timeStampOffset, rank); 2200 2201 } else if (pwrStateTrans == PWR_PRE_PDN) { 2202 cmdList.push_back(Command(MemCommand::PUP_PRE, 0, wake_up_tick)); 2203 DPRINTF(DRAMPower, "%llu,PUP_PRE,0,%d\n", divCeil(wake_up_tick, 2204 memory.tCK) - memory.timeStampOffset, rank); 2205 } else if (pwrStateTrans == PWR_SREF) { 2206 cmdList.push_back(Command(MemCommand::SREX, 0, wake_up_tick)); 2207 DPRINTF(DRAMPower, "%llu,SREX,0,%d\n", divCeil(wake_up_tick, 2208 memory.tCK) - memory.timeStampOffset, rank); 2209 } 2210} 2211 2212void 2213DRAMCtrl::Rank::processWakeUpEvent() 2214{ 2215 // Should be in a power-down or self-refresh state 2216 assert((pwrState == PWR_ACT_PDN) || (pwrState == PWR_PRE_PDN) || 2217 (pwrState == PWR_SREF)); 2218 2219 // Check current state to determine transition state 2220 if (pwrState == PWR_ACT_PDN) { 2221 // banks still open, transition to PWR_ACT 2222 schedulePowerEvent(PWR_ACT, curTick()); 2223 } else { 2224 // transitioning from a precharge power-down or self-refresh state 2225 // banks are closed - transition to PWR_IDLE 2226 schedulePowerEvent(PWR_IDLE, curTick()); 2227 } 2228} 2229 2230void 2231DRAMCtrl::Rank::processPowerEvent() 2232{ 2233 assert(curTick() >= pwrStateTick); 2234 // remember where we were, and for how long 2235 Tick duration = curTick() - pwrStateTick; 2236 PowerState prev_state 
= pwrState; 2237 2238 // update the accounting 2239 pwrStateTime[prev_state] += duration; 2240 2241 // track to total idle time 2242 if ((prev_state == PWR_PRE_PDN) || (prev_state == PWR_ACT_PDN) || 2243 (prev_state == PWR_SREF)) { 2244 totalIdleTime += duration; 2245 } 2246 2247 pwrState = pwrStateTrans; 2248 pwrStateTick = curTick(); 2249 2250 // if rank was refreshing, make sure to start scheduling requests again 2251 if (prev_state == PWR_REF) { 2252 // bus IDLED prior to REF 2253 // counter should be one for refresh command only 2254 assert(outstandingEvents == 1); 2255 // REF complete, decrement count and go back to IDLE 2256 --outstandingEvents; 2257 refreshState = REF_IDLE; 2258 2259 DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration); 2260 // if moving back to power-down after refresh 2261 if (pwrState != PWR_IDLE) { 2262 assert(pwrState == PWR_PRE_PDN); 2263 DPRINTF(DRAMState, "Switching to power down state after refreshing" 2264 " rank %d at %llu tick\n", rank, curTick()); 2265 } 2266 2267 // completed refresh event, ensure next request is scheduled 2268 if (!memory.nextReqEvent.scheduled()) { 2269 DPRINTF(DRAM, "Scheduling next request after refreshing" 2270 " rank %d\n", rank); 2271 schedule(memory.nextReqEvent, curTick()); 2272 } 2273 } 2274 2275 if ((pwrState == PWR_ACT) && (refreshState == REF_PD_EXIT)) { 2276 // have exited ACT PD 2277 assert(prev_state == PWR_ACT_PDN); 2278 2279 // go back to REF event and close banks 2280 refreshState = REF_PRE; 2281 schedule(refreshEvent, curTick()); 2282 } else if (pwrState == PWR_IDLE) { 2283 DPRINTF(DRAMState, "All banks precharged\n"); 2284 if (prev_state == PWR_SREF) { 2285 // set refresh state to REF_SREF_EXIT, ensuring inRefIdleState 2286 // continues to return false during tXS after SREF exit 2287 // Schedule a refresh which kicks things back into action 2288 // when it finishes 2289 refreshState = REF_SREF_EXIT; 2290 schedule(refreshEvent, curTick() + memory.tXS); 2291 } else { 2292 // if we have a pending refresh, and are now moving to 2293 // the idle state, directly transition to, or schedule refresh 2294 if ((refreshState == REF_PRE) || (refreshState == REF_PD_EXIT)) { 2295 // ensure refresh is restarted only after final PRE command. 2296 // do not restart refresh if controller is in an intermediate 2297 // state, after PRE_PDN exit, when banks are IDLE but an 2298 // ACT is scheduled. 
2299 if (!activateEvent.scheduled()) { 2300 // there should be nothing waiting at this point 2301 assert(!powerEvent.scheduled()); 2302 if (refreshState == REF_PD_EXIT) { 2303 // exiting PRE PD, will be in IDLE until tXP expires 2304 // and then should transition to PWR_REF state 2305 assert(prev_state == PWR_PRE_PDN); 2306 schedulePowerEvent(PWR_REF, curTick() + memory.tXP); 2307 } else if (refreshState == REF_PRE) { 2308 // can directly move to PWR_REF state and proceed below 2309 pwrState = PWR_REF; 2310 } 2311 } else { 2312 // must have PRE scheduled to transition back to IDLE 2313 // and re-kick off refresh 2314 assert(prechargeEvent.scheduled()); 2315 } 2316 } 2317 } 2318 } 2319 2320 // transition to the refresh state and re-start refresh process 2321 // refresh state machine will schedule the next power state transition 2322 if (pwrState == PWR_REF) { 2323 // completed final PRE for refresh or exiting power-down 2324 assert(refreshState == REF_PRE || refreshState == REF_PD_EXIT); 2325 2326 // exited PRE PD for refresh, with no pending commands 2327 // bypass auto-refresh and go straight to SREF, where memory 2328 // will issue refresh immediately upon entry 2329 if (pwrStatePostRefresh == PWR_PRE_PDN && isQueueEmpty() && 2330 (memory.drainState() != DrainState::Draining) && 2331 (memory.drainState() != DrainState::Drained)) { 2332 DPRINTF(DRAMState, "Rank %d bypassing refresh and transitioning " 2333 "to self refresh at %11u tick\n", rank, curTick()); 2334 powerDownSleep(PWR_SREF, curTick()); 2335 2336 // Since refresh was bypassed, remove event by decrementing count 2337 assert(outstandingEvents == 1); 2338 --outstandingEvents; 2339 2340 // reset state back to IDLE temporarily until SREF is entered 2341 pwrState = PWR_IDLE; 2342 2343 // Not bypassing refresh for SREF entry 2344 } else { 2345 DPRINTF(DRAMState, "Refreshing\n"); 2346 2347 // there should be nothing waiting at this point 2348 assert(!powerEvent.scheduled()); 2349 2350 // kick the refresh event loop into action again, and that 2351 // in turn will schedule a transition to the idle power 2352 // state once the refresh is done 2353 schedule(refreshEvent, curTick()); 2354 2355 // Banks transitioned to IDLE, start REF 2356 refreshState = REF_START; 2357 } 2358 } 2359 2360} 2361 2362void 2363DRAMCtrl::Rank::updatePowerStats() 2364{ 2365 // All commands up to refresh have completed 2366 // flush cmdList to DRAMPower 2367 flushCmdList(); 2368 2369 // Call the function that calculates window energy at intermediate update 2370 // events like at refresh, stats dump as well as at simulation exit. 2371 // Window starts at the last time the calcWindowEnergy function was called 2372 // and is upto current time. 
void
DRAMCtrl::Rank::updatePowerStats()
{
    // All commands up to refresh have completed
    // flush cmdList to DRAMPower
    flushCmdList();

    // Call the function that calculates window energy at intermediate update
    // events like at refresh, stats dump as well as at simulation exit.
    // The window starts at the last time the calcWindowEnergy function was
    // called and extends up to the current time.
    power.powerlib.calcWindowEnergy(divCeil(curTick(), memory.tCK) -
                                    memory.timeStampOffset);

    // Get the energy from DRAMPower
    Data::MemoryPowerModel::Energy energy = power.powerlib.getEnergy();

    // The energy components inside the power lib are calculated over
    // the window so accumulate into the corresponding gem5 stat
    actEnergy += energy.act_energy * memory.devicesPerRank;
    preEnergy += energy.pre_energy * memory.devicesPerRank;
    readEnergy += energy.read_energy * memory.devicesPerRank;
    writeEnergy += energy.write_energy * memory.devicesPerRank;
    refreshEnergy += energy.ref_energy * memory.devicesPerRank;
    actBackEnergy += energy.act_stdby_energy * memory.devicesPerRank;
    preBackEnergy += energy.pre_stdby_energy * memory.devicesPerRank;
    actPowerDownEnergy += energy.f_act_pd_energy * memory.devicesPerRank;
    prePowerDownEnergy += energy.f_pre_pd_energy * memory.devicesPerRank;
    selfRefreshEnergy += energy.sref_energy * memory.devicesPerRank;

    // Accumulate window energy into the total energy.
    totalEnergy += energy.window_energy * memory.devicesPerRank;

    // Average power must not be accumulated but calculated over the time
    // since the last stats reset. SimClock::Frequency is the number of
    // ticks per second, so the pJ/tick quotient is scaled to mW as:
    //
    //              energy (pJ)
    // power (mW) = ----------- * tick_frequency (ticks/s) * 1e-9
    //              time (tick)
    averagePower = (totalEnergy.value() /
                    (curTick() - memory.lastStatsResetTick)) *
                   (SimClock::Frequency / 1000000000.0);
}
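// Worked example for the conversion above (an editor's illustration with
// assumed values, not output from the model): with totalEnergy = 2e6 pJ
// accumulated over 1e9 ticks, and SimClock::Frequency = 1e12 ticks/s
// (1 ps per tick), the elapsed time is 1e9 / 1e12 s = 1 ms, so
//   averagePower = (2e6 / 1e9) * (1e12 / 1e9) = 2e-3 * 1e3 = 2 mW,
// which matches the direct check 2e6 pJ / 1 ms = 2e9 pJ/s = 2e-3 W = 2 mW.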
void
DRAMCtrl::Rank::computeStats()
{
    DPRINTF(DRAM, "Computing stats due to a dump callback\n");

    // Update the stats
    updatePowerStats();

    // final update of power state times
    pwrStateTime[pwrState] += (curTick() - pwrStateTick);
    pwrStateTick = curTick();
}

void
DRAMCtrl::Rank::resetStats()
{
    // The only way to clear the counters in DRAMPower is to call the
    // calcWindowEnergy function, as that then calls clearCounters. The
    // clearCounters method itself is private.
    power.powerlib.calcWindowEnergy(divCeil(curTick(), memory.tCK) -
                                    memory.timeStampOffset);
}

void
DRAMCtrl::Rank::regStats()
{
    pwrStateTime
        .init(6)
        .name(name() + ".memoryStateTime")
        .desc("Time in different power states");
    pwrStateTime.subname(0, "IDLE");
    pwrStateTime.subname(1, "REF");
    pwrStateTime.subname(2, "SREF");
    pwrStateTime.subname(3, "PRE_PDN");
    pwrStateTime.subname(4, "ACT");
    pwrStateTime.subname(5, "ACT_PDN");

    actEnergy
        .name(name() + ".actEnergy")
        .desc("Energy for activate commands per rank (pJ)");

    preEnergy
        .name(name() + ".preEnergy")
        .desc("Energy for precharge commands per rank (pJ)");

    readEnergy
        .name(name() + ".readEnergy")
        .desc("Energy for read commands per rank (pJ)");

    writeEnergy
        .name(name() + ".writeEnergy")
        .desc("Energy for write commands per rank (pJ)");

    refreshEnergy
        .name(name() + ".refreshEnergy")
        .desc("Energy for refresh commands per rank (pJ)");

    actBackEnergy
        .name(name() + ".actBackEnergy")
        .desc("Energy for active background per rank (pJ)");

    preBackEnergy
        .name(name() + ".preBackEnergy")
        .desc("Energy for precharge background per rank (pJ)");

    actPowerDownEnergy
        .name(name() + ".actPowerDownEnergy")
        .desc("Energy for active power-down per rank (pJ)");

    prePowerDownEnergy
        .name(name() + ".prePowerDownEnergy")
        .desc("Energy for precharge power-down per rank (pJ)");

    selfRefreshEnergy
        .name(name() + ".selfRefreshEnergy")
        .desc("Energy for self refresh per rank (pJ)");

    totalEnergy
        .name(name() + ".totalEnergy")
        .desc("Total energy per rank (pJ)");

    averagePower
        .name(name() + ".averagePower")
        .desc("Core power per rank (mW)");

    totalIdleTime
        .name(name() + ".totalIdleTime")
        .desc("Total idle time per DRAM rank");

    Stats::registerDumpCallback(new RankDumpCallback(this));
    Stats::registerResetCallback(new RankResetCallback(this));
}

void
DRAMCtrl::regStats()
{
    using namespace Stats;
    MemCtrl::regStats();
    for (auto r : ranks) {
        r->regStats();
    }

    registerResetCallback(new MemResetCallback(this));

    readReqs
        .name(name() + ".readReqs")
        .desc("Number of read requests accepted");

    writeReqs
        .name(name() + ".writeReqs")
        .desc("Number of write requests accepted");

    readBursts
        .name(name() + ".readBursts")
        .desc("Number of DRAM read bursts, "
              "including those serviced by the write queue");

    writeBursts
        .name(name() + ".writeBursts")
        .desc("Number of DRAM write bursts, "
              "including those merged in the write queue");

    servicedByWrQ
        .name(name() + ".servicedByWrQ")
        .desc("Number of DRAM read bursts serviced by the write queue");

    mergedWrBursts
        .name(name() + ".mergedWrBursts")
        .desc("Number of DRAM write bursts merged with an existing one");

    neitherReadNorWrite
        .name(name() + ".neitherReadNorWriteReqs")
        .desc("Number of requests that are neither read nor write");

    perBankRdBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankRdBursts")
        .desc("Per bank read bursts");

    perBankWrBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankWrBursts")
        .desc("Per bank write bursts");

    avgRdQLen
        .name(name() + ".avgRdQLen")
        .desc("Average read queue length when enqueuing")
        .precision(2);

    avgWrQLen
        .name(name() + ".avgWrQLen")
        .desc("Average write queue length when enqueuing")
        .precision(2);

    totQLat
        .name(name() + ".totQLat")
        .desc("Total ticks spent queuing");

    totBusLat
        .name(name() + ".totBusLat")
        .desc("Total ticks spent in databus transfers");

    totMemAccLat
        .name(name() + ".totMemAccLat")
        .desc("Total ticks spent from burst creation until serviced "
              "by the DRAM");

    avgQLat
        .name(name() + ".avgQLat")
        .desc("Average queueing delay per DRAM burst")
        .precision(2);

    avgQLat = totQLat / (readBursts - servicedByWrQ);

    avgBusLat
        .name(name() + ".avgBusLat")
        .desc("Average bus latency per DRAM burst")
        .precision(2);

    avgBusLat = totBusLat / (readBursts - servicedByWrQ);

    avgMemAccLat
        .name(name() + ".avgMemAccLat")
        .desc("Average memory access latency per DRAM burst")
        .precision(2);

    avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ);

    numRdRetry
        .name(name() + ".numRdRetry")
        .desc("Number of times read queue was full causing retry");

    numWrRetry
        .name(name() + ".numWrRetry")
        .desc("Number of times write queue was full causing retry");

    readRowHits
        .name(name() + ".readRowHits")
        .desc("Number of row buffer hits during reads");

    writeRowHits
        .name(name() + ".writeRowHits")
        .desc("Number of row buffer hits during writes");

    readRowHitRate
        .name(name() + ".readRowHitRate")
        .desc("Row buffer hit rate for reads")
        .precision(2);

    readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100;

    writeRowHitRate
        .name(name() + ".writeRowHitRate")
        .desc("Row buffer hit rate for writes")
        .precision(2);

    writeRowHitRate = (writeRowHits / (writeBursts - mergedWrBursts)) * 100;

    readPktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".readPktSize")
        .desc("Read request sizes (log2)");

    writePktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".writePktSize")
        .desc("Write request sizes (log2)");

    rdQLenPdf
        .init(readBufferSize)
        .name(name() + ".rdQLenPdf")
        .desc("What read queue length does an incoming req see");

    wrQLenPdf
        .init(writeBufferSize)
        .name(name() + ".wrQLenPdf")
        .desc("What write queue length does an incoming req see");
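    // Editor's gloss on the init below: maxAccessesPerRow is 0 when the
    // page policy places no cap on accesses per open row, and a histogram
    // cannot be initialised with zero buckets, hence the fall-back to
    // rowBufferSize.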
    bytesPerActivate
        .init(maxAccessesPerRow ? maxAccessesPerRow : rowBufferSize)
        .name(name() + ".bytesPerActivate")
        .desc("Bytes accessed per row activation")
        .flags(nozero);

    rdPerTurnAround
        .init(readBufferSize)
        .name(name() + ".rdPerTurnAround")
        .desc("Reads before turning the bus around for writes")
        .flags(nozero);

    wrPerTurnAround
        .init(writeBufferSize)
        .name(name() + ".wrPerTurnAround")
        .desc("Writes before turning the bus around for reads")
        .flags(nozero);

    bytesReadDRAM
        .name(name() + ".bytesReadDRAM")
        .desc("Total number of bytes read from DRAM");

    bytesReadWrQ
        .name(name() + ".bytesReadWrQ")
        .desc("Total number of bytes read from write queue");

    bytesWritten
        .name(name() + ".bytesWritten")
        .desc("Total number of bytes written to DRAM");

    bytesReadSys
        .name(name() + ".bytesReadSys")
        .desc("Total read bytes from the system interface side");

    bytesWrittenSys
        .name(name() + ".bytesWrittenSys")
        .desc("Total written bytes from the system interface side");

    avgRdBW
        .name(name() + ".avgRdBW")
        .desc("Average DRAM read bandwidth in MByte/s")
        .precision(2);

    avgRdBW = (bytesReadDRAM / 1000000) / simSeconds;

    avgWrBW
        .name(name() + ".avgWrBW")
        .desc("Average achieved write bandwidth in MByte/s")
        .precision(2);

    avgWrBW = (bytesWritten / 1000000) / simSeconds;

    avgRdBWSys
        .name(name() + ".avgRdBWSys")
        .desc("Average system read bandwidth in MByte/s")
        .precision(2);

    avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;

    avgWrBWSys
        .name(name() + ".avgWrBWSys")
        .desc("Average system write bandwidth in MByte/s")
        .precision(2);

    avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;

    peakBW
        .name(name() + ".peakBW")
        .desc("Theoretical peak bandwidth in MByte/s")
        .precision(2);

    peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000;

    busUtil
        .name(name() + ".busUtil")
        .desc("Data bus utilization in percentage")
        .precision(2);

    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;

    totGap
        .name(name() + ".totGap")
        .desc("Total gap between requests");

    avgGap
        .name(name() + ".avgGap")
        .desc("Average gap between requests")
        .precision(2);

    avgGap = totGap / (readReqs + writeReqs);

    // Stats for DRAM Power calculation based on Micron datasheet
    busUtilRead
        .name(name() + ".busUtilRead")
        .desc("Data bus utilization in percentage for reads")
        .precision(2);

    busUtilRead = avgRdBW / peakBW * 100;

    busUtilWrite
        .name(name() + ".busUtilWrite")
        .desc("Data bus utilization in percentage for writes")
        .precision(2);

    busUtilWrite = avgWrBW / peakBW * 100;

    pageHitRate
        .name(name() + ".pageHitRate")
        .desc("Row buffer hit rate, read and write combined")
        .precision(2);

    pageHitRate = (writeRowHits + readRowHits) /
        (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100;
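    // Worked example for the peakBW formula above (editor's illustration
    // with assumed DDR3-1600 x64 parameters, not values taken from this
    // file): burstSize = 64 bytes and tBURST = 5000 ticks (5 ns at 1 ps per
    // tick) give
    //   peakBW = (1e12 / 5000) * 64 / 1e6 = 12800 MByte/s,
    // i.e. the familiar 12.8 GB/s peak of a DDR3-1600 x64 channel.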

    // per-master bytes read and written to memory
    masterReadBytes
        .init(_system->maxMasters())
        .name(name() + ".masterReadBytes")
        .desc("Per-master bytes read from memory")
        .flags(nozero | nonan);

    masterWriteBytes
        .init(_system->maxMasters())
        .name(name() + ".masterWriteBytes")
        .desc("Per-master bytes written to memory")
        .flags(nozero | nonan);

    // per-master read and write rates to memory
    masterReadRate
        .name(name() + ".masterReadRate")
        .desc("Per-master rate of bytes read from memory (Bytes/sec)")
        .flags(nozero | nonan)
        .precision(12);

    masterReadRate = masterReadBytes / simSeconds;

    masterWriteRate
        .name(name() + ".masterWriteRate")
        .desc("Per-master rate of bytes written to memory (Bytes/sec)")
        .flags(nozero | nonan)
        .precision(12);

    masterWriteRate = masterWriteBytes / simSeconds;

    masterReadAccesses
        .init(_system->maxMasters())
        .name(name() + ".masterReadAccesses")
        .desc("Per-master read serviced memory accesses")
        .flags(nozero);

    masterWriteAccesses
        .init(_system->maxMasters())
        .name(name() + ".masterWriteAccesses")
        .desc("Per-master write serviced memory accesses")
        .flags(nozero);

    masterReadTotalLat
        .init(_system->maxMasters())
        .name(name() + ".masterReadTotalLat")
        .desc("Per-master read total memory access latency")
        .flags(nozero | nonan);

    masterReadAvgLat
        .name(name() + ".masterReadAvgLat")
        .desc("Per-master read average memory access latency")
        .flags(nonan)
        .precision(2);

    masterReadAvgLat = masterReadTotalLat / masterReadAccesses;

    masterWriteTotalLat
        .init(_system->maxMasters())
        .name(name() + ".masterWriteTotalLat")
        .desc("Per-master write total memory access latency")
        .flags(nozero | nonan);

    masterWriteAvgLat
        .name(name() + ".masterWriteAvgLat")
        .desc("Per-master write average memory access latency")
        .flags(nonan)
        .precision(2);

    masterWriteAvgLat = masterWriteTotalLat / masterWriteAccesses;

    for (int i = 0; i < _system->maxMasters(); i++) {
        const std::string master = _system->getMasterName(i);
        masterReadBytes.subname(i, master);
        masterReadRate.subname(i, master);
        masterWriteBytes.subname(i, master);
        masterWriteRate.subname(i, master);
        masterReadAccesses.subname(i, master);
        masterWriteAccesses.subname(i, master);
        masterReadTotalLat.subname(i, master);
        masterReadAvgLat.subname(i, master);
        masterWriteTotalLat.subname(i, master);
        masterWriteAvgLat.subname(i, master);
    }
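    // Editor's note: the subname() loop above labels each vector element
    // with its master's name, so the dumped stats read e.g.
    //   system.mem_ctrls.masterReadBytes::cpu.inst (hypothetical master),
    // and Formula stats such as masterReadAvgLat are re-evaluated
    // element-wise at every stats dump rather than at assignment time.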
}

void
DRAMCtrl::recvFunctional(PacketPtr pkt)
{
    // rely on the abstract memory
    functionalAccess(pkt);
}

BaseSlavePort&
DRAMCtrl::getSlavePort(const string &if_name, PortID idx)
{
    if (if_name != "port") {
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return port;
    }
}
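// Editor's sketch of the drain handshake assumed below (see src/sim/drain.hh
// for the authoritative definition): the drain manager calls drain() on every
// object; an object that still has queued work returns DrainState::Draining
// and is expected to call signalDrainDone() once it empties out, while an
// already-quiescent object returns DrainState::Drained immediately.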
DrainState
DRAMCtrl::drain()
{
    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (!(!totalWriteQueueSize && !totalReadQueueSize && respQueue.empty() &&
          allRanksDrained())) {

        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
                " resp: %d\n", totalWriteQueueSize, totalReadQueueSize,
                respQueue.size());

        // the only queue that is not drained automatically over time
        // is the write queue, thus kick things into action if needed
        if (totalWriteQueueSize && !nextReqEvent.scheduled()) {
            schedule(nextReqEvent, curTick());
        }

        // also need to kick off events to exit self-refresh
        for (auto r : ranks) {
            // force self-refresh exit, which in turn will issue auto-refresh
            if (r->pwrState == PWR_SREF) {
                DPRINTF(DRAM, "Rank%d: Forcing self-refresh wakeup in drain\n",
                        r->rank);
                r->scheduleWakeUpEvent(tXS);
            }
        }

        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}

bool
DRAMCtrl::allRanksDrained() const
{
    // true until proven false
    bool all_ranks_drained = true;
    for (auto r : ranks) {
        // then verify that the power state is IDLE ensuring all banks are
        // closed and rank is not in a low power state. Also verify that rank
        // is idle from a refresh point of view.
        all_ranks_drained = r->inPwrIdleState() && r->inRefIdleState() &&
            all_ranks_drained;
    }
    return all_ranks_drained;
}

void
DRAMCtrl::drainResume()
{
    if (!isTimingMode && system()->isTimingMode()) {
        // if we switched to timing mode, kick things into action,
        // and behave as if we restored from a checkpoint
        startup();
    } else if (isTimingMode && !system()->isTimingMode()) {
        // if we switch from timing mode, stop the refresh events to
        // not cause issues with KVM
        for (auto r : ranks) {
            r->suspend();
        }
    }

    // update the mode
    isTimingMode = system()->isTimingMode();
}

DRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _memory)
    : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this),
      memory(_memory)
{ }

AddrRangeList
DRAMCtrl::MemoryPort::getAddrRanges() const
{
    AddrRangeList ranges;
    ranges.push_back(memory.getAddrRange());
    return ranges;
}

void
DRAMCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(memory.name());

    if (!queue.trySatisfyFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory.recvFunctional(pkt);
    }

    pkt->popLabel();
}

Tick
DRAMCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory.recvAtomic(pkt);
}

bool
DRAMCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller
    return memory.recvTimingReq(pkt);
}

DRAMCtrl*
DRAMCtrlParams::create()
{
    return new DRAMCtrl(this);
}
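// Usage sketch (editor's illustration, not part of this file): in a gem5
// Python configuration the controller is instantiated via one of the
// DRAMCtrl subclasses generated from the params, e.g.
//
//   system.mem_ctrl = DDR3_1600_8x8(range = AddrRange('512MB'))
//   system.mem_ctrl.port = system.membus.master
//
// DRAMCtrlParams::create() above is what turns those Python params into
// this C++ object when the simulation is instantiated.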