dram_ctrl.cc: changes from revision 10286:e95a0ab1d368 to revision 10393:0fafa62b6c01
1/*
2 * Copyright (c) 2010-2014 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software

--- 62 unchanged lines hidden ---

71 ranksPerChannel(p->ranks_per_channel),
72 banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
73 readBufferSize(p->read_buffer_size),
74 writeBufferSize(p->write_buffer_size),
75 writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
76 writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
77 minWritesPerSwitch(p->min_writes_per_switch),
78 writesThisTime(0), readsThisTime(0),
79 tCK(p->tCK), tWTR(p->tWTR), tRTW(p->tRTW), tBURST(p->tBURST),
79 tCK(p->tCK), tWTR(p->tWTR), tRTW(p->tRTW), tCS(p->tCS), tBURST(p->tBURST),
80 tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS), tWR(p->tWR),
81 tRTP(p->tRTP), tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD),
82 tXAW(p->tXAW), activationLimit(p->activation_limit),
83 memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
84 pageMgmt(p->page_policy),
85 maxAccessesPerRow(p->max_accesses_per_row),
86 frontendLatency(p->static_frontend_latency),
87 backendLatency(p->static_backend_latency),
88 busBusyUntil(0), refreshDueAt(0), refreshState(REF_IDLE),
89 pwrStateTrans(PWR_IDLE), pwrState(PWR_IDLE), prevArrival(0),
90 nextReqTime(0), pwrStateTick(0), numBanksActive(0)
90 nextReqTime(0), pwrStateTick(0), numBanksActive(0),
91 activeRank(0)
91{
92 // create the bank states based on the dimensions of the ranks and
93 // banks
94 banks.resize(ranksPerChannel);
95 actTicks.resize(ranksPerChannel);
96 for (size_t c = 0; c < ranksPerChannel; ++c) {
97 banks[c].resize(banksPerRank);
98 actTicks[c].resize(activationLimit, 0);

--- 579 unchanged lines hidden ---

678 // so if there is a read that was forced to wait, retry now
679 if (retryRdReq) {
680 retryRdReq = false;
681 port.sendRetry();
682 }
683}
684
685void
686DRAMCtrl::chooseNext(std::deque<DRAMPacket*>& queue)
687DRAMCtrl::chooseNext(std::deque<DRAMPacket*>& queue, bool switched_cmd_type)
687{
688 // This method does the arbitration between requests. The chosen
689 // packet is simply moved to the head of the queue. The other
690 // methods know that this is the place to look. For example, with
691 // FCFS, this method does nothing
692 assert(!queue.empty());
693
694 if (queue.size() == 1) {
695 DPRINTF(DRAM, "Single request, nothing to do\n");
696 return;
697 }
698
699 if (memSchedPolicy == Enums::fcfs) {
700 // Do nothing, since the correct request is already head
701 } else if (memSchedPolicy == Enums::frfcfs) {
702 reorderQueue(queue);
703 reorderQueue(queue, switched_cmd_type);
703 } else
704 panic("No scheduling policy chosen\n");
705}
706
707void
708DRAMCtrl::reorderQueue(std::deque<DRAMPacket*>& queue)
709DRAMCtrl::reorderQueue(std::deque<DRAMPacket*>& queue, bool switched_cmd_type)
709{
710 // Only determine this when needed
711 uint64_t earliest_banks = 0;
712
713 // Search for row hits first, if no row hit is found then schedule the
714 // packet to one of the earliest banks available
715 bool found_earliest_pkt = false;
717 bool found_prepped_diff_rank_pkt = false;
716 auto selected_pkt_it = queue.begin();
717
718 for (auto i = queue.begin(); i != queue.end() ; ++i) {
719 DRAMPacket* dram_pkt = *i;
720 const Bank& bank = dram_pkt->bankRef;
721 // Check if it is a row hit
722 if (bank.openRow == dram_pkt->row) {
723 // FCFS within the hits
724 DPRINTF(DRAM, "Row buffer hit\n");
725 selected_pkt_it = i;
726 break;
727 } else if (!found_earliest_pkt) {
728 // No row hit, go for first ready
725 if (dram_pkt->rank == activeRank || switched_cmd_type) {
726 // FCFS within the hits, giving priority to commands
727 // that access the same rank as the previous burst
728 // to minimize bus turnaround delays
729 // Only give rank priority when command type is not changing
730 DPRINTF(DRAM, "Row buffer hit\n");
731 selected_pkt_it = i;
732 break;
733 } else if (!found_prepped_diff_rank_pkt) {
734 // found row hit for command on different rank than prev burst
735 selected_pkt_it = i;
736 found_prepped_diff_rank_pkt = true;
737 }
738 } else if (!found_earliest_pkt & !found_prepped_diff_rank_pkt) {
739 // No row hit and
740 // haven't found an entry with a row hit to a new rank
729 if (earliest_banks == 0)
730 earliest_banks = minBankActAt(queue);
742 // Determine entries with earliest bank prep delay
743 // Function will give priority to commands that access the
744 // same rank as previous burst and can prep the bank seamlessly
745 earliest_banks = minBankPrep(queue, switched_cmd_type);
731
732 // simplistic approximation of when the bank can issue an
733 // activate, this is calculated in minBankActAt and could
734 // be cached
735 Tick act_at = bank.openRow == Bank::NO_ROW ?
736 bank.actAllowedAt :
737 std::max(bank.preAllowedAt, curTick()) + tRP;
738
739 // Bank is ready or is the first available bank
740 if (act_at <= curTick() ||
741 bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) {
747 // FCFS - Bank is first available bank
748 if (bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) {
742 // Remember the packet to be scheduled to one of the earliest
743 // banks available, FCFS amongst the earliest banks
744 selected_pkt_it = i;
745 found_earliest_pkt = true;
746 }
747 }
748 }
749
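The loop above is the heart of the FR-FCFS arbiter: row hits win, same-rank row hits win over hits on another rank unless the command type is changing, and otherwise the oldest packet targeting one of the earliest-preparable banks is taken. Below is a minimal sketch of that priority order, assuming a reduced request record and a precomputed earliest-bank mask; Req, pickNext, rowHit and earliestBankMask are illustrative names, not controller code.

    #include <cstdint>
    #include <deque>

    // illustrative sketch: reduced request record, not the controller's DRAMPacket
    struct Req { int rank; bool rowHit; uint8_t bankId; };

    static std::deque<Req>::iterator
    pickNext(std::deque<Req>& q, int activeRank, bool switchedCmdType,
             uint64_t earliestBankMask)
    {
        // assumes q is non-empty, as the real arbiter asserts
        auto selected = q.begin();
        bool diffRankHit = false;
        bool earliest = false;
        for (auto i = q.begin(); i != q.end(); ++i) {
            if (i->rowHit) {
                // 1) FCFS among row hits on the currently active rank
                //    (or any row hit when the bus direction is turning around)
                if (i->rank == activeRank || switchedCmdType)
                    return i;
                // 2) otherwise remember the first row hit on a different rank
                if (!diffRankHit) {
                    selected = i;
                    diffRankHit = true;
                }
            } else if (!earliest && !diffRankHit &&
                       ((earliestBankMask >> i->bankId) & 1)) {
                // 3) no row hit found yet: FCFS among the banks that can be
                //    prepared earliest
                selected = i;
                earliest = true;
            }
        }
        return selected;
    }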

--- 228 unchanged lines hidden ---

978
979 // only one burst can use the bus at any one point in time
980 assert(dram_pkt->readyTime - busBusyUntil >= tBURST);
981
982 // not strictly necessary, but update the time for the next
983 // read/write (add a max with tCCD here)
984 bank.colAllowedAt = cmd_at + tBURST;
985
993 // Save rank of current access
994 activeRank = dram_pkt->rank;
995
986 // If this is a write, we also need to respect the write recovery
987 // time before a precharge, in the case of a read, respect the
988 // read to precharge constraint
989 bank.preAllowedAt = std::max(bank.preAllowedAt,
990 dram_pkt->isRead ? cmd_at + tRTP :
991 dram_pkt->readyTime + tWR);
992
993 // increment the bytes accessed and the accesses per row
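For reference, the bank bookkeeping above reduces to two updates per burst: the next column command has to wait out the burst, and a precharge has to respect read-to-precharge for reads or the write recovery time (counted from the data being on the pins, readyTime) for writes. A small sketch, with the bank trimmed to the two fields involved; advanceBankTiming and its tick-typed parameters are illustrative.

    #include <algorithm>
    #include <cstdint>

    using Tick = uint64_t;

    // sketch only: Bank reduced to the two timing fields updated above
    struct Bank { Tick colAllowedAt = 0; Tick preAllowedAt = 0; };

    static void
    advanceBankTiming(Bank& b, bool is_read, Tick cmd_at, Tick ready_at,
                      Tick tBURST, Tick tRTP, Tick tWR)
    {
        // the next read/write to this bank must wait for the burst to finish
        b.colAllowedAt = cmd_at + tBURST;
        // reads respect read-to-precharge, writes the write recovery time
        b.preAllowedAt = std::max(b.preAllowedAt,
                                  is_read ? cmd_at + tRTP : ready_at + tWR);
    }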

--- 96 unchanged lines hidden ---

1090 bytesWritten += burstSize;
1091 perBankWrBursts[dram_pkt->bankId]++;
1092 }
1093}
1094
1095void
1096DRAMCtrl::processNextReqEvent()
1097{
1108 // pre-emptively set to false. Overwrite if in READ_TO_WRITE
1109 // or WRITE_TO_READ state
1110 bool switched_cmd_type = false;
1098 if (busState == READ_TO_WRITE) {
1099 DPRINTF(DRAM, "Switching to writes after %d reads with %d reads "
1100 "waiting\n", readsThisTime, readQueue.size());
1101
1102 // sample and reset the read-related stats as we are now
1103 // transitioning to writes, and all reads are done
1104 rdPerTurnAround.sample(readsThisTime);
1105 readsThisTime = 0;
1106
1107 // now proceed to do the actual writes
1108 busState = WRITE;
1122 switched_cmd_type = true;
1109 } else if (busState == WRITE_TO_READ) {
1110 DPRINTF(DRAM, "Switching to reads after %d writes with %d writes "
1111 "waiting\n", writesThisTime, writeQueue.size());
1112
1113 wrPerTurnAround.sample(writesThisTime);
1114 writesThisTime = 0;
1115
1116 busState = READ;
1131 switched_cmd_type = true;
1117 }
1118
1119 if (refreshState != REF_IDLE) {
1120 // if a refresh waiting for this event loop to finish, then hand
1121 // over now, and do not schedule a new nextReqEvent
1122 if (refreshState == REF_DRAIN) {
1123 DPRINTF(DRAM, "Refresh drain done, now precharging\n");
1124

--- 30 unchanged lines hidden ---

1155
1156 // nothing to do, not even any point in scheduling an
1157 // event for the next request
1158 return;
1159 }
1160 } else {
1161 // Figure out which read request goes next, and move it to the
1162 // front of the read queue
1163 chooseNext(readQueue);
1178 chooseNext(readQueue, switched_cmd_type);
1164
1165 DRAMPacket* dram_pkt = readQueue.front();
1166
1182 // here we get a bit creative and shift the bus busy time not
1183 // just the tWTR, but also a CAS latency to capture the fact
1184 // that we are allowed to prepare a new bank, but not issue a
1185 // read command until after tWTR, in essence we capture a
1186 // bubble on the data bus that is tWTR + tCL
1187 if (switched_cmd_type) {
1188 // add a bubble to the data bus for write-to-read turn around
1189 // or tCS (different rank bus delay).
1190 busBusyUntil += (dram_pkt->rank == activeRank) ? tWTR + tCL :
1191 tCS;
1192 } else if (dram_pkt->rank != activeRank) {
1193 // add a bubble to the data bus, as defined by the
1194 // tCS parameter for rank-to-rank delay
1195 busBusyUntil += tCS;
1196 }
1197
1167 doDRAMAccess(dram_pkt);
1168
1169 // At this point we're done dealing with the request
1170 readQueue.pop_front();
1171
1172 // sanity check
1173 assert(dram_pkt->size <= burstSize);
1174 assert(dram_pkt->readyTime >= curTick());

--- 17 unchanged lines hidden ---

1192 }
1193
1194 // switching to writes, either because the read queue is empty
1195 // and the writes have passed the low threshold (or we are
1196 // draining), or because the writes hit the high threshold
1197 if (switch_to_writes) {
1198 // transition to writing
1199 busState = READ_TO_WRITE;
1200
1201 // add a bubble to the data bus, as defined by the
1202 // tRTW parameter
1203 busBusyUntil += tRTW;
1204
1205 // update the minimum timing between the requests,
1206 // this shifts us back in time far enough to do any
1207 // bank preparation
1208 nextReqTime = busBusyUntil - (tRP + tRCD + tCL);
1209 }
1210 } else {
1211 chooseNext(writeQueue);
1233 chooseNext(writeQueue, switched_cmd_type);
1212 DRAMPacket* dram_pkt = writeQueue.front();
1213 // sanity check
1214 assert(dram_pkt->size <= burstSize);
1238 if (switched_cmd_type) {
1239 // add a bubble to the data bus, as defined by the
1240 // tRTW or tCS parameter, depending on whether changing ranks
1241 busBusyUntil += (dram_pkt->rank == activeRank) ? tRTW : tCS;
1242 } else if (dram_pkt->rank != activeRank) {
1243 // add a bubble to the data bus, as defined by the
1244 // tCS parameter for rank-to-rank delay
1245 busBusyUntil += tCS;
1246 }
1247
1215 doDRAMAccess(dram_pkt);
1216
1217 writeQueue.pop_front();
1218 delete dram_pkt;
1219
1220 // If we emptied the write queue, or got sufficiently below the
1221 // threshold (using the minWritesPerSwitch as the hysteresis) and
1222 // are not draining, or we have reads waiting and have done enough

--- 4 unchanged lines hidden ---

1227 (!readQueue.empty() && writesThisTime >= minWritesPerSwitch)) {
1228 // turn the bus back around for reads again
1229 busState = WRITE_TO_READ;
1230
1231 // note that we switch back to reads also in the idle
1232 // case, which eventually will check for any draining and
1233 // also pause any further scheduling if there is really
1234 // nothing to do
1235
1236 // here we get a bit creative and shift the bus busy time not
1237 // just the tWTR, but also a CAS latency to capture the fact
1238 // that we are allowed to prepare a new bank, but not issue a
1239 // read command until after tWTR, in essence we capture a
1240 // bubble on the data bus that is tWTR + tCL
1241 busBusyUntil += tWTR + tCL;
1242
1243 // update the minimum timing between the requests, this shifts
1244 // us back in time far enough to do any bank preparation
1245 nextReqTime = busBusyUntil - (tRP + tRCD + tCL);
1246 }
1247 }
1248
1249 schedule(nextReqEvent, std::max(nextReqTime, curTick()));
1250
1251 // If there is space available and we have writes waiting then let
1252 // them retry. This is done here to ensure that the retry does not
1253 // cause a nextReqEvent to be scheduled before we do so as part of
1254 // the next request processing
1255 if (retryWrReq && writeQueue.size() < writeBufferSize) {
1256 retryWrReq = false;
1257 port.sendRetry();
1258 }
1259}
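Both branches above pick the data-bus bubble from the same small decision: a write-to-read turnaround on the same rank costs tWTR plus a CAS latency, a read-to-write turnaround costs tRTW, and crossing to a different rank costs the new tCS rank-to-rank delay whether or not the command type changes. A sketch of that choice as one helper; busTurnaroundBubble and its parameters are illustrative, with the t* names mirroring the controller's timing members.

    #include <cstdint>

    using Tick = uint64_t;

    // sketch of the bubble choice applied before each burst is issued
    static Tick
    busTurnaroundBubble(bool switched_cmd_type, bool now_reading, bool same_rank,
                        Tick tWTR, Tick tCL, Tick tRTW, Tick tCS)
    {
        if (switched_cmd_type) {
            // turning the bus around while staying on the same rank
            if (same_rank)
                return now_reading ? tWTR + tCL : tRTW;
            // turning it around while also switching rank
            return tCS;
        }
        // same command type, but a different rank still pays the rank-switch delay
        return same_rank ? 0 : tCS;
    }

The returned bubble is what busBusyUntil is advanced by just before doDRAMAccess() issues the chosen burst in the two branches above.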
1260
1261uint64_t
1262DRAMCtrl::minBankActAt(const deque<DRAMPacket*>& queue) const
1284DRAMCtrl::minBankPrep(const deque<DRAMPacket*>& queue,
1285 bool switched_cmd_type) const
1263{
1264 uint64_t bank_mask = 0;
1265 Tick min_act_at = MaxTick;
1266
1267 // deterimne if we have queued transactions targetting a
1290 uint64_t bank_mask_same_rank = 0;
1291 Tick min_act_at_same_rank = MaxTick;
1292
1293 // Give precedence to commands that access same rank as previous command
1294 bool same_rank_match = false;
1295
1296 // determine if we have queued transactions targetting the
1268 // bank in question
1269 vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
1270 for (auto p = queue.begin(); p != queue.end(); ++p) {
1271 got_waiting[(*p)->bankId] = true;
1272 }
1273
1274 for (int i = 0; i < ranksPerChannel; i++) {
1275 for (int j = 0; j < banksPerRank; j++) {
1276 uint8_t bank_id = i * banksPerRank + j;
1277
1278 // if we have waiting requests for the bank, and it is
1279 // amongst the first available, update the mask
1280 if (got_waiting[bank_id]) {
1281 // simplistic approximation of when the bank can issue
1282 // an activate, ignoring any rank-to-rank switching
1283 // cost
1312 // cost in this calculation
1284 Tick act_at = banks[i][j].openRow == Bank::NO_ROW ?
1285 banks[i][j].actAllowedAt :
1286 std::max(banks[i][j].preAllowedAt, curTick()) + tRP;
1287
1288 if (act_at <= min_act_at) {
1289 // reset bank mask if new minimum is found
1290 if (act_at < min_act_at)
1291 bank_mask = 0;
1292 // set the bit corresponding to the available bank
1293 replaceBits(bank_mask, bank_id, bank_id, 1);
1294 min_act_at = act_at;
1317 // prioritize commands that access the
1318 // same rank as previous burst
1319 // Calculate bank mask separately for the case and
1320 // evaluate after loop iterations complete
1321 if (i == activeRank && ranksPerChannel > 1) {
1322 if (act_at <= min_act_at_same_rank) {
1323 // reset same rank bank mask if new minimum is found
1324 // and previous minimum could not immediately send ACT
1325 if (act_at < min_act_at_same_rank &&
1326 min_act_at_same_rank > curTick())
1327 bank_mask_same_rank = 0;
1328
1329 // Set flag indicating that a same rank
1330 // opportunity was found
1331 same_rank_match = true;
1332
1333 // set the bit corresponding to the available bank
1334 replaceBits(bank_mask_same_rank, bank_id, bank_id, 1);
1335 min_act_at_same_rank = act_at;
1336 }
1337 } else {
1338 if (act_at <= min_act_at) {
1339 // reset bank mask if new minimum is found
1340 // and either previous minimum could not immediately send ACT
1341 if (act_at < min_act_at && min_act_at > curTick())
1342 bank_mask = 0;
1343 // set the bit corresponding to the available bank
1344 replaceBits(bank_mask, bank_id, bank_id, 1);
1345 min_act_at = act_at;
1346 }
1295 }
1296 }
1297 }
1298 }
1299
1352 // Determine the earliest time when the next burst can issue based
1353 // on the current busBusyUntil delay.
1354 // Offset by tRCD to correlate with ACT timing variables
1355 Tick min_cmd_at = busBusyUntil - tCL - tRCD;
1356
1357 // Prioritize same rank accesses that can issue B2B
1358 // Only optimize for same ranks when the command type
1359 // does not change; do not want to unnecessarily incur tWTR
1360 //
1361 // Resulting FCFS prioritization Order is:
1362 // 1) Commands that access the same rank as previous burst
1363 // and can prep the bank seamlessly.
1364 // 2) Commands (any rank) with earliest bank prep
1365 if (!switched_cmd_type && same_rank_match &&
1366 min_act_at_same_rank <= min_cmd_at) {
1367 bank_mask = bank_mask_same_rank;
1368 }
1369
1300 return bank_mask;
1301}
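The mask returned here is a one-bit-per-bank encoding: bit n stands for bankId n = rank * banksPerRank + bank, and, when no row hit is available, reorderQueue() takes the first queued packet (FCFS) whose bankId bit is set. A sketch of the encoding with plain shifts standing in for the controller's replaceBits()/bits() helpers; setBit/testBit are illustrative names.

    #include <cstdint>

    static inline void setBit(uint64_t& mask, unsigned bank_id)
    {
        // stand-in for replaceBits(mask, bank_id, bank_id, 1):
        // mark this bank as one of the earliest-available
        mask |= (uint64_t{1} << bank_id);
    }

    static inline bool testBit(uint64_t mask, unsigned bank_id)
    {
        // stand-in for bits(mask, bank_id, bank_id):
        // is this bank among the earliest?
        return (mask >> bank_id) & 1;
    }

With two ranks of eight banks, for example, bankIds 0-7 belong to rank 0 and 8-15 to rank 1, so a returned mask of 0x100 would steer the scheduler towards bank 0 of rank 1.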
1302
1303void
1304DRAMCtrl::processRefreshEvent()
1305{
1306 // when first preparing the refresh, remember when it was due
1307 if (refreshState == REF_IDLE) {

--- 553 unchanged lines hidden ---