42a43
> * Omar Naji
62,63c63
< nextReqEvent(this), respondEvent(this), activateEvent(this),
< prechargeEvent(this), refreshEvent(this), powerEvent(this),
---
> nextReqEvent(this), respondEvent(this),
92,95c92,93
< busBusyUntil(0), refreshDueAt(0), refreshState(REF_IDLE),
< pwrStateTrans(PWR_IDLE), pwrState(PWR_IDLE), prevArrival(0),
< nextReqTime(0), pwrStateTick(0), numBanksActive(0),
< activeRank(0), timeStampOffset(0)
---
> busBusyUntil(0), prevArrival(0),
> nextReqTime(0), activeRank(0), timeStampOffset(0)
97,101d94
< // create the bank states based on the dimensions of the ranks and
< // banks
< banks.resize(ranksPerChannel);
<
< //create list of drampower objects. For each rank 1 drampower instance.
103,105c96,97
< DRAMPower drampower = DRAMPower(p, false);
< rankPower.emplace_back(drampower);
< }
---
> Rank* rank = new Rank(*this, p);
> ranks.push_back(rank);
107,111c99,101
< actTicks.resize(ranksPerChannel);
< for (size_t c = 0; c < ranksPerChannel; ++c) {
< banks[c].resize(banksPerRank);
< actTicks[c].resize(activationLimit, 0);
< }
---
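> // size the per-rank structures and record this rank's index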
> rank->actTicks.resize(activationLimit, 0);
> rank->banks.resize(banksPerRank);
> rank->rank = i;
113,114d102
< // set the bank indices
< for (int r = 0; r < ranksPerChannel; r++) {
116,117c104
< banks[r][b].rank = r;
< banks[r][b].bank = b;
---
> rank->banks[b].bank = b;
129c116
< banks[r][b].bankgr = b % bankGroupsPerRank;
---
> rank->banks[b].bankgr = b % bankGroupsPerRank;
132c119
< banks[r][b].bankgr = b;
---
> rank->banks[b].bankgr = b;
256a244
>
259c247,249
< pwrStateTick = curTick();
---
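> // kick off each rank's state machine, leaving enough time to
> // precharge before the first refresh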
> for (auto r : ranks) {
> r->startup(curTick() + tREFI - tRP);
> }
266,269d255
<
< // kick off the refresh, and give ourselves enough time to
< // precharge
< schedule(refreshEvent, curTick() + tREFI - tRP);
414c400
< size, banks[rank][bank]);
---
> size, ranks[rank]->banks[bank], *ranks[rank]);
758c744
< void
---
> bool
766a753,754
> // bool to indicate if a packet to an available rank is found
> bool found_packet = false;
768,769c756,764
< DPRINTF(DRAM, "Single request, nothing to do\n");
< return;
---
> DRAMPacket* dram_pkt = queue.front();
> // an available rank corresponds to one in the refresh idle state
> if (ranks[dram_pkt->rank]->isAvailable()) {
> found_packet = true;
> DPRINTF(DRAM, "Single request, going to a free rank\n");
> } else {
> DPRINTF(DRAM, "Single request, going to a busy rank\n");
> }
> return found_packet;
773c768,777
< // Do nothing, since the correct request is already head
---
> // check if there is a packet going to a free rank
> for (auto i = queue.begin(); i != queue.end(); ++i) {
> DRAMPacket* dram_pkt = *i;
> if (ranks[dram_pkt->rank]->isAvailable()) {
> queue.erase(i);
> queue.push_front(dram_pkt);
> found_packet = true;
> break;
> }
> }
775c779
< reorderQueue(queue, switched_cmd_type);
---
> found_packet = reorderQueue(queue, switched_cmd_type);
777a782
> return found_packet;
780c785
< void
---
> bool
787a793
> bool found_packet = false;
790c796
< auto selected_pkt_it = queue.begin();
---
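> // start with an invalid iterator; it is only set when a packet
> // to an available rank is found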
> auto selected_pkt_it = queue.end();
794a801
> // check if the rank is busy; if so, jump to the next packet
796,817c803,829
< if (bank.openRow == dram_pkt->row) {
< if (dram_pkt->rank == activeRank || switched_cmd_type) {
< // FCFS within the hits, giving priority to commands
< // that access the same rank as the previous burst
< // to minimize bus turnaround delays
< // Only give rank prioity when command type is not changing
< DPRINTF(DRAM, "Row buffer hit\n");
< selected_pkt_it = i;
< break;
< } else if (!found_prepped_diff_rank_pkt) {
< // found row hit for command on different rank than prev burst
< selected_pkt_it = i;
< found_prepped_diff_rank_pkt = true;
< }
< } else if (!found_earliest_pkt & !found_prepped_diff_rank_pkt) {
< // No row hit and
< // haven't found an entry with a row hit to a new rank
< if (earliest_banks == 0)
< // Determine entries with earliest bank prep delay
< // Function will give priority to commands that access the
< // same rank as previous burst and can prep the bank seamlessly
< earliest_banks = minBankPrep(queue, switched_cmd_type);
---
> if (dram_pkt->rankRef.isAvailable()) {
> if (bank.openRow == dram_pkt->row) {
> if (dram_pkt->rank == activeRank || switched_cmd_type) {
> // FCFS within the hits, giving priority to commands
> // that access the same rank as the previous burst
> // to minimize bus turnaround delays
> // Only give rank priority when the command type is
> // not changing
> DPRINTF(DRAM, "Row buffer hit\n");
> selected_pkt_it = i;
> break;
> } else if (!found_prepped_diff_rank_pkt) {
> // found row hit for command on different rank
> // than prev burst
> selected_pkt_it = i;
> found_prepped_diff_rank_pkt = true;
> }
> } else if (!found_earliest_pkt & !found_prepped_diff_rank_pkt) {
> // packet going to a rank which is currently not waiting
> // for a refresh, no row hit, and we haven't found an
> // entry with a row hit to a new rank
> if (earliest_banks == 0)
> // Determine entries with earliest bank prep delay
> // Function will give priority to commands that access the
> // same rank as previous burst and can prep
> // the bank seamlessly
> earliest_banks = minBankPrep(queue, switched_cmd_type);
819,824c831,841
< // FCFS - Bank is first available bank
< if (bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) {
< // Remember the packet to be scheduled to one of the earliest
< // banks available, FCFS amongst the earliest banks
< selected_pkt_it = i;
< found_earliest_pkt = true;
---
> // FCFS - Bank is first available bank
> if (bits(earliest_banks, dram_pkt->bankId,
> dram_pkt->bankId)) {
> // Remember the packet to be scheduled to one of
> // the earliest banks available, FCFS amongst the
> // earliest banks
> selected_pkt_it = i;
> // the packet found is going to a rank that is currently
> // not busy, so mark that the earliest packet has been found
> found_earliest_pkt = true;
> }
829,831c846,852
< DRAMPacket* selected_pkt = *selected_pkt_it;
< queue.erase(selected_pkt_it);
< queue.push_front(selected_pkt);
---
> if (selected_pkt_it != queue.end()) {
> DRAMPacket* selected_pkt = *selected_pkt_it;
> queue.erase(selected_pkt_it);
> queue.push_front(selected_pkt);
> found_packet = true;
> }
> return found_packet;
867c888,889
< DRAMCtrl::activateBank(Bank& bank, Tick act_tick, uint32_t row)
---
> DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref,
> Tick act_tick, uint32_t row)
869,870c891
< // get the rank index from the bank
< uint8_t rank = bank.rank;
---
> assert(rank_ref.actTicks.size() == activationLimit);
872,873d892
< assert(actTicks[rank].size() == activationLimit);
<
877,878c896,897
< assert(bank.openRow == Bank::NO_ROW);
< bank.openRow = row;
---
> assert(bank_ref.openRow == Bank::NO_ROW);
> bank_ref.openRow = row;
883,884c902,903
< bank.bytesAccessed = 0;
< bank.rowAccesses = 0;
---
> bank_ref.bytesAccessed = 0;
> bank_ref.rowAccesses = 0;
886,887c905,906
< ++numBanksActive;
< assert(numBanksActive <= banksPerRank * ranksPerChannel);
---
> ++rank_ref.numBanksActive;
> assert(rank_ref.numBanksActive <= banksPerRank);
890c909,910
< bank.bank, bank.rank, act_tick, numBanksActive);
---
> bank_ref.bank, rank_ref.rank, act_tick,
> ranks[rank_ref.rank]->numBanksActive);
892,894c912,914
< rankPower[bank.rank].powerlib.doCommand(MemCommand::ACT, bank.bank,
< divCeil(act_tick, tCK) -
< timeStampOffset);
---
> rank_ref.power.powerlib.doCommand(MemCommand::ACT, bank_ref.bank,
> divCeil(act_tick, tCK) -
> timeStampOffset);
897c917
< timeStampOffset, bank.bank, bank.rank);
---
> timeStampOffset, bank_ref.bank, rank_ref.rank);
900c920
< bank.preAllowedAt = act_tick + tRAS;
---
> bank_ref.preAllowedAt = act_tick + tRAS;
903c923
< bank.colAllowedAt = std::max(act_tick + tRCD, bank.colAllowedAt);
---
> bank_ref.colAllowedAt = std::max(act_tick + tRCD, bank_ref.colAllowedAt);
909c929
< if (bankGroupArch && (bank.bankgr == banks[rank][i].bankgr)) {
---
> if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) {
913,914c933,934
< banks[rank][i].actAllowedAt = std::max(act_tick + tRRD_L,
< banks[rank][i].actAllowedAt);
---
> rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD_L,
> rank_ref.banks[i].actAllowedAt);
919,920c939,940
< banks[rank][i].actAllowedAt = std::max(act_tick + tRRD,
< banks[rank][i].actAllowedAt);
---
> rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD,
> rank_ref.banks[i].actAllowedAt);
926c946
< if (!actTicks[rank].empty()) {
---
> if (!rank_ref.actTicks.empty()) {
928,929c948,949
< if (actTicks[rank].back() &&
< (act_tick - actTicks[rank].back()) < tXAW) {
---
> if (rank_ref.actTicks.back() &&
> (act_tick - rank_ref.actTicks.back()) < tXAW) {
932,933c952,953
< actTicks[rank].back(), act_tick, actTicks[rank].back(),
< tXAW);
---
> rank_ref.actTicks.back(), act_tick,
> rank_ref.actTicks.back(), tXAW);
938c958
< actTicks[rank].pop_back();
---
> rank_ref.actTicks.pop_back();
941c961
< actTicks[rank].push_front(act_tick);
---
> rank_ref.actTicks.push_front(act_tick);
946,947c966,967
< if (actTicks[rank].back() &&
< (act_tick - actTicks[rank].back()) < tXAW) {
---
> if (rank_ref.actTicks.back() &&
> (act_tick - rank_ref.actTicks.back()) < tXAW) {
950c970
< actTicks[rank].back() + tXAW);
---
> rank_ref.actTicks.back() + tXAW);
953,955c973,975
< banks[rank][j].actAllowedAt =
< std::max(actTicks[rank].back() + tXAW,
< banks[rank][j].actAllowedAt);
---
> rank_ref.banks[j].actAllowedAt =
> std::max(rank_ref.actTicks.back() + tXAW,
> rank_ref.banks[j].actAllowedAt);
961,963c981,983
< if (!activateEvent.scheduled())
< schedule(activateEvent, act_tick);
< else if (activateEvent.when() > act_tick)
---
> if (!rank_ref.activateEvent.scheduled())
> schedule(rank_ref.activateEvent, act_tick);
> else if (rank_ref.activateEvent.when() > act_tick)
965c985
< reschedule(activateEvent, act_tick);
---
> reschedule(rank_ref.activateEvent, act_tick);
969c989
< DRAMCtrl::processActivateEvent()
---
> DRAMCtrl::prechargeBank(Rank& rank_ref, Bank& bank, Tick pre_at, bool trace)
971,980d990
< // we should transition to the active state as soon as any bank is active
< if (pwrState != PWR_ACT)
< // note that at this point numBanksActive could be back at
< // zero again due to a precharge scheduled in the future
< schedulePowerEvent(PWR_ACT, curTick());
< }
<
< void
< DRAMCtrl::prechargeBank(Bank& bank, Tick pre_at, bool trace)
< {
997,998c1007,1008
< assert(numBanksActive != 0);
< --numBanksActive;
---
> assert(rank_ref.numBanksActive != 0);
> --rank_ref.numBanksActive;
1001c1011,1012
< "%d active\n", bank.bank, bank.rank, pre_at, numBanksActive);
---
> "%d active\n", bank.bank, rank_ref.rank, pre_at,
> rank_ref.numBanksActive);
1005c1016
< rankPower[bank.rank].powerlib.doCommand(MemCommand::PRE, bank.bank,
---
> rank_ref.power.powerlib.doCommand(MemCommand::PRE, bank.bank,
1009c1020
< timeStampOffset, bank.bank, bank.rank);
---
> timeStampOffset, bank.bank, rank_ref.rank);
1017,1020c1028,1031
< if (!prechargeEvent.scheduled())
< schedule(prechargeEvent, pre_done_at);
< else if (prechargeEvent.when() < pre_done_at)
< reschedule(prechargeEvent, pre_done_at);
---
> if (!rank_ref.prechargeEvent.scheduled())
> schedule(rank_ref.prechargeEvent, pre_done_at);
> else if (rank_ref.prechargeEvent.when() < pre_done_at)
> reschedule(rank_ref.prechargeEvent, pre_done_at);
1024,1035d1034
< DRAMCtrl::processPrechargeEvent()
< {
< // if we reached zero, then special conditions apply as we track
< // if all banks are precharged for the power models
< if (numBanksActive == 0) {
< // we should transition to the idle state when the last bank
< // is precharged
< schedulePowerEvent(PWR_IDLE, curTick());
< }
< }
<
< void
1040a1040,1042
> // get the rank
> Rank& rank = dram_pkt->rankRef;
>
1058c1060
< prechargeBank(bank, std::max(bank.preAllowedAt, curTick()));
---
> prechargeBank(rank, bank, std::max(bank.preAllowedAt, curTick()));
1067c1069
< activateBank(bank, act_tick, dram_pkt->row);
---
> activateBank(rank, bank, act_tick, dram_pkt->row);
1092c1094,1095
< if (bankGroupArch && (bank.bankgr == banks[j][i].bankgr)) {
---
> if (bankGroupArch &&
> (bank.bankgr == ranks[j]->banks[i].bankgr)) {
1111,1112c1114,1115
< banks[j][i].colAllowedAt = std::max(cmd_at + cmd_dly,
< banks[j][i].colAllowedAt);
---
> ranks[j]->banks[i].colAllowedAt = std::max(cmd_at + cmd_dly,
> ranks[j]->banks[i].colAllowedAt);
1191c1194
< prechargeBank(bank, std::max(curTick(), bank.preAllowedAt));
---
> prechargeBank(rank, bank, std::max(curTick(), bank.preAllowedAt));
1202c1205
< rankPower[dram_pkt->rank].powerlib.doCommand(command, dram_pkt->bank,
---
> dram_pkt->rankRef.power.powerlib.doCommand(command, dram_pkt->bank,
1238a1242,1260
> int busyRanks = 0;
> for (auto r : ranks) {
> if (!r->isAvailable()) {
> // rank is busy refreshing
> busyRanks++;
>
> // let the rank know that if it was waiting to drain, it
> // is now done and ready to proceed
> r->checkDrainDone();
> }
> }
>
> if (busyRanks == ranksPerChannel) {
> // if all ranks are refreshing wait for them to finish
> // and stall this state machine without taking any further
> // action, and do not schedule a new nextReqEvent
> return;
> }
>
1265,1280d1286
< if (refreshState != REF_IDLE) {
< // if a refresh waiting for this event loop to finish, then hand
< // over now, and do not schedule a new nextReqEvent
< if (refreshState == REF_DRAIN) {
< DPRINTF(DRAM, "Refresh drain done, now precharging\n");
<
< refreshState = REF_PRE;
<
< // hand control back to the refresh event loop
< schedule(refreshEvent, curTick());
< }
<
< // let the refresh finish before issuing any further requests
< return;
< }
<
1307a1314,1316
> // bool to check if there is a read to a free rank
> bool found_read = false;
>
1310c1319
< chooseNext(readQueue, switched_cmd_type);
---
> found_read = chooseNext(readQueue, switched_cmd_type);
1312c1321,1327
< DRAMPacket* dram_pkt = readQueue.front();
---
> // if no read to an available rank is found then return
> // at this point. There could be writes to the available ranks
> // which are above the required threshold. However, to
> // avoid adding more complexity to the code, return and wait
> // for a refresh event to kick things into action again.
> if (!found_read)
> return;
1313a1329,1330
> DRAMPacket* dram_pkt = readQueue.front();
> assert(dram_pkt->rankRef.isAvailable());
1358c1375,1386
< chooseNext(writeQueue, switched_cmd_type);
---
> // bool to check if a write to a free rank is found
> bool found_write = false;
>
> found_write = chooseNext(writeQueue, switched_cmd_type);
>
> // if no writes to an available rank are found then return.
> // There could be reads to the available ranks. However, to avoid
> // adding more complexity to the code, return at this point and wait
> // for a refresh event to kick things into action again.
> if (!found_write)
> return;
>
1359a1388
> assert(dram_pkt->rankRef.isAvailable());
1392a1422,1425
> // It is possible that a refresh to another rank kicks things back into
> // action before reaching this point.
> if (!nextReqEvent.scheduled())
> schedule(nextReqEvent, std::max(nextReqTime, curTick()));
1394,1395d1426
< schedule(nextReqEvent, std::max(nextReqTime, curTick()));
<
1422,1423c1453,1455
< for (auto p = queue.begin(); p != queue.end(); ++p) {
< got_waiting[(*p)->bankId] = true;
---
> for (const auto& p : queue) {
> if (p->rankRef.isAvailable())
> got_waiting[p->bankId] = true;
1428c1460
< uint8_t bank_id = i * banksPerRank + j;
---
> uint16_t bank_id = i * banksPerRank + j;
1432a1465,1466
> // make sure this rank is not currently refreshing.
> assert(ranks[i]->isAvailable());
1436,1438c1470,1472
< Tick act_at = banks[i][j].openRow == Bank::NO_ROW ?
< banks[i][j].actAllowedAt :
< std::max(banks[i][j].preAllowedAt, curTick()) + tRP;
---
> Tick act_at = ranks[i]->banks[j].openRow == Bank::NO_ROW ?
> ranks[i]->banks[j].actAllowedAt :
> std::max(ranks[i]->banks[j].preAllowedAt, curTick()) + tRP;
1501a1536,1544
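> // per-rank state machine: tracks the power and refresh state and
> // owns the DRAMPower instance for this rank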
> DRAMCtrl::Rank::Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p)
> : EventManager(&_memory), memory(_memory),
> pwrStateTrans(PWR_IDLE), pwrState(PWR_IDLE), pwrStateTick(0),
> refreshState(REF_IDLE), refreshDueAt(0),
> power(_p, false), numBanksActive(0),
> activateEvent(*this), prechargeEvent(*this),
> refreshEvent(*this), powerEvent(*this)
> { }
>
1503c1546
< DRAMCtrl::processRefreshEvent()
---
> DRAMCtrl::Rank::startup(Tick ref_tick)
1504a1548,1596
> assert(ref_tick > curTick());
>
> pwrStateTick = curTick();
>
> // kick off the refresh, and give ourselves enough time to
> // precharge
> schedule(refreshEvent, ref_tick);
> }
>
> void
> DRAMCtrl::Rank::checkDrainDone()
> {
> // if this rank was waiting to drain it is now able to proceed to
> // precharge
> if (refreshState == REF_DRAIN) {
> DPRINTF(DRAM, "Refresh drain done, now precharging\n");
>
> refreshState = REF_PRE;
>
> // hand control back to the refresh event loop
> schedule(refreshEvent, curTick());
> }
> }
>
> void
> DRAMCtrl::Rank::processActivateEvent()
> {
> // we should transition to the active state as soon as any bank is active
> if (pwrState != PWR_ACT)
> // note that at this point numBanksActive could be back at
> // zero again due to a precharge scheduled in the future
> schedulePowerEvent(PWR_ACT, curTick());
> }
>
> void
> DRAMCtrl::Rank::processPrechargeEvent()
> {
> // if we reached zero, then special conditions apply as we track
> // if all banks are precharged for the power models
> if (numBanksActive == 0) {
> // we should transition to the idle state when the last bank
> // is precharged
> schedulePowerEvent(PWR_IDLE, curTick());
> }
> }
>
> void
> DRAMCtrl::Rank::processRefreshEvent()
> {
1516c1608,1609
< // let any scheduled read or write go ahead, after which it will
---
> // let any scheduled read or write to the same rank go ahead,
> // after which it will
1519c1612,1615
< if (nextReqEvent.scheduled()) {
---
> // if a request is currently being handled and it is accessing
> // this rank, then wait for it to finish
> if ((rank == memory.activeRank)
> && (memory.nextReqEvent.scheduled())) {
1541,1547c1637,1642
< for (int i = 0; i < ranksPerChannel; i++) {
< for (int j = 0; j < banksPerRank; j++) {
< // respect both causality and any existing bank
< // constraints, some banks could already have a
< // (auto) precharge scheduled
< pre_at = std::max(banks[i][j].preAllowedAt, pre_at);
< }
---
>
> for (auto &b : banks) {
> // respect both causality and any existing bank
> // constraints, some banks could already have a
> // (auto) precharge scheduled
> pre_at = std::max(b.preAllowedAt, pre_at);
1550c1645
< // make sure all banks are precharged, and for those that
---
> // make sure all banks in this rank are precharged, and for those that
1552c1647
< Tick act_allowed_at = pre_at + tRP;
---
> Tick act_allowed_at = pre_at + memory.tRP;
1554,1563c1649,1654
< for (int i = 0; i < ranksPerChannel; i++) {
< for (int j = 0; j < banksPerRank; j++) {
< if (banks[i][j].openRow != Bank::NO_ROW) {
< prechargeBank(banks[i][j], pre_at, false);
< } else {
< banks[i][j].actAllowedAt =
< std::max(banks[i][j].actAllowedAt, act_allowed_at);
< banks[i][j].preAllowedAt =
< std::max(banks[i][j].preAllowedAt, pre_at);
< }
---
> for (auto &b : banks) {
> if (b.openRow != Bank::NO_ROW) {
> memory.prechargeBank(*this, b, pre_at, false);
> } else {
> b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at);
> b.preAllowedAt = std::max(b.preAllowedAt, pre_at);
1564a1656
> }
1566,1569c1658,1661
< // at the moment this affects all ranks
< rankPower[i].powerlib.doCommand(MemCommand::PREA, 0,
< divCeil(pre_at, tCK) -
< timeStampOffset);
---
> // precharge all banks in rank
> power.powerlib.doCommand(MemCommand::PREA, 0,
> divCeil(pre_at, memory.tCK) -
> memory.timeStampOffset);
1571,1573c1663,1665
< DPRINTF(DRAMPower, "%llu,PREA,0,%d\n", divCeil(pre_at, tCK) -
< timeStampOffset, i);
< }
---
> DPRINTF(DRAMPower, "%llu,PREA,0,%d\n",
> divCeil(pre_at, memory.tCK) -
> memory.timeStampOffset, rank);
1598c1690
< Tick ref_done_at = curTick() + tRFC;
---
> Tick ref_done_at = curTick() + memory.tRFC;
1600,1603c1692,1694
< for (int i = 0; i < ranksPerChannel; i++) {
< for (int j = 0; j < banksPerRank; j++) {
< banks[i][j].actAllowedAt = ref_done_at;
< }
---
> for (auto &b : banks) {
> b.actAllowedAt = ref_done_at;
> }
1605,1608c1696,1699
< // at the moment this affects all ranks
< rankPower[i].powerlib.doCommand(MemCommand::REF, 0,
< divCeil(curTick(), tCK) -
< timeStampOffset);
---
> // refresh all banks in this rank
> power.powerlib.doCommand(MemCommand::REF, 0,
> divCeil(curTick(), memory.tCK) -
> memory.timeStampOffset);
1610,1613c1701,1704
< // at the moment sort the list of commands and update the counters
< // for DRAMPower libray when doing a refresh
< sort(rankPower[i].powerlib.cmdList.begin(),
< rankPower[i].powerlib.cmdList.end(), DRAMCtrl::sortTime);
---
> // at the moment sort the list of commands and update the counters
> // for the DRAMPower library when doing a refresh
> sort(power.powerlib.cmdList.begin(),
> power.powerlib.cmdList.end(), DRAMCtrl::sortTime);
1615,1623c1706,1714
< // update the counters for DRAMPower, passing false to
< // indicate that this is not the last command in the
< // list. DRAMPower requires this information for the
< // correct calculation of the background energy at the end
< // of the simulation. Ideally we would want to call this
< // function with true once at the end of the
< // simulation. However, the discarded energy is extremly
< // small and does not effect the final results.
< rankPower[i].powerlib.updateCounters(false);
---
> // update the counters for DRAMPower, passing false to
> // indicate that this is not the last command in the
> // list. DRAMPower requires this information for the
> // correct calculation of the background energy at the end
> // of the simulation. Ideally we would want to call this
> // function with true once at the end of the
> // simulation. However, the discarded energy is extremely
> // small and does not affect the final results.
> power.powerlib.updateCounters(false);
1625,1626c1716,1717
< // call the energy function
< rankPower[i].powerlib.calcEnergy();
---
> // call the energy function
> power.powerlib.calcEnergy();
1628,1629c1719,1720
< // Update the stats
< updatePowerStats(i);
---
> // Update the stats
> updatePowerStats();
1631,1633c1722,1723
< DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), tCK) -
< timeStampOffset, i);
< }
---
> DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), memory.tCK) -
> memory.timeStampOffset, rank);
1637c1727
< if (refreshDueAt + tREFI < ref_done_at) {
---
> if (refreshDueAt + memory.tREFI < ref_done_at) {
1643c1733
< schedule(refreshEvent, refreshDueAt + tREFI - tRP);
---
> schedule(refreshEvent, refreshDueAt + memory.tREFI - memory.tRP);
1653c1743
< ref_done_at, refreshDueAt + tREFI);
---
> ref_done_at, refreshDueAt + memory.tREFI);
1658c1748
< DRAMCtrl::schedulePowerEvent(PowerState pwr_state, Tick tick)
---
> DRAMCtrl::Rank::schedulePowerEvent(PowerState pwr_state, Tick tick)
1679c1769
< DRAMCtrl::processPowerEvent()
---
> DRAMCtrl::Rank::processPowerEvent()
1701,1702c1791,1794
< assert(!nextReqEvent.scheduled());
< schedule(nextReqEvent, curTick());
---
> // a request event could already be scheduled by the state
> // machine of another rank
> if (!memory.nextReqEvent.scheduled())
> schedule(memory.nextReqEvent, curTick());
1733c1825
< DRAMCtrl::updatePowerStats(uint8_t rank)
---
> DRAMCtrl::Rank::updatePowerStats()
1737,1739c1829,1831
< rankPower[rank].powerlib.getEnergy();
< Data::MemoryPowerModel::Power power =
< rankPower[rank].powerlib.getPower();
---
> power.powerlib.getEnergy();
> Data::MemoryPowerModel::Power rank_power =
> power.powerlib.getPower();
1741,1749c1833,1841
< actEnergy[rank] = energy.act_energy * devicesPerRank;
< preEnergy[rank] = energy.pre_energy * devicesPerRank;
< readEnergy[rank] = energy.read_energy * devicesPerRank;
< writeEnergy[rank] = energy.write_energy * devicesPerRank;
< refreshEnergy[rank] = energy.ref_energy * devicesPerRank;
< actBackEnergy[rank] = energy.act_stdby_energy * devicesPerRank;
< preBackEnergy[rank] = energy.pre_stdby_energy * devicesPerRank;
< totalEnergy[rank] = energy.total_energy * devicesPerRank;
< averagePower[rank] = power.average_power * devicesPerRank;
---
> actEnergy = energy.act_energy * memory.devicesPerRank;
> preEnergy = energy.pre_energy * memory.devicesPerRank;
> readEnergy = energy.read_energy * memory.devicesPerRank;
> writeEnergy = energy.write_energy * memory.devicesPerRank;
> refreshEnergy = energy.ref_energy * memory.devicesPerRank;
> actBackEnergy = energy.act_stdby_energy * memory.devicesPerRank;
> preBackEnergy = energy.pre_stdby_energy * memory.devicesPerRank;
> totalEnergy = energy.total_energy * memory.devicesPerRank;
> averagePower = rank_power.average_power * memory.devicesPerRank;
1752a1845,1895
> DRAMCtrl::Rank::regStats()
> {
> using namespace Stats;
>
> pwrStateTime
> .init(5)
> .name(name() + ".memoryStateTime")
> .desc("Time in different power states");
> pwrStateTime.subname(0, "IDLE");
> pwrStateTime.subname(1, "REF");
> pwrStateTime.subname(2, "PRE_PDN");
> pwrStateTime.subname(3, "ACT");
> pwrStateTime.subname(4, "ACT_PDN");
>
> actEnergy
> .name(name() + ".actEnergy")
> .desc("Energy for activate commands per rank (pJ)");
>
> preEnergy
> .name(name() + ".preEnergy")
> .desc("Energy for precharge commands per rank (pJ)");
>
> readEnergy
> .name(name() + ".readEnergy")
> .desc("Energy for read commands per rank (pJ)");
>
> writeEnergy
> .name(name() + ".writeEnergy")
> .desc("Energy for write commands per rank (pJ)");
>
> refreshEnergy
> .name(name() + ".refreshEnergy")
> .desc("Energy for refresh commands per rank (pJ)");
>
> actBackEnergy
> .name(name() + ".actBackEnergy")
> .desc("Energy for active background per rank (pJ)");
>
> preBackEnergy
> .name(name() + ".preBackEnergy")
> .desc("Energy for precharge background per rank (pJ)");
>
> totalEnergy
> .name(name() + ".totalEnergy")
> .desc("Total energy per rank (pJ)");
>
> averagePower
> .name(name() + ".averagePower")
> .desc("Core power per rank (mW)");
> }
> void
1758a1902,1905
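> // register the per-rank statistics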
> for (auto r : ranks) {
> r->regStats();
> }
>
1970d2116
<
2006,2060d2151
<
< pwrStateTime
< .init(5)
< .name(name() + ".memoryStateTime")
< .desc("Time in different power states");
< pwrStateTime.subname(0, "IDLE");
< pwrStateTime.subname(1, "REF");
< pwrStateTime.subname(2, "PRE_PDN");
< pwrStateTime.subname(3, "ACT");
< pwrStateTime.subname(4, "ACT_PDN");
<
< actEnergy
< .init(ranksPerChannel)
< .name(name() + ".actEnergy")
< .desc("Energy for activate commands per rank (pJ)");
<
< preEnergy
< .init(ranksPerChannel)
< .name(name() + ".preEnergy")
< .desc("Energy for precharge commands per rank (pJ)");
<
< readEnergy
< .init(ranksPerChannel)
< .name(name() + ".readEnergy")
< .desc("Energy for read commands per rank (pJ)");
<
< writeEnergy
< .init(ranksPerChannel)
< .name(name() + ".writeEnergy")
< .desc("Energy for write commands per rank (pJ)");
<
< refreshEnergy
< .init(ranksPerChannel)
< .name(name() + ".refreshEnergy")
< .desc("Energy for refresh commands per rank (pJ)");
<
< actBackEnergy
< .init(ranksPerChannel)
< .name(name() + ".actBackEnergy")
< .desc("Energy for active background per rank (pJ)");
<
< preBackEnergy
< .init(ranksPerChannel)
< .name(name() + ".preBackEnergy")
< .desc("Energy for precharge background per rank (pJ)");
<
< totalEnergy
< .init(ranksPerChannel)
< .name(name() + ".totalEnergy")
< .desc("Total energy per rank (pJ)");
<
< averagePower
< .init(ranksPerChannel)
< .name(name() + ".averagePower")
< .desc("Core power per rank (mW)");