dram_ctrl.cc (10617:471d390943f0) → dram_ctrl.cc (10618:bb665366cc00)
1/*
2 * Copyright (c) 2010-2014 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software

--- 26 unchanged lines hidden (view full) ---

35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Andreas Hansson
41 * Ani Udipi
42 * Neha Agarwal
1/*
2 * Copyright (c) 2010-2014 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software

--- 26 unchanged lines hidden (view full) ---

35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Andreas Hansson
41 * Ani Udipi
42 * Neha Agarwal
43 * Omar Naji
43 */
44
45#include "base/bitfield.hh"
46#include "base/trace.hh"
47#include "debug/DRAM.hh"
48#include "debug/DRAMPower.hh"
49#include "debug/DRAMState.hh"
50#include "debug/Drain.hh"
51#include "mem/dram_ctrl.hh"
52#include "sim/system.hh"
53
54using namespace std;
55using namespace Data;
56
57DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) :
58 AbstractMemory(p),
59 port(name() + ".port", *this),
60 retryRdReq(false), retryWrReq(false),
61 busState(READ),
44 */
45
46#include "base/bitfield.hh"
47#include "base/trace.hh"
48#include "debug/DRAM.hh"
49#include "debug/DRAMPower.hh"
50#include "debug/DRAMState.hh"
51#include "debug/Drain.hh"
52#include "mem/dram_ctrl.hh"
53#include "sim/system.hh"
54
55using namespace std;
56using namespace Data;
57
58DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) :
59 AbstractMemory(p),
60 port(name() + ".port", *this),
61 retryRdReq(false), retryWrReq(false),
62 busState(READ),
62 nextReqEvent(this), respondEvent(this), activateEvent(this),
63 prechargeEvent(this), refreshEvent(this), powerEvent(this),
63 nextReqEvent(this), respondEvent(this),
64 drainManager(NULL),
65 deviceSize(p->device_size),
66 deviceBusWidth(p->device_bus_width), burstLength(p->burst_length),
67 deviceRowBufferSize(p->device_rowbuffer_size),
68 devicesPerRank(p->devices_per_rank),
69 burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8),
70 rowBufferSize(devicesPerRank * deviceRowBufferSize),
71 columnsPerRowBuffer(rowBufferSize / burstSize),

--- 12 unchanged lines hidden (view full) ---

84 tCCD_L(p->tCCD_L), tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS),
85 tWR(p->tWR), tRTP(p->tRTP), tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD),
86 tRRD_L(p->tRRD_L), tXAW(p->tXAW), activationLimit(p->activation_limit),
87 memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
88 pageMgmt(p->page_policy),
89 maxAccessesPerRow(p->max_accesses_per_row),
90 frontendLatency(p->static_frontend_latency),
91 backendLatency(p->static_backend_latency),
64 drainManager(NULL),
65 deviceSize(p->device_size),
66 deviceBusWidth(p->device_bus_width), burstLength(p->burst_length),
67 deviceRowBufferSize(p->device_rowbuffer_size),
68 devicesPerRank(p->devices_per_rank),
69 burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8),
70 rowBufferSize(devicesPerRank * deviceRowBufferSize),
71 columnsPerRowBuffer(rowBufferSize / burstSize),

--- 12 unchanged lines hidden (view full) ---

84 tCCD_L(p->tCCD_L), tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS),
85 tWR(p->tWR), tRTP(p->tRTP), tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD),
86 tRRD_L(p->tRRD_L), tXAW(p->tXAW), activationLimit(p->activation_limit),
87 memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
88 pageMgmt(p->page_policy),
89 maxAccessesPerRow(p->max_accesses_per_row),
90 frontendLatency(p->static_frontend_latency),
91 backendLatency(p->static_backend_latency),
92 busBusyUntil(0), refreshDueAt(0), refreshState(REF_IDLE),
93 pwrStateTrans(PWR_IDLE), pwrState(PWR_IDLE), prevArrival(0),
94 nextReqTime(0), pwrStateTick(0), numBanksActive(0),
95 activeRank(0), timeStampOffset(0)
92 busBusyUntil(0), prevArrival(0),
93 nextReqTime(0), activeRank(0), timeStampOffset(0)
96{
94{
97 // create the bank states based on the dimensions of the ranks and
98 // banks
99 banks.resize(ranksPerChannel);
100
101 // create list of DRAMPower objects, one instance per rank.
102 for (int i = 0; i < ranksPerChannel; i++) {
95 for (int i = 0; i < ranksPerChannel; i++) {
103 DRAMPower drampower = DRAMPower(p, false);
104 rankPower.emplace_back(drampower);
105 }
96 Rank* rank = new Rank(*this, p);
97 ranks.push_back(rank);
106
98
107 actTicks.resize(ranksPerChannel);
108 for (size_t c = 0; c < ranksPerChannel; ++c) {
109 banks[c].resize(banksPerRank);
110 actTicks[c].resize(activationLimit, 0);
111 }
99 rank->actTicks.resize(activationLimit, 0);
100 rank->banks.resize(banksPerRank);
101 rank->rank = i;
112
102
113 // set the bank indices
114 for (int r = 0; r < ranksPerChannel; r++) {
115 for (int b = 0; b < banksPerRank; b++) {
103 for (int b = 0; b < banksPerRank; b++) {
116 banks[r][b].rank = r;
117 banks[r][b].bank = b;
104 rank->banks[b].bank = b;
118 // GDDR addressing of banks to BG is linear.
119 // Here we assume that all DRAM generations address bank groups as
120 // follows:
121 if (bankGroupArch) {
122 // Simply assign lower bits to bank group in order to
123 // rotate across bank groups as banks are incremented
124 // e.g. with 4 banks per bank group and 16 banks total:
125 // banks 0,4,8,12 are in bank group 0
126 // banks 1,5,9,13 are in bank group 1
127 // banks 2,6,10,14 are in bank group 2
128 // banks 3,7,11,15 are in bank group 3
105 // GDDR addressing of banks to BG is linear.
106 // Here we assume that all DRAM generations address bank groups as
107 // follows:
108 if (bankGroupArch) {
109 // Simply assign lower bits to bank group in order to
110 // rotate across bank groups as banks are incremented
111 // e.g. with 4 banks per bank group and 16 banks total:
112 // banks 0,4,8,12 are in bank group 0
113 // banks 1,5,9,13 are in bank group 1
114 // banks 2,6,10,14 are in bank group 2
115 // banks 3,7,11,15 are in bank group 3
129 banks[r][b].bankgr = b % bankGroupsPerRank;
116 rank->banks[b].bankgr = b % bankGroupsPerRank;
130 } else {
131 // No bank groups; simply assign to bank number
117 } else {
118 // No bank groups; simply assign to bank number
132 banks[r][b].bankgr = b;
119 rank->banks[b].bankgr = b;
133 }
134 }
135 }
136
137 // perform a basic check of the write thresholds
138 if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
139 fatal("Write buffer low threshold %d must be smaller than the "
140 "high threshold %d\n", p->write_low_thresh_perc,

--- 108 unchanged lines hidden (view full) ---

249 }
250}
251
252void
253DRAMCtrl::startup()
254{
255 // timestamp offset should be in clock cycles for DRAMPower
256 timeStampOffset = divCeil(curTick(), tCK);
120 }
121 }
122 }
123
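// [Illustrative sketch added by the editor, not part of this changeset.]
// The modulo assignment above rotates consecutive bank numbers across the
// bank groups. A minimal standalone example for the 16-bank / 4-group case
// described in the comment (all names below are local to the sketch):
#include <cstdio>

static void printBankGroupMap()
{
    const int banksPerRank = 16;
    const int bankGroupsPerRank = 4;
    for (int b = 0; b < banksPerRank; b++) {
        // same expression as bankgr = b % bankGroupsPerRank above:
        // banks 0,4,8,12 -> group 0, banks 1,5,9,13 -> group 1, ...
        std::printf("bank %2d -> bank group %d\n", b, b % bankGroupsPerRank);
    }
}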
124 // perform a basic check of the write thresholds
125 if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
126 fatal("Write buffer low threshold %d must be smaller than the "
127 "high threshold %d\n", p->write_low_thresh_perc,

--- 108 unchanged lines hidden (view full) ---

236 }
237}
238
239void
240DRAMCtrl::startup()
241{
242 // timestamp offset should be in clock cycles for DRAMPower
243 timeStampOffset = divCeil(curTick(), tCK);
244
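// [Illustrative sketch, not part of this changeset.] DRAMPower expects
// timestamps in clock cycles, so ticks are divided by tCK and rounded up.
// Assuming divCeil is the usual round-up integer division, the conversion
// is equivalent to:
#include <cstdint>

static uint64_t ticksToCycles(uint64_t tick, uint64_t tCK)
{
    // e.g. tick = 1000 and tCK = 938 gives 2 cycles (rounded up)
    return (tick + tCK - 1) / tCK;
}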
257 // update the start tick for the precharge accounting to the
258 // current tick
245 // update the start tick for the precharge accounting to the
246 // current tick
259 pwrStateTick = curTick();
247 for (auto r : ranks) {
248 r->startup(curTick() + tREFI - tRP);
249 }
260
261 // shift the bus busy time sufficiently far ahead that we never
262 // have to worry about negative values when computing the time for
263 // the next request, this will add an insignificant bubble at the
264 // start of simulation
265 busBusyUntil = curTick() + tRP + tRCD + tCL;
250
251 // shift the bus busy time sufficiently far ahead that we never
252 // have to worry about negative values when computing the time for
253 // the next request, this will add an insignificant bubble at the
254 // start of simulation
255 busBusyUntil = curTick() + tRP + tRCD + tCL;
266
267 // kick off the refresh, and give ourselves enough time to
268 // precharge
269 schedule(refreshEvent, curTick() + tREFI - tRP);
270}
271
272Tick
273DRAMCtrl::recvAtomic(PacketPtr pkt)
274{
275 DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());
276
277 // do the actual memory access and turn the packet into a response

--- 128 unchanged lines hidden (view full) ---

406 DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
407 dramPktAddr, rank, bank, row);
408
409 // create the corresponding DRAM packet with the entry time and
410 // ready time set to the current tick, the latter will be updated
411 // later
412 uint16_t bank_id = banksPerRank * rank + bank;
413 return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
256}
257
258Tick
259DRAMCtrl::recvAtomic(PacketPtr pkt)
260{
261 DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());
262
263 // do the actual memory access and turn the packet into a response

--- 128 unchanged lines hidden (view full) ---

392 DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
393 dramPktAddr, rank, bank, row);
394
395 // create the corresponding DRAM packet with the entry time and
396 // ready time set to the current tick, the latter will be updated
397 // later
398 uint16_t bank_id = banksPerRank * rank + bank;
399 return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
414 size, banks[rank][bank]);
400 size, ranks[rank]->banks[bank], *ranks[rank]);
415}
416
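// [Illustrative sketch, not part of this changeset.] The flat bank id
// passed to the DRAMPacket above is banksPerRank * rank + bank, so the
// rank and bank can be recovered with division and modulo (the helper
// name below is made up for the sketch):
#include <cassert>
#include <cstdint>

static void checkBankIdRoundTrip(uint16_t banksPerRank,
                                 uint16_t rank, uint16_t bank)
{
    assert(bank < banksPerRank);
    const uint16_t bank_id = banksPerRank * rank + bank;
    assert(bank_id / banksPerRank == rank);
    assert(bank_id % banksPerRank == bank);
}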
417void
418DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount)
419{
420 // only add to the read queue here. whenever the request is
421 // eventually done, set the readyTime, and call schedule()
422 assert(!pkt->isWrite());

--- 327 unchanged lines hidden (view full) ---

750 // We have made a location in the queue available at this point,
751 // so if there is a read that was forced to wait, retry now
752 if (retryRdReq) {
753 retryRdReq = false;
754 port.sendRetry();
755 }
756}
757
401}
402
403void
404DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount)
405{
406 // only add to the read queue here. whenever the request is
407 // eventually done, set the readyTime, and call schedule()
408 assert(!pkt->isWrite());

--- 327 unchanged lines hidden (view full) ---

736 // We have made a location in the queue available at this point,
737 // so if there is a read that was forced to wait, retry now
738 if (retryRdReq) {
739 retryRdReq = false;
740 port.sendRetry();
741 }
742}
743
758void
744bool
759DRAMCtrl::chooseNext(std::deque<DRAMPacket*>& queue, bool switched_cmd_type)
760{
761 // This method does the arbitration between requests. The chosen
762 // packet is simply moved to the head of the queue. The other
763 // methods know that this is the place to look. For example, with
764 // FCFS, this method does nothing
765 assert(!queue.empty());
766
745DRAMCtrl::chooseNext(std::deque<DRAMPacket*>& queue, bool switched_cmd_type)
746{
747 // This method does the arbitration between requests. The chosen
748 // packet is simply moved to the head of the queue. The other
749 // methods know that this is the place to look. For example, with
750 // FCFS, this method does nothing
751 assert(!queue.empty());
752
753 // bool to indicate if a packet to an available rank is found
754 bool found_packet = false;
767 if (queue.size() == 1) {
755 if (queue.size() == 1) {
768 DPRINTF(DRAM, "Single request, nothing to do\n");
769 return;
756 DRAMPacket* dram_pkt = queue.front();
757 // available rank corresponds to state refresh idle
758 if (ranks[dram_pkt->rank]->isAvailable()) {
759 found_packet = true;
760 DPRINTF(DRAM, "Single request, going to a free rank\n");
761 } else {
762 DPRINTF(DRAM, "Single request, going to a busy rank\n");
763 }
764 return found_packet;
770 }
771
772 if (memSchedPolicy == Enums::fcfs) {
765 }
766
767 if (memSchedPolicy == Enums::fcfs) {
773 // Do nothing, since the correct request is already head
768 // check if there is a packet going to a free rank
769 for(auto i = queue.begin(); i != queue.end() ; ++i) {
770 DRAMPacket* dram_pkt = *i;
771 if (ranks[dram_pkt->rank]->isAvailable()) {
772 queue.erase(i);
773 queue.push_front(dram_pkt);
774 found_packet = true;
775 break;
776 }
777 }
774 } else if (memSchedPolicy == Enums::frfcfs) {
778 } else if (memSchedPolicy == Enums::frfcfs) {
775 reorderQueue(queue, switched_cmd_type);
779 found_packet = reorderQueue(queue, switched_cmd_type);
776 } else
777 panic("No scheduling policy chosen\n");
780 } else
781 panic("No scheduling policy chosen\n");
782 return found_packet;
778}
779
783}
784
780void
785bool
781DRAMCtrl::reorderQueue(std::deque<DRAMPacket*>& queue, bool switched_cmd_type)
782{
783 // Only determine this when needed
784 uint64_t earliest_banks = 0;
785
786 // Search for row hits first, if no row hit is found then schedule the
787 // packet to one of the earliest banks available
786DRAMCtrl::reorderQueue(std::deque<DRAMPacket*>& queue, bool switched_cmd_type)
787{
788 // Only determine this when needed
789 uint64_t earliest_banks = 0;
790
791 // Search for row hits first, if no row hit is found then schedule the
792 // packet to one of the earliest banks available
793 bool found_packet = false;
788 bool found_earliest_pkt = false;
789 bool found_prepped_diff_rank_pkt = false;
794 bool found_earliest_pkt = false;
795 bool found_prepped_diff_rank_pkt = false;
790 auto selected_pkt_it = queue.begin();
796 auto selected_pkt_it = queue.end();
791
792 for (auto i = queue.begin(); i != queue.end() ; ++i) {
793 DRAMPacket* dram_pkt = *i;
794 const Bank& bank = dram_pkt->bankRef;
797
798 for (auto i = queue.begin(); i != queue.end() ; ++i) {
799 DRAMPacket* dram_pkt = *i;
800 const Bank& bank = dram_pkt->bankRef;
801 // check if rank is busy. If this is the case jump to the next packet
795 // Check if it is a row hit
802 // Check if it is a row hit
796 if (bank.openRow == dram_pkt->row) {
797 if (dram_pkt->rank == activeRank || switched_cmd_type) {
798 // FCFS within the hits, giving priority to commands
799 // that access the same rank as the previous burst
800 // to minimize bus turnaround delays
801 // Only give rank priority when command type is not changing
802 DPRINTF(DRAM, "Row buffer hit\n");
803 selected_pkt_it = i;
804 break;
805 } else if (!found_prepped_diff_rank_pkt) {
806 // found row hit for command on different rank than prev burst
807 selected_pkt_it = i;
808 found_prepped_diff_rank_pkt = true;
809 }
810 } else if (!found_earliest_pkt & !found_prepped_diff_rank_pkt) {
811 // No row hit and
812 // haven't found an entry with a row hit to a new rank
813 if (earliest_banks == 0)
814 // Determine entries with earliest bank prep delay
815 // Function will give priority to commands that access the
816 // same rank as previous burst and can prep the bank seamlessly
817 earliest_banks = minBankPrep(queue, switched_cmd_type);
803 if (dram_pkt->rankRef.isAvailable()) {
804 if (bank.openRow == dram_pkt->row) {
805 if (dram_pkt->rank == activeRank || switched_cmd_type) {
806 // FCFS within the hits, giving priority to commands
807 // that access the same rank as the previous burst
808 // to minimize bus turnaround delays
809 // Only give rank priority when command type is
810 // not changing
811 DPRINTF(DRAM, "Row buffer hit\n");
812 selected_pkt_it = i;
813 break;
814 } else if (!found_prepped_diff_rank_pkt) {
815 // found row hit for command on different rank
816 // than prev burst
817 selected_pkt_it = i;
818 found_prepped_diff_rank_pkt = true;
819 }
820 } else if (!found_earliest_pkt & !found_prepped_diff_rank_pkt) {
821 // packet going to a rank which is currently not waiting for a
822 // refresh, no row hit, and
823 // haven't found an entry with a row hit to a new rank
824 if (earliest_banks == 0)
825 // Determine entries with earliest bank prep delay
826 // Function will give priority to commands that access the
827 // same rank as previous burst and can prep
828 // the bank seamlessly
829 earliest_banks = minBankPrep(queue, switched_cmd_type);
818
830
819 // FCFS - Bank is first available bank
820 if (bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) {
821 // Remember the packet to be scheduled to one of the earliest
822 // banks available, FCFS amongst the earliest banks
823 selected_pkt_it = i;
824 found_earliest_pkt = true;
831 // FCFS - Bank is first available bank
832 if (bits(earliest_banks, dram_pkt->bankId,
833 dram_pkt->bankId)) {
834 // Remember the packet to be scheduled to one of
835 // the earliest banks available, FCFS amongst the
836 // earliest banks
837 selected_pkt_it = i;
838 // if the packet found is going to a rank that is currently
839 // not busy, then mark that a packet has been found
840 found_earliest_pkt = true;
841 }
825 }
826 }
827 }
828
842 }
843 }
844 }
845
829 DRAMPacket* selected_pkt = *selected_pkt_it;
830 queue.erase(selected_pkt_it);
831 queue.push_front(selected_pkt);
846 if (selected_pkt_it != queue.end()) {
847 DRAMPacket* selected_pkt = *selected_pkt_it;
848 queue.erase(selected_pkt_it);
849 queue.push_front(selected_pkt);
850 found_packet = true;
851 }
852 return found_packet;
832}
833
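// [Illustrative sketch, not part of this changeset.] reorderQueue()
// implements first-ready FCFS: the oldest row hit wins, with extra
// tie-breaking on rank and earliest bank preparation. A stripped-down
// version of the core idea, ignoring ranks, bank groups and refresh
// (types and names are local to the sketch):
#include <deque>
#include <cstdint>

struct SketchReq { uint32_t row; uint32_t openRow; };

static std::deque<SketchReq>::iterator
pickFirstReadyFcfs(std::deque<SketchReq>& queue)
{
    // assumes a non-empty queue, as the caller does
    for (auto it = queue.begin(); it != queue.end(); ++it) {
        if (it->row == it->openRow)
            return it;          // oldest row hit
    }
    return queue.begin();       // no row hit: fall back to plain FCFS
}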
834void
835DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
836{
837 DPRINTF(DRAM, "Responding to Address %lld.. ",pkt->getAddr());
838
839 bool needsResponse = pkt->needsResponse();

--- 19 unchanged lines hidden (view full) ---

859 }
860
861 DPRINTF(DRAM, "Done\n");
862
863 return;
864}
865
866void
853}
854
855void
856DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
857{
858 DPRINTF(DRAM, "Responding to Address %lld.. ",pkt->getAddr());
859
860 bool needsResponse = pkt->needsResponse();

--- 19 unchanged lines hidden (view full) ---

880 }
881
882 DPRINTF(DRAM, "Done\n");
883
884 return;
885}
886
887void
867DRAMCtrl::activateBank(Bank& bank, Tick act_tick, uint32_t row)
888DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref,
889 Tick act_tick, uint32_t row)
868{
890{
869 // get the rank index from the bank
870 uint8_t rank = bank.rank;
891 assert(rank_ref.actTicks.size() == activationLimit);
871
892
872 assert(actTicks[rank].size() == activationLimit);
873
874 DPRINTF(DRAM, "Activate at tick %d\n", act_tick);
875
876 // update the open row
893 DPRINTF(DRAM, "Activate at tick %d\n", act_tick);
894
895 // update the open row
877 assert(bank.openRow == Bank::NO_ROW);
878 bank.openRow = row;
896 assert(bank_ref.openRow == Bank::NO_ROW);
897 bank_ref.openRow = row;
879
880 // start counting anew, this covers both the case when we
881 // auto-precharged, and when this access is forced to
882 // precharge
898
899 // start counting anew, this covers both the case when we
900 // auto-precharged, and when this access is forced to
901 // precharge
883 bank.bytesAccessed = 0;
884 bank.rowAccesses = 0;
902 bank_ref.bytesAccessed = 0;
903 bank_ref.rowAccesses = 0;
885
904
886 ++numBanksActive;
887 assert(numBanksActive <= banksPerRank * ranksPerChannel);
905 ++rank_ref.numBanksActive;
906 assert(rank_ref.numBanksActive <= banksPerRank);
888
889 DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got %d active\n",
907
908 DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got %d active\n",
890 bank.bank, bank.rank, act_tick, numBanksActive);
909 bank_ref.bank, rank_ref.rank, act_tick,
910 ranks[rank_ref.rank]->numBanksActive);
891
911
892 rankPower[bank.rank].powerlib.doCommand(MemCommand::ACT, bank.bank,
893 divCeil(act_tick, tCK) -
894 timeStampOffset);
912 rank_ref.power.powerlib.doCommand(MemCommand::ACT, bank_ref.bank,
913 divCeil(act_tick, tCK) -
914 timeStampOffset);
895
896 DPRINTF(DRAMPower, "%llu,ACT,%d,%d\n", divCeil(act_tick, tCK) -
915
916 DPRINTF(DRAMPower, "%llu,ACT,%d,%d\n", divCeil(act_tick, tCK) -
897 timeStampOffset, bank.bank, bank.rank);
917 timeStampOffset, bank_ref.bank, rank_ref.rank);
898
899 // The next access has to respect tRAS for this bank
918
919 // The next access has to respect tRAS for this bank
900 bank.preAllowedAt = act_tick + tRAS;
920 bank_ref.preAllowedAt = act_tick + tRAS;
901
902 // Respect the row-to-column command delay
921
922 // Respect the row-to-column command delay
903 bank.colAllowedAt = std::max(act_tick + tRCD, bank.colAllowedAt);
923 bank_ref.colAllowedAt = std::max(act_tick + tRCD, bank_ref.colAllowedAt);
904
905 // start by enforcing tRRD
906 for(int i = 0; i < banksPerRank; i++) {
907 // next activate to any bank in this rank must not happen
908 // before tRRD
924
925 // start by enforcing tRRD
926 for(int i = 0; i < banksPerRank; i++) {
927 // next activate to any bank in this rank must not happen
928 // before tRRD
909 if (bankGroupArch && (bank.bankgr == banks[rank][i].bankgr)) {
929 if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) {
910 // bank group architecture requires longer delays between
911 // ACT commands within the same bank group. Use tRRD_L
912 // in this case
930 // bank group architecture requires longer delays between
931 // ACT commands within the same bank group. Use tRRD_L
932 // in this case
913 banks[rank][i].actAllowedAt = std::max(act_tick + tRRD_L,
914 banks[rank][i].actAllowedAt);
933 rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD_L,
934 rank_ref.banks[i].actAllowedAt);
915 } else {
916 // use shorter tRRD value when either
917 // 1) bank group architecture is not supported
918 // 2) bank is in a different bank group
935 } else {
936 // use shorter tRRD value when either
937 // 1) bank group architecture is not supported
938 // 2) bank is in a different bank group
919 banks[rank][i].actAllowedAt = std::max(act_tick + tRRD,
920 banks[rank][i].actAllowedAt);
939 rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD,
940 rank_ref.banks[i].actAllowedAt);
921 }
922 }
923
924 // next, we deal with tXAW, if the activation limit is disabled
925 // then we directly schedule an activate power event
941 }
942 }
943
944 // next, we deal with tXAW, if the activation limit is disabled
945 // then we directly schedule an activate power event
926 if (!actTicks[rank].empty()) {
946 if (!rank_ref.actTicks.empty()) {
927 // sanity check
947 // sanity check
928 if (actTicks[rank].back() &&
929 (act_tick - actTicks[rank].back()) < tXAW) {
948 if (rank_ref.actTicks.back() &&
949 (act_tick - rank_ref.actTicks.back()) < tXAW) {
930 panic("Got %d activates in window %d (%llu - %llu) which "
931 "is smaller than %llu\n", activationLimit, act_tick -
950 panic("Got %d activates in window %d (%llu - %llu) which "
951 "is smaller than %llu\n", activationLimit, act_tick -
932 actTicks[rank].back(), act_tick, actTicks[rank].back(),
933 tXAW);
952 rank_ref.actTicks.back(), act_tick,
953 rank_ref.actTicks.back(), tXAW);
934 }
935
936 // shift the times used for the book keeping, the last element
937 // (highest index) is the oldest one and hence the lowest value
954 }
955
956 // shift the times used for the book keeping, the last element
957 // (highest index) is the oldest one and hence the lowest value
938 actTicks[rank].pop_back();
958 rank_ref.actTicks.pop_back();
939
940 // record a new activation (in the future)
959
960 // record a new activation (in the future)
941 actTicks[rank].push_front(act_tick);
961 rank_ref.actTicks.push_front(act_tick);
942
943 // cannot activate more than X times in time window tXAW, push the
944 // next one (the X + 1'st activate) to be tXAW away from the
945 // oldest in our window of X
962
963 // cannot activate more than X times in time window tXAW, push the
964 // next one (the X + 1'st activate) to be tXAW away from the
965 // oldest in our window of X
946 if (actTicks[rank].back() &&
947 (act_tick - actTicks[rank].back()) < tXAW) {
966 if (rank_ref.actTicks.back() &&
967 (act_tick - rank_ref.actTicks.back()) < tXAW) {
948 DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "
949 "no earlier than %llu\n", activationLimit,
968 DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "
969 "no earlier than %llu\n", activationLimit,
950 actTicks[rank].back() + tXAW);
970 rank_ref.actTicks.back() + tXAW);
951 for(int j = 0; j < banksPerRank; j++)
952 // next activate must not happen before end of window
971 for(int j = 0; j < banksPerRank; j++)
972 // next activate must not happen before end of window
953 banks[rank][j].actAllowedAt =
954 std::max(actTicks[rank].back() + tXAW,
955 banks[rank][j].actAllowedAt);
973 rank_ref.banks[j].actAllowedAt =
974 std::max(rank_ref.actTicks.back() + tXAW,
975 rank_ref.banks[j].actAllowedAt);
956 }
957 }
958
959 // at the point when this activate takes place, make sure we
960 // transition to the active power state
976 }
977 }
978
979 // at the point when this activate takes place, make sure we
980 // transition to the active power state
961 if (!activateEvent.scheduled())
962 schedule(activateEvent, act_tick);
963 else if (activateEvent.when() > act_tick)
981 if (!rank_ref.activateEvent.scheduled())
982 schedule(rank_ref.activateEvent, act_tick);
983 else if (rank_ref.activateEvent.when() > act_tick)
964 // move it sooner in time
984 // move it sooner in time
965 reschedule(activateEvent, act_tick);
985 reschedule(rank_ref.activateEvent, act_tick);
966}
967
968void
986}
987
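// [Illustrative sketch, not part of this changeset.] The actTicks deque
// above implements the tXAW/activation-limit window: the last
// activationLimit ACT times are kept with the oldest at the back, and the
// next ACT may not start before that oldest entry plus tXAW. A standalone
// version of that bookkeeping (assumes the deque was pre-sized to the
// activation limit, as in the constructor):
#include <deque>
#include <cstdint>

static uint64_t recordActAndGetNextAllowed(std::deque<uint64_t>& actTicks,
                                           uint64_t act_tick, uint64_t tXAW)
{
    // slide the window: drop the oldest entry, record the new activate
    actTicks.pop_back();
    actTicks.push_front(act_tick);
    // a zero at the back means the window is not yet full
    if (actTicks.back() == 0)
        return act_tick;
    return actTicks.back() + tXAW;
}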
988void
969DRAMCtrl::processActivateEvent()
989DRAMCtrl::prechargeBank(Rank& rank_ref, Bank& bank, Tick pre_at, bool trace)
970{
990{
971 // we should transition to the active state as soon as any bank is active
972 if (pwrState != PWR_ACT)
973 // note that at this point numBanksActive could be back at
974 // zero again due to a precharge scheduled in the future
975 schedulePowerEvent(PWR_ACT, curTick());
976}
977
978void
979DRAMCtrl::prechargeBank(Bank& bank, Tick pre_at, bool trace)
980{
981 // make sure the bank has an open row
982 assert(bank.openRow != Bank::NO_ROW);
983
984 // sample the bytes per activate here since we are closing
985 // the page
986 bytesPerActivate.sample(bank.bytesAccessed);
987
988 bank.openRow = Bank::NO_ROW;
989
990 // no precharge allowed before this one
991 bank.preAllowedAt = pre_at;
992
993 Tick pre_done_at = pre_at + tRP;
994
995 bank.actAllowedAt = std::max(bank.actAllowedAt, pre_done_at);
996
991 // make sure the bank has an open row
992 assert(bank.openRow != Bank::NO_ROW);
993
994 // sample the bytes per activate here since we are closing
995 // the page
996 bytesPerActivate.sample(bank.bytesAccessed);
997
998 bank.openRow = Bank::NO_ROW;
999
1000 // no precharge allowed before this one
1001 bank.preAllowedAt = pre_at;
1002
1003 Tick pre_done_at = pre_at + tRP;
1004
1005 bank.actAllowedAt = std::max(bank.actAllowedAt, pre_done_at);
1006
997 assert(numBanksActive != 0);
998 --numBanksActive;
1007 assert(rank_ref.numBanksActive != 0);
1008 --rank_ref.numBanksActive;
999
1000 DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got "
1009
1010 DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got "
1001 "%d active\n", bank.bank, bank.rank, pre_at, numBanksActive);
1011 "%d active\n", bank.bank, rank_ref.rank, pre_at,
1012 rank_ref.numBanksActive);
1002
1003 if (trace) {
1004
1013
1014 if (trace) {
1015
1005 rankPower[bank.rank].powerlib.doCommand(MemCommand::PRE, bank.bank,
1016 rank_ref.power.powerlib.doCommand(MemCommand::PRE, bank.bank,
1006 divCeil(pre_at, tCK) -
1007 timeStampOffset);
1008 DPRINTF(DRAMPower, "%llu,PRE,%d,%d\n", divCeil(pre_at, tCK) -
1017 divCeil(pre_at, tCK) -
1018 timeStampOffset);
1019 DPRINTF(DRAMPower, "%llu,PRE,%d,%d\n", divCeil(pre_at, tCK) -
1009 timeStampOffset, bank.bank, bank.rank);
1020 timeStampOffset, bank.bank, rank_ref.rank);
1010 }
1011 // if we look at the current number of active banks we might be
1012 // tempted to think the DRAM is now idle, however this can be
1013 // undone by an activate that is scheduled to happen before we
1014 // would have reached the idle state, so schedule an event and
1015 // rather check once we actually make it to the point in time when
1016 // the (last) precharge takes place
1021 }
1022 // if we look at the current number of active banks we might be
1023 // tempted to think the DRAM is now idle, however this can be
1024 // undone by an activate that is scheduled to happen before we
1025 // would have reached the idle state, so schedule an event and
1026 // rather check once we actually make it to the point in time when
1027 // the (last) precharge takes place
1017 if (!prechargeEvent.scheduled())
1018 schedule(prechargeEvent, pre_done_at);
1019 else if (prechargeEvent.when() < pre_done_at)
1020 reschedule(prechargeEvent, pre_done_at);
1028 if (!rank_ref.prechargeEvent.scheduled())
1029 schedule(rank_ref.prechargeEvent, pre_done_at);
1030 else if (rank_ref.prechargeEvent.when() < pre_done_at)
1031 reschedule(rank_ref.prechargeEvent, pre_done_at);
1021}
1022
1023void
1032}
1033
1034void
1024DRAMCtrl::processPrechargeEvent()
1025{
1026 // if we reached zero, then special conditions apply as we track
1027 // if all banks are precharged for the power models
1028 if (numBanksActive == 0) {
1029 // we should transition to the idle state when the last bank
1030 // is precharged
1031 schedulePowerEvent(PWR_IDLE, curTick());
1032 }
1033}
1034
1035void
1036DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt)
1037{
1038 DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
1039 dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);
1040
1035DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt)
1036{
1037 DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
1038 dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);
1039
1040 // get the rank
1041 Rank& rank = dram_pkt->rankRef;
1042
1041 // get the bank
1042 Bank& bank = dram_pkt->bankRef;
1043
1044 // for the state we need to track if it is a row hit or not
1045 bool row_hit = true;
1046
1047 // respect any constraints on the command (e.g. tRCD or tCCD)
1048 Tick cmd_at = std::max(bank.colAllowedAt, curTick());
1049
1050 // Determine the access latency and update the bank state
1051 if (bank.openRow == dram_pkt->row) {
1052 // nothing to do
1053 } else {
1054 row_hit = false;
1055
1056 // If there is a page open, precharge it.
1057 if (bank.openRow != Bank::NO_ROW) {
1043 // get the bank
1044 Bank& bank = dram_pkt->bankRef;
1045
1046 // for the state we need to track if it is a row hit or not
1047 bool row_hit = true;
1048
1049 // respect any constraints on the command (e.g. tRCD or tCCD)
1050 Tick cmd_at = std::max(bank.colAllowedAt, curTick());
1051
1052 // Determine the access latency and update the bank state
1053 if (bank.openRow == dram_pkt->row) {
1054 // nothing to do
1055 } else {
1056 row_hit = false;
1057
1058 // If there is a page open, precharge it.
1059 if (bank.openRow != Bank::NO_ROW) {
1058 prechargeBank(bank, std::max(bank.preAllowedAt, curTick()));
1060 prechargeBank(rank, bank, std::max(bank.preAllowedAt, curTick()));
1059 }
1060
1061 // next we need to account for the delay in activating the
1062 // page
1063 Tick act_tick = std::max(bank.actAllowedAt, curTick());
1064
1065 // Record the activation and deal with all the global timing
1066 // constraints caused by a new activation (tRRD and tXAW)
1061 }
1062
1063 // next we need to account for the delay in activating the
1064 // page
1065 Tick act_tick = std::max(bank.actAllowedAt, curTick());
1066
1067 // Record the activation and deal with all the global timing
1068 // constraints caused by a new activation (tRRD and tXAW)
1067 activateBank(bank, act_tick, dram_pkt->row);
1069 activateBank(rank, bank, act_tick, dram_pkt->row);
1068
1069 // issue the command as early as possible
1070 cmd_at = bank.colAllowedAt;
1071 }
1072
1073 // we need to wait until the bus is available before we can issue
1074 // the command
1075 cmd_at = std::max(cmd_at, busBusyUntil - tCL);

--- 8 unchanged lines hidden (view full) ---

1084 // bank (add a max with tCCD/tCCD_L here)
1085 Tick cmd_dly;
1086 for(int j = 0; j < ranksPerChannel; j++) {
1087 for(int i = 0; i < banksPerRank; i++) {
1088 // next burst to same bank group in this rank must not happen
1089 // before tCCD_L. Different bank group timing requirement is
1090 // tBURST; Add tCS for different ranks
1091 if (dram_pkt->rank == j) {
1070
1071 // issue the command as early as possible
1072 cmd_at = bank.colAllowedAt;
1073 }
1074
1075 // we need to wait until the bus is available before we can issue
1076 // the command
1077 cmd_at = std::max(cmd_at, busBusyUntil - tCL);

--- 8 unchanged lines hidden (view full) ---

1086 // bank (add a max with tCCD/tCCD_L here)
1087 Tick cmd_dly;
1088 for(int j = 0; j < ranksPerChannel; j++) {
1089 for(int i = 0; i < banksPerRank; i++) {
1090 // next burst to same bank group in this rank must not happen
1091 // before tCCD_L. Different bank group timing requirement is
1092 // tBURST; Add tCS for different ranks
1093 if (dram_pkt->rank == j) {
1092 if (bankGroupArch && (bank.bankgr == banks[j][i].bankgr)) {
1094 if (bankGroupArch &&
1095 (bank.bankgr == ranks[j]->banks[i].bankgr)) {
1093 // bank group architecture requires longer delays between
1094 // RD/WR burst commands to the same bank group.
1095 // Use tCCD_L in this case
1096 cmd_dly = tCCD_L;
1097 } else {
1098 // use tBURST (equivalent to tCCD_S), the shorter
1099 // cas-to-cas delay value, when either:
1100 // 1) bank group architecture is not supported
1101 // 2) bank is in a different bank group
1102 cmd_dly = tBURST;
1103 }
1104 } else {
1105 // different rank is by default in a different bank group
1106 // use tBURST (equivalent to tCCD_S), which is the shorter
1107 // cas-to-cas delay in this case
1108 // Add tCS to account for rank-to-rank bus delay requirements
1109 cmd_dly = tBURST + tCS;
1110 }
1096 // bank group architecture requires longer delays between
1097 // RD/WR burst commands to the same bank group.
1098 // Use tCCD_L in this case
1099 cmd_dly = tCCD_L;
1100 } else {
1101 // use tBURST (equivalent to tCCD_S), the shorter
1102 // cas-to-cas delay value, when either:
1103 // 1) bank group architecture is not supported
1104 // 2) bank is in a different bank group
1105 cmd_dly = tBURST;
1106 }
1107 } else {
1108 // different rank is by default in a different bank group
1109 // use tBURST (equivalent to tCCD_S), which is the shorter
1110 // cas-to-cas delay in this case
1111 // Add tCS to account for rank-to-rank bus delay requirements
1112 cmd_dly = tBURST + tCS;
1113 }
1111 banks[j][i].colAllowedAt = std::max(cmd_at + cmd_dly,
1112 banks[j][i].colAllowedAt);
1114 ranks[j]->banks[i].colAllowedAt = std::max(cmd_at + cmd_dly,
1115 ranks[j]->banks[i].colAllowedAt);
1113 }
1114 }
1115
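// [Illustrative sketch, not part of this changeset.] The loop above picks
// the burst-to-burst delay for every other bank: tCCD_L within the same
// bank group, tBURST (the tCCD_S equivalent) across bank groups, and
// tBURST + tCS when the other bank is in a different rank. The same
// decision as a standalone helper:
#include <cstdint>

static uint64_t casToCasDelay(bool same_rank, bool same_bank_group,
                              bool bank_group_arch, uint64_t tBURST,
                              uint64_t tCCD_L, uint64_t tCS)
{
    if (!same_rank)
        return tBURST + tCS;    // add rank-to-rank bus delay
    if (bank_group_arch && same_bank_group)
        return tCCD_L;          // longer delay within a bank group
    return tBURST;              // no bank groups, or a different group
}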
1116 // Save rank of current access
1117 activeRank = dram_pkt->rank;
1118
1119 // If this is a write, we also need to respect the write recovery
1120 // time before a precharge, in the case of a read, respect the

--- 62 unchanged lines hidden (view full) ---

1183 MemCommand::cmds command = (mem_cmd == "RD") ? MemCommand::RD :
1184 MemCommand::WR;
1185
1186 // if this access should use auto-precharge, then we are
1187 // closing the row
1188 if (auto_precharge) {
1189 // if auto-precharge push a PRE command at the correct tick to the
1190 // list used by DRAMPower library to calculate power
1116 }
1117 }
1118
1119 // Save rank of current access
1120 activeRank = dram_pkt->rank;
1121
1122 // If this is a write, we also need to respect the write recovery
1123 // time before a precharge, in the case of a read, respect the

--- 62 unchanged lines hidden (view full) ---

1186 MemCommand::cmds command = (mem_cmd == "RD") ? MemCommand::RD :
1187 MemCommand::WR;
1188
1189 // if this access should use auto-precharge, then we are
1190 // closing the row
1191 if (auto_precharge) {
1192 // if auto-precharge push a PRE command at the correct tick to the
1193 // list used by DRAMPower library to calculate power
1191 prechargeBank(bank, std::max(curTick(), bank.preAllowedAt));
1194 prechargeBank(rank, bank, std::max(curTick(), bank.preAllowedAt));
1192
1193 DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
1194 }
1195
1196 // Update bus state
1197 busBusyUntil = dram_pkt->readyTime;
1198
1199 DPRINTF(DRAM, "Access to %lld, ready at %lld bus busy until %lld.\n",
1200 dram_pkt->addr, dram_pkt->readyTime, busBusyUntil);
1201
1195
1196 DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
1197 }
1198
1199 // Update bus state
1200 busBusyUntil = dram_pkt->readyTime;
1201
1202 DPRINTF(DRAM, "Access to %lld, ready at %lld bus busy until %lld.\n",
1203 dram_pkt->addr, dram_pkt->readyTime, busBusyUntil);
1204
1202 rankPower[dram_pkt->rank].powerlib.doCommand(command, dram_pkt->bank,
1205 dram_pkt->rankRef.power.powerlib.doCommand(command, dram_pkt->bank,
1203 divCeil(cmd_at, tCK) -
1204 timeStampOffset);
1205
1206 DPRINTF(DRAMPower, "%llu,%s,%d,%d\n", divCeil(cmd_at, tCK) -
1207 timeStampOffset, mem_cmd, dram_pkt->bank, dram_pkt->rank);
1208
1209 // Update the minimum timing between the requests, this is a
1210 // conservative estimate of when we have to schedule the next

--- 20 unchanged lines hidden (view full) ---

1231 bytesWritten += burstSize;
1232 perBankWrBursts[dram_pkt->bankId]++;
1233 }
1234}
1235
1236void
1237DRAMCtrl::processNextReqEvent()
1238{
1206 divCeil(cmd_at, tCK) -
1207 timeStampOffset);
1208
1209 DPRINTF(DRAMPower, "%llu,%s,%d,%d\n", divCeil(cmd_at, tCK) -
1210 timeStampOffset, mem_cmd, dram_pkt->bank, dram_pkt->rank);
1211
1212 // Update the minimum timing between the requests, this is a
1213 // conservative estimate of when we have to schedule the next

--- 20 unchanged lines hidden (view full) ---

1234 bytesWritten += burstSize;
1235 perBankWrBursts[dram_pkt->bankId]++;
1236 }
1237}
1238
1239void
1240DRAMCtrl::processNextReqEvent()
1241{
1242 int busyRanks = 0;
1243 for (auto r : ranks) {
1244 if (!r->isAvailable()) {
1245 // rank is busy refreshing
1246 busyRanks++;
1247
1248 // let the rank know that if it was waiting to drain, it
1249 // is now done and ready to proceed
1250 r->checkDrainDone();
1251 }
1252 }
1253
1254 if (busyRanks == ranksPerChannel) {
1255 // if all ranks are refreshing wait for them to finish
1256 // and stall this state machine without taking any further
1257 // action, and do not schedule a new nextReqEvent
1258 return;
1259 }
1260
1239 // pre-emptively set to false. Overwrite if in READ_TO_WRITE
1240 // or WRITE_TO_READ state
1241 bool switched_cmd_type = false;
1242 if (busState == READ_TO_WRITE) {
1243 DPRINTF(DRAM, "Switching to writes after %d reads with %d reads "
1244 "waiting\n", readsThisTime, readQueue.size());
1245
1246 // sample and reset the read-related stats as we are now

--- 10 unchanged lines hidden (view full) ---

1257
1258 wrPerTurnAround.sample(writesThisTime);
1259 writesThisTime = 0;
1260
1261 busState = READ;
1262 switched_cmd_type = true;
1263 }
1264
1261 // pre-emptively set to false. Overwrite if in READ_TO_WRITE
1262 // or WRITE_TO_READ state
1263 bool switched_cmd_type = false;
1264 if (busState == READ_TO_WRITE) {
1265 DPRINTF(DRAM, "Switching to writes after %d reads with %d reads "
1266 "waiting\n", readsThisTime, readQueue.size());
1267
1268 // sample and reset the read-related stats as we are now

--- 10 unchanged lines hidden (view full) ---

1279
1280 wrPerTurnAround.sample(writesThisTime);
1281 writesThisTime = 0;
1282
1283 busState = READ;
1284 switched_cmd_type = true;
1285 }
1286
1265 if (refreshState != REF_IDLE) {
1266 // if a refresh waiting for this event loop to finish, then hand
1267 // over now, and do not schedule a new nextReqEvent
1268 if (refreshState == REF_DRAIN) {
1269 DPRINTF(DRAM, "Refresh drain done, now precharging\n");
1270
1271 refreshState = REF_PRE;
1272
1273 // hand control back to the refresh event loop
1274 schedule(refreshEvent, curTick());
1275 }
1276
1277 // let the refresh finish before issuing any further requests
1278 return;
1279 }
1280
1281 // when we get here it is either a read or a write
1282 if (busState == READ) {
1283
1284 // track if we should switch or not
1285 bool switch_to_writes = false;
1286
1287 if (readQueue.empty()) {
1288 // In the case there is no read request to go next,

--- 11 unchanged lines hidden (view full) ---

1300 drainManager = NULL;
1301 }
1302
1303 // nothing to do, not even any point in scheduling an
1304 // event for the next request
1305 return;
1306 }
1307 } else {
1287 // when we get here it is either a read or a write
1288 if (busState == READ) {
1289
1290 // track if we should switch or not
1291 bool switch_to_writes = false;
1292
1293 if (readQueue.empty()) {
1294 // In the case there is no read request to go next,

--- 11 unchanged lines hidden (view full) ---

1306 drainManager = NULL;
1307 }
1308
1309 // nothing to do, not even any point in scheduling an
1310 // event for the next request
1311 return;
1312 }
1313 } else {
1314 // bool to check if there is a read to a free rank
1315 bool found_read = false;
1316
1308 // Figure out which read request goes next, and move it to the
1309 // front of the read queue
1317 // Figure out which read request goes next, and move it to the
1318 // front of the read queue
1310 chooseNext(readQueue, switched_cmd_type);
1319 found_read = chooseNext(readQueue, switched_cmd_type);
1311
1320
1312 DRAMPacket* dram_pkt = readQueue.front();
1321 // if no read to an available rank is found then return
1322 // at this point. There could be writes to the available ranks
1323 // which are above the required threshold. However, to
1324 // avoid adding more complexity to the code, return and wait
1325 // for a refresh event to kick things into action again.
1326 if (!found_read)
1327 return;
1313
1328
1329 DRAMPacket* dram_pkt = readQueue.front();
1330 assert(dram_pkt->rankRef.isAvailable());
1314 // here we get a bit creative and shift the bus busy time not
1315 // just the tWTR, but also a CAS latency to capture the fact
1316 // that we are allowed to prepare a new bank, but not issue a
1317 // read command until after tWTR, in essence we capture a
1318 // bubble on the data bus that is tWTR + tCL
1319 if (switched_cmd_type && dram_pkt->rank == activeRank) {
1320 busBusyUntil += tWTR + tCL;
1321 }

--- 28 unchanged lines hidden (view full) ---

1350 // switching to writes, either because the read queue is empty
1351 // and the writes have passed the low threshold (or we are
1352 // draining), or because the writes hit the high threshold
1353 if (switch_to_writes) {
1354 // transition to writing
1355 busState = READ_TO_WRITE;
1356 }
1357 } else {
1331 // here we get a bit creative and shift the bus busy time not
1332 // just the tWTR, but also a CAS latency to capture the fact
1333 // that we are allowed to prepare a new bank, but not issue a
1334 // read command until after tWTR, in essence we capture a
1335 // bubble on the data bus that is tWTR + tCL
1336 if (switched_cmd_type && dram_pkt->rank == activeRank) {
1337 busBusyUntil += tWTR + tCL;
1338 }

--- 28 unchanged lines hidden (view full) ---

1367 // switching to writes, either because the read queue is empty
1368 // and the writes have passed the low threshold (or we are
1369 // draining), or because the writes hit the high threshold
1370 if (switch_to_writes) {
1371 // transition to writing
1372 busState = READ_TO_WRITE;
1373 }
1374 } else {
1358 chooseNext(writeQueue, switched_cmd_type);
1375 // bool to check if a write to a free rank is found
1376 bool found_write = false;
1377
1378 found_write = chooseNext(writeQueue, switched_cmd_type);
1379
1380 // if no writes to an available rank are found then return.
1381 // There could be reads to the available ranks. However, to avoid
1382 // adding more complexity to the code, return at this point and wait
1383 // for a refresh event to kick things into action again.
1384 if (!found_write)
1385 return;
1386
1359 DRAMPacket* dram_pkt = writeQueue.front();
1387 DRAMPacket* dram_pkt = writeQueue.front();
1388 assert(dram_pkt->rankRef.isAvailable());
1360 // sanity check
1361 assert(dram_pkt->size <= burstSize);
1362
1363 // add a bubble to the data bus, as defined by the
1364 // tRTW when access is to the same rank as previous burst
1365 // Different rank timing is handled with tCS, which is
1366 // applied to colAllowedAt
1367 if (switched_cmd_type && dram_pkt->rank == activeRank) {

--- 17 unchanged lines hidden (view full) ---

1385 busState = WRITE_TO_READ;
1386
1387 // note that we switch back to reads also in the idle
1388 // case, which eventually will check for any draining and
1389 // also pause any further scheduling if there is really
1390 // nothing to do
1391 }
1392 }
1389 // sanity check
1390 assert(dram_pkt->size <= burstSize);
1391
1392 // add a bubble to the data bus, as defined by the
1393 // tRTW when access is to the same rank as previous burst
1394 // Different rank timing is handled with tCS, which is
1395 // applied to colAllowedAt
1396 if (switched_cmd_type && dram_pkt->rank == activeRank) {

--- 17 unchanged lines hidden (view full) ---

1414 busState = WRITE_TO_READ;
1415
1416 // note that we switch back to reads also in the idle
1417 // case, which eventually will check for any draining and
1418 // also pause any further scheduling if there is really
1419 // nothing to do
1420 }
1421 }
1422 // It is possible that a refresh to another rank kicks things back into
1423 // action before reaching this point.
1424 if (!nextReqEvent.scheduled())
1425 schedule(nextReqEvent, std::max(nextReqTime, curTick()));
1393
1426
1394 schedule(nextReqEvent, std::max(nextReqTime, curTick()));
1395
1396 // If there is space available and we have writes waiting then let
1397 // them retry. This is done here to ensure that the retry does not
1398 // cause a nextReqEvent to be scheduled before we do so as part of
1399 // the next request processing
1400 if (retryWrReq && writeQueue.size() < writeBufferSize) {
1401 retryWrReq = false;
1402 port.sendRetry();
1403 }

--- 10 unchanged lines hidden (view full) ---

1414 Tick min_act_at_same_rank = MaxTick;
1415
1416 // Give precedence to commands that access same rank as previous command
1417 bool same_rank_match = false;
1418
1419 // determine if we have queued transactions targeting the
1420 // bank in question
1421 vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
1427 // If there is space available and we have writes waiting then let
1428 // them retry. This is done here to ensure that the retry does not
1429 // cause a nextReqEvent to be scheduled before we do so as part of
1430 // the next request processing
1431 if (retryWrReq && writeQueue.size() < writeBufferSize) {
1432 retryWrReq = false;
1433 port.sendRetry();
1434 }

--- 10 unchanged lines hidden (view full) ---

1445 Tick min_act_at_same_rank = MaxTick;
1446
1447 // Give precedence to commands that access same rank as previous command
1448 bool same_rank_match = false;
1449
1450 // determine if we have queued transactions targeting the
1451 // bank in question
1452 vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
1422 for (auto p = queue.begin(); p != queue.end(); ++p) {
1423 got_waiting[(*p)->bankId] = true;
1453 for (const auto& p : queue) {
1454 if(p->rankRef.isAvailable())
1455 got_waiting[p->bankId] = true;
1424 }
1425
1426 for (int i = 0; i < ranksPerChannel; i++) {
1427 for (int j = 0; j < banksPerRank; j++) {
1456 }
1457
1458 for (int i = 0; i < ranksPerChannel; i++) {
1459 for (int j = 0; j < banksPerRank; j++) {
1428 uint8_t bank_id = i * banksPerRank + j;
1460 uint16_t bank_id = i * banksPerRank + j;
1429
1430 // if we have waiting requests for the bank, and it is
1431 // amongst the first available, update the mask
1432 if (got_waiting[bank_id]) {
1461
1462 // if we have waiting requests for the bank, and it is
1463 // amongst the first available, update the mask
1464 if (got_waiting[bank_id]) {
1465 // make sure this rank is not currently refreshing.
1466 assert(ranks[i]->isAvailable());
1433 // simplistic approximation of when the bank can issue
1434 // an activate, ignoring any rank-to-rank switching
1435 // cost in this calculation
1467 // simplistic approximation of when the bank can issue
1468 // an activate, ignoring any rank-to-rank switching
1469 // cost in this calculation
1436 Tick act_at = banks[i][j].openRow == Bank::NO_ROW ?
1437 banks[i][j].actAllowedAt :
1438 std::max(banks[i][j].preAllowedAt, curTick()) + tRP;
1470 Tick act_at = ranks[i]->banks[j].openRow == Bank::NO_ROW ?
1471 ranks[i]->banks[j].actAllowedAt :
1472 std::max(ranks[i]->banks[j].preAllowedAt, curTick()) + tRP;
1439
1440 // prioritize commands that access the
1441 // same rank as previous burst
1442 // Calculate bank mask separately for the case and
1443 // evaluate after loop iterations complete
1444 if (i == activeRank && ranksPerChannel > 1) {
1445 if (act_at <= min_act_at_same_rank) {
1446 // reset same rank bank mask if new minimum is found

--- 47 unchanged lines hidden (view full) ---

1494 if ((bank_mask == 0) || (!switched_cmd_type && same_rank_match &&
1495 min_act_at_same_rank <= min_cmd_at)) {
1496 bank_mask = bank_mask_same_rank;
1497 }
1498
1499 return bank_mask;
1500}
1501
1473
1474 // prioritize commands that access the
1475 // same rank as previous burst
1476 // Calculate bank mask separately for the case and
1477 // evaluate after loop iterations complete
1478 if (i == activeRank && ranksPerChannel > 1) {
1479 if (act_at <= min_act_at_same_rank) {
1480 // reset same rank bank mask if new minimum is found

--- 47 unchanged lines hidden (view full) ---

1528 if ((bank_mask == 0) || (!switched_cmd_type && same_rank_match &&
1529 min_act_at_same_rank <= min_cmd_at)) {
1530 bank_mask = bank_mask_same_rank;
1531 }
1532
1533 return bank_mask;
1534}
1535
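// [Illustrative sketch, not part of this changeset.] minBankPrep() returns
// a bitmask with one bit per flat bank id (banksPerRank * rank + bank);
// reorderQueue() tests a bank with bits(mask, bankId, bankId). Plain
// shift-and-mask equivalents of that convention:
#include <cstdint>

static void markBank(uint64_t& bank_mask, unsigned bank_id)
{
    bank_mask |= (uint64_t(1) << bank_id);
}

static bool bankIsMarked(uint64_t bank_mask, unsigned bank_id)
{
    return (bank_mask >> bank_id) & 1;
}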
1536DRAMCtrl::Rank::Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p)
1537 : EventManager(&_memory), memory(_memory),
1538 pwrStateTrans(PWR_IDLE), pwrState(PWR_IDLE), pwrStateTick(0),
1539 refreshState(REF_IDLE), refreshDueAt(0),
1540 power(_p, false), numBanksActive(0),
1541 activateEvent(*this), prechargeEvent(*this),
1542 refreshEvent(*this), powerEvent(*this)
1543{ }
1544
1502void
1545void
1503DRAMCtrl::processRefreshEvent()
1546DRAMCtrl::Rank::startup(Tick ref_tick)
1504{
1547{
1548 assert(ref_tick > curTick());
1549
1550 pwrStateTick = curTick();
1551
1552 // kick off the refresh, and give ourselves enough time to
1553 // precharge
1554 schedule(refreshEvent, ref_tick);
1555}
1556
1557void
1558DRAMCtrl::Rank::checkDrainDone()
1559{
1560 // if this rank was waiting to drain it is now able to proceed to
1561 // precharge
1562 if (refreshState == REF_DRAIN) {
1563 DPRINTF(DRAM, "Refresh drain done, now precharging\n");
1564
1565 refreshState = REF_PRE;
1566
1567 // hand control back to the refresh event loop
1568 schedule(refreshEvent, curTick());
1569 }
1570}
1571
1572void
1573DRAMCtrl::Rank::processActivateEvent()
1574{
1575 // we should transition to the active state as soon as any bank is active
1576 if (pwrState != PWR_ACT)
1577 // note that at this point numBanksActive could be back at
1578 // zero again due to a precharge scheduled in the future
1579 schedulePowerEvent(PWR_ACT, curTick());
1580}
1581
1582void
1583DRAMCtrl::Rank::processPrechargeEvent()
1584{
1585 // if we reached zero, then special conditions apply as we track
1586 // if all banks are precharged for the power models
1587 if (numBanksActive == 0) {
1588 // we should transition to the idle state when the last bank
1589 // is precharged
1590 schedulePowerEvent(PWR_IDLE, curTick());
1591 }
1592}
1593
1594void
1595DRAMCtrl::Rank::processRefreshEvent()
1596{
1505 // when first preparing the refresh, remember when it was due
1506 if (refreshState == REF_IDLE) {
1507 // remember when the refresh is due
1508 refreshDueAt = curTick();
1509
1510 // proceed to drain
1511 refreshState = REF_DRAIN;
1512
1513 DPRINTF(DRAM, "Refresh due\n");
1514 }
1515
1597 // when first preparing the refresh, remember when it was due
1598 if (refreshState == REF_IDLE) {
1599 // remember when the refresh is due
1600 refreshDueAt = curTick();
1601
1602 // proceed to drain
1603 refreshState = REF_DRAIN;
1604
1605 DPRINTF(DRAM, "Refresh due\n");
1606 }
1607
1516 // let any scheduled read or write go ahead, after which it will
1608 // let any scheduled read or write to the same rank go ahead,
1609 // after which it will
1517 // hand control back to this event loop
1518 if (refreshState == REF_DRAIN) {
1610 // hand control back to this event loop
1611 if (refreshState == REF_DRAIN) {
1519 if (nextReqEvent.scheduled()) {
1612 // if a request is at the moment being handled and this request is
1613 // accessing the current rank then wait for it to finish
1614 if ((rank == memory.activeRank)
1615 && (memory.nextReqEvent.scheduled())) {
1520 // hand control over to the request loop until it is
1521 // evaluated next
1522 DPRINTF(DRAM, "Refresh awaiting draining\n");
1523
1524 return;
1525 } else {
1526 refreshState = REF_PRE;
1527 }

--- 5 unchanged lines hidden (view full) ---

1533 // state
1534 if (pwrState != PWR_IDLE) {
1535 // at the moment, we use a precharge all even if there is
1536 // only a single bank open
1537 DPRINTF(DRAM, "Precharging all\n");
1538
1539 // first determine when we can precharge
1540 Tick pre_at = curTick();
1616 // hand control over to the request loop until it is
1617 // evaluated next
1618 DPRINTF(DRAM, "Refresh awaiting draining\n");
1619
1620 return;
1621 } else {
1622 refreshState = REF_PRE;
1623 }

--- 5 unchanged lines hidden (view full) ---

1629 // state
1630 if (pwrState != PWR_IDLE) {
1631 // at the moment, we use a precharge all even if there is
1632 // only a single bank open
1633 DPRINTF(DRAM, "Precharging all\n");
1634
1635 // first determine when we can precharge
1636 Tick pre_at = curTick();
1541 for (int i = 0; i < ranksPerChannel; i++) {
1542 for (int j = 0; j < banksPerRank; j++) {
1543 // respect both causality and any existing bank
1544 // constraints, some banks could already have a
1545 // (auto) precharge scheduled
1546 pre_at = std::max(banks[i][j].preAllowedAt, pre_at);
1547 }
1637
1638 for (auto &b : banks) {
1639 // respect both causality and any existing bank
1640 // constraints, some banks could already have a
1641 // (auto) precharge scheduled
1642 pre_at = std::max(b.preAllowedAt, pre_at);
1548 }
1549
1643 }
1644
1550 // make sure all banks are precharged, and for those that
1645 // make sure all banks per rank are precharged, and for those that
1551 // already are, update their availability
1646 // already are, update their availability
1552 Tick act_allowed_at = pre_at + tRP;
1647 Tick act_allowed_at = pre_at + memory.tRP;
1553
1648
1554 for (int i = 0; i < ranksPerChannel; i++) {
1555 for (int j = 0; j < banksPerRank; j++) {
1556 if (banks[i][j].openRow != Bank::NO_ROW) {
1557 prechargeBank(banks[i][j], pre_at, false);
1558 } else {
1559 banks[i][j].actAllowedAt =
1560 std::max(banks[i][j].actAllowedAt, act_allowed_at);
1561 banks[i][j].preAllowedAt =
1562 std::max(banks[i][j].preAllowedAt, pre_at);
1563 }
1649 for (auto &b : banks) {
1650 if (b.openRow != Bank::NO_ROW) {
1651 memory.prechargeBank(*this, b, pre_at, false);
1652 } else {
1653 b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at);
1654 b.preAllowedAt = std::max(b.preAllowedAt, pre_at);
1564 }
1655 }
1656 }
1565
1657
1566 // at the moment this affects all ranks
1567 rankPower[i].powerlib.doCommand(MemCommand::PREA, 0,
1568 divCeil(pre_at, tCK) -
1569 timeStampOffset);
1658 // precharge all banks in rank
1659 power.powerlib.doCommand(MemCommand::PREA, 0,
1660 divCeil(pre_at, memory.tCK) -
1661 memory.timeStampOffset);
1570
1662
1571 DPRINTF(DRAMPower, "%llu,PREA,0,%d\n", divCeil(pre_at, tCK) -
1572 timeStampOffset, i);
1573 }
1663 DPRINTF(DRAMPower, "%llu,PREA,0,%d\n",
1664 divCeil(pre_at, memory.tCK) -
1665 memory.timeStampOffset, rank);
1574 } else {
1575 DPRINTF(DRAM, "All banks already precharged, starting refresh\n");
1576
1577 // go ahead and kick the power state machine into gear if
1578 // we are already idle
1579 schedulePowerEvent(PWR_REF, curTick());
1580 }
1581

--- 8 unchanged lines hidden (view full) ---

1590 }
1591
1592 // last but not least we perform the actual refresh
1593 if (refreshState == REF_RUN) {
1594 // should never get here with any banks active
1595 assert(numBanksActive == 0);
1596 assert(pwrState == PWR_REF);
1597
1666 } else {
1667 DPRINTF(DRAM, "All banks already precharged, starting refresh\n");
1668
1669 // go ahead and kick the power state machine into gear if
1670 // we are already idle
1671 schedulePowerEvent(PWR_REF, curTick());
1672 }
1673

--- 8 unchanged lines hidden (view full) ---

1682 }
1683
1684 // last but not least we perform the actual refresh
1685 if (refreshState == REF_RUN) {
1686 // should never get here with any banks active
1687 assert(numBanksActive == 0);
1688 assert(pwrState == PWR_REF);
1689
1598 Tick ref_done_at = curTick() + tRFC;
1690 Tick ref_done_at = curTick() + memory.tRFC;
1599
1691
1600 for (int i = 0; i < ranksPerChannel; i++) {
1601 for (int j = 0; j < banksPerRank; j++) {
1602 banks[i][j].actAllowedAt = ref_done_at;
1603 }
1692 for (auto &b : banks) {
1693 b.actAllowedAt = ref_done_at;
1694 }
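        // (tRFC is the refresh cycle time, typically a few hundred
        //  nanoseconds for multi-gigabit DDR3 devices, so pushing every
        //  bank's actAllowedAt out to ref_done_at blocks all activates to
        //  this rank until the refresh has completed.)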
1604
1695
1605 // at the moment this affects all ranks
1606 rankPower[i].powerlib.doCommand(MemCommand::REF, 0,
1607 divCeil(curTick(), tCK) -
1608 timeStampOffset);
1696 // at the moment this affects all ranks
1697 power.powerlib.doCommand(MemCommand::REF, 0,
1698 divCeil(curTick(), memory.tCK) -
1699 memory.timeStampOffset);
1609
1700
1610 // at the moment sort the list of commands and update the counters
1611 // for DRAMPower library when doing a refresh
1612 sort(rankPower[i].powerlib.cmdList.begin(),
1613 rankPower[i].powerlib.cmdList.end(), DRAMCtrl::sortTime);
1701 // at the moment sort the list of commands and update the counters
1702 // for DRAMPower library when doing a refresh
1703 sort(power.powerlib.cmdList.begin(),
1704 power.powerlib.cmdList.end(), DRAMCtrl::sortTime);
1614
1705
1615 // update the counters for DRAMPower, passing false to
1616 // indicate that this is not the last command in the
1617 // list. DRAMPower requires this information for the
1618 // correct calculation of the background energy at the end
1619 // of the simulation. Ideally we would want to call this
1620 // function with true once at the end of the
1621 // simulation. However, the discarded energy is extremely
1622 // small and does not affect the final results.
1623 rankPower[i].powerlib.updateCounters(false);
1706 // update the counters for DRAMPower, passing false to
1707 // indicate that this is not the last command in the
1708 // list. DRAMPower requires this information for the
1709 // correct calculation of the background energy at the end
1710 // of the simulation. Ideally we would want to call this
1711 // function with true once at the end of the
1712 // simulation. However, the discarded energy is extremely
1713 // small and does not affect the final results.
1714 power.powerlib.updateCounters(false);
1624
1715
1625 // call the energy function
1626 rankPower[i].powerlib.calcEnergy();
1716 // call the energy function
1717 power.powerlib.calcEnergy();
1627
1718
1628 // Update the stats
1629 updatePowerStats(i);
1719 // Update the stats
1720 updatePowerStats();
1630
1721
1631 DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), tCK) -
1632 timeStampOffset, i);
1633 }
1722 DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), memory.tCK) -
1723 memory.timeStampOffset, rank);
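        // (Recomputing the DRAMPower counters and energy totals at every
        //  refresh, rather than only once at the end of simulation,
        //  presumably keeps the per-rank energy stats reasonably current
        //  whenever stats are dumped mid-run.)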
1634
1635 // make sure we did not wait so long that we cannot make up
1636 // for it
1724
1725 // make sure we did not wait so long that we cannot make up
1726 // for it
1637 if (refreshDueAt + tREFI < ref_done_at) {
1727 if (refreshDueAt + memory.tREFI < ref_done_at) {
1638 fatal("Refresh was delayed so long we cannot catch up\n");
1639 }
1640
1641 // compensate for the delay in actually performing the refresh
1642 // when scheduling the next one
1728 fatal("Refresh was delayed so long we cannot catch up\n");
1729 }
1730
1731 // compensate for the delay in actually performing the refresh
1732 // when scheduling the next one
1643 schedule(refreshEvent, refreshDueAt + tREFI - tRP);
1733 schedule(refreshEvent, refreshDueAt + memory.tREFI - memory.tRP);
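    // (Subtracting tRP schedules the next refresh event slightly early,
    //  presumably to leave room for the precharge-all phase above before
    //  the refresh itself becomes due; for DDR3, tREFI is on the order of
    //  7.8 us while tRP is only around 15 ns.)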
1644
1645 assert(!powerEvent.scheduled());
1646
1647 // move to the idle power state once the refresh is done, this
1648 // will also move the refresh state machine to the refresh
1649 // idle state
1650 schedulePowerEvent(PWR_IDLE, ref_done_at);
1651
1652 DPRINTF(DRAMState, "Refresh done at %llu and next refresh at %llu\n",
1734
1735 assert(!powerEvent.scheduled());
1736
1737 // move to the idle power state once the refresh is done, this
1738 // will also move the refresh state machine to the refresh
1739 // idle state
1740 schedulePowerEvent(PWR_IDLE, ref_done_at);
1741
1742 DPRINTF(DRAMState, "Refresh done at %llu and next refresh at %llu\n",
1653 ref_done_at, refreshDueAt + tREFI);
1743 ref_done_at, refreshDueAt + memory.tREFI);
1654 }
1655}
1656
1657void
1744 }
1745}
1746
1747void
1658DRAMCtrl::schedulePowerEvent(PowerState pwr_state, Tick tick)
1748DRAMCtrl::Rank::schedulePowerEvent(PowerState pwr_state, Tick tick)
1659{
1660 // respect causality
1661 assert(tick >= curTick());
1662
1663 if (!powerEvent.scheduled()) {
1664 DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n",
1665 tick, pwr_state);
1666

--- 4 unchanged lines hidden (view full) ---

1671 } else {
1672 panic("Scheduled power event at %llu to state %d, "
1673 "with scheduled event at %llu to %d\n", tick, pwr_state,
1674 powerEvent.when(), pwrStateTrans);
1675 }
1676}
1677
1678void
1749{
1750 // respect causality
1751 assert(tick >= curTick());
1752
1753 if (!powerEvent.scheduled()) {
1754 DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n",
1755 tick, pwr_state);
1756

--- 4 unchanged lines hidden (view full) ---

1761 } else {
1762 panic("Scheduled power event at %llu to state %d, "
1763 "with scheduled event at %llu to %d\n", tick, pwr_state,
1764 powerEvent.when(), pwrStateTrans);
1765 }
1766}
1767
1768void
1679DRAMCtrl::processPowerEvent()
1769DRAMCtrl::Rank::processPowerEvent()
1680{
1681 // remember where we were, and for how long
1682 Tick duration = curTick() - pwrStateTick;
1683 PowerState prev_state = pwrState;
1684
1685 // update the accounting
1686 pwrStateTime[prev_state] += duration;
1687

--- 5 unchanged lines hidden (view full) ---

1693
1694 // if we were refreshing, make sure we start scheduling requests again
1695 if (prev_state == PWR_REF) {
1696 DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration);
1697 assert(pwrState == PWR_IDLE);
1698
1699 // kick things into action again
1700 refreshState = REF_IDLE;
1770{
1771 // remember where we were, and for how long
1772 Tick duration = curTick() - pwrStateTick;
1773 PowerState prev_state = pwrState;
1774
1775 // update the accounting
1776 pwrStateTime[prev_state] += duration;
1777

--- 5 unchanged lines hidden (view full) ---

1783
1784 // if we were refreshing, make sure we start scheduling requests again
1785 if (prev_state == PWR_REF) {
1786 DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration);
1787 assert(pwrState == PWR_IDLE);
1788
1789 // kick things into action again
1790 refreshState = REF_IDLE;
1701 assert(!nextReqEvent.scheduled());
1702 schedule(nextReqEvent, curTick());
1791 // a request event could be already scheduled by the state
1792 // machine of the other rank
1793 if (!memory.nextReqEvent.scheduled())
1794 schedule(memory.nextReqEvent, curTick());
1703 } else {
1704 assert(prev_state == PWR_ACT);
1705
1706 // if we have a pending refresh, and are now moving to
1707 // the idle state, direclty transition to a refresh
1708 if (refreshState == REF_RUN) {
1709 // there should be nothing waiting at this point
1710 assert(!powerEvent.scheduled());

--- 14 unchanged lines hidden (view full) ---

1725 // in turn will schedule a transition to the idle power
1726 // state once the refresh is done
1727 assert(refreshState == REF_RUN);
1728 processRefreshEvent();
1729 }
1730}
1731
1732void
1795 } else {
1796 assert(prev_state == PWR_ACT);
1797
1798 // if we have a pending refresh, and are now moving to
1799 // the idle state, direclty transition to a refresh
1800 if (refreshState == REF_RUN) {
1801 // there should be nothing waiting at this point
1802 assert(!powerEvent.scheduled());

--- 14 unchanged lines hidden (view full) ---

1817 // in turn will schedule a transition to the idle power
1818 // state once the refresh is done
1819 assert(refreshState == REF_RUN);
1820 processRefreshEvent();
1821 }
1822}
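// (Net effect of this handler: a rank leaving PWR_REF returns to PWR_IDLE
//  and request scheduling resumes, while a rank leaving PWR_ACT with a
//  refresh pending moves directly into the refresh sequence instead of
//  pausing in the idle state.)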
1823
1824void
1733DRAMCtrl::updatePowerStats(uint8_t rank)
1825DRAMCtrl::Rank::updatePowerStats()
1734{
1735 // Get the energy and power from DRAMPower
1736 Data::MemoryPowerModel::Energy energy =
1826{
1827 // Get the energy and power from DRAMPower
1828 Data::MemoryPowerModel::Energy energy =
1737 rankPower[rank].powerlib.getEnergy();
1738 Data::MemoryPowerModel::Power power =
1739 rankPower[rank].powerlib.getPower();
1829 power.powerlib.getEnergy();
1830 Data::MemoryPowerModel::Power rank_power =
1831 power.powerlib.getPower();
1740
1832
1741 actEnergy[rank] = energy.act_energy * devicesPerRank;
1742 preEnergy[rank] = energy.pre_energy * devicesPerRank;
1743 readEnergy[rank] = energy.read_energy * devicesPerRank;
1744 writeEnergy[rank] = energy.write_energy * devicesPerRank;
1745 refreshEnergy[rank] = energy.ref_energy * devicesPerRank;
1746 actBackEnergy[rank] = energy.act_stdby_energy * devicesPerRank;
1747 preBackEnergy[rank] = energy.pre_stdby_energy * devicesPerRank;
1748 totalEnergy[rank] = energy.total_energy * devicesPerRank;
1749 averagePower[rank] = power.average_power * devicesPerRank;
1833 actEnergy = energy.act_energy * memory.devicesPerRank;
1834 preEnergy = energy.pre_energy * memory.devicesPerRank;
1835 readEnergy = energy.read_energy * memory.devicesPerRank;
1836 writeEnergy = energy.write_energy * memory.devicesPerRank;
1837 refreshEnergy = energy.ref_energy * memory.devicesPerRank;
1838 actBackEnergy = energy.act_stdby_energy * memory.devicesPerRank;
1839 preBackEnergy = energy.pre_stdby_energy * memory.devicesPerRank;
1840 totalEnergy = energy.total_energy * memory.devicesPerRank;
1841 averagePower = rank_power.average_power * memory.devicesPerRank;
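    // (DRAMPower models a single DRAM device, so each figure is scaled by
    //  devicesPerRank to cover the whole rank, e.g. eight x8 devices making
    //  up a 64-bit rank.)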
1750}
1751
1752void
1842}
1843
1844void
1845DRAMCtrl::Rank::regStats()
1846{
1847 using namespace Stats;
1848
1849 pwrStateTime
1850 .init(5)
1851 .name(name() + ".memoryStateTime")
1852 .desc("Time in different power states");
1853 pwrStateTime.subname(0, "IDLE");
1854 pwrStateTime.subname(1, "REF");
1855 pwrStateTime.subname(2, "PRE_PDN");
1856 pwrStateTime.subname(3, "ACT");
1857 pwrStateTime.subname(4, "ACT_PDN");
1858
1859 actEnergy
1860 .name(name() + ".actEnergy")
1861 .desc("Energy for activate commands per rank (pJ)");
1862
1863 preEnergy
1864 .name(name() + ".preEnergy")
1865 .desc("Energy for precharge commands per rank (pJ)");
1866
1867 readEnergy
1868 .name(name() + ".readEnergy")
1869 .desc("Energy for read commands per rank (pJ)");
1870
1871 writeEnergy
1872 .name(name() + ".writeEnergy")
1873 .desc("Energy for write commands per rank (pJ)");
1874
1875 refreshEnergy
1876 .name(name() + ".refreshEnergy")
1877 .desc("Energy for refresh commands per rank (pJ)");
1878
1879 actBackEnergy
1880 .name(name() + ".actBackEnergy")
1881 .desc("Energy for active background per rank (pJ)");
1882
1883 preBackEnergy
1884 .name(name() + ".preBackEnergy")
1885 .desc("Energy for precharge background per rank (pJ)");
1886
1887 totalEnergy
1888 .name(name() + ".totalEnergy")
1889 .desc("Total energy per rank (pJ)");
1890
1891 averagePower
1892 .name(name() + ".averagePower")
1893 .desc("Core power per rank (mW)");
1894}
1895void
1753DRAMCtrl::regStats()
1754{
1755 using namespace Stats;
1756
1757 AbstractMemory::regStats();
1758
1896DRAMCtrl::regStats()
1897{
1898 using namespace Stats;
1899
1900 AbstractMemory::regStats();
1901
1902 for (auto r : ranks) {
1903 r->regStats();
1904 }
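    // (The power and energy stats that the controller previously kept as
    //  per-rank vectors are now registered by each Rank object itself, so
    //  they appear once per rank under that rank's own stat name.)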
1905
1759 readReqs
1760 .name(name() + ".readReqs")
1761 .desc("Number of read requests accepted");
1762
1763 writeReqs
1764 .name(name() + ".writeReqs")
1765 .desc("Number of write requests accepted");
1766

--- 195 unchanged lines hidden (view full) ---

1962 .precision(2);
1963
1964 peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000;
1965
1966 busUtil
1967 .name(name() + ".busUtil")
1968 .desc("Data bus utilization in percentage")
1969 .precision(2);
1906 readReqs
1907 .name(name() + ".readReqs")
1908 .desc("Number of read requests accepted");
1909
1910 writeReqs
1911 .name(name() + ".writeReqs")
1912 .desc("Number of write requests accepted");
1913

--- 195 unchanged lines hidden (view full) ---

2109 .precision(2);
2110
2111 peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000;
2112
2113 busUtil
2114 .name(name() + ".busUtil")
2115 .desc("Data bus utilization in percentage")
2116 .precision(2);
1970
1971 busUtil = (avgRdBW + avgWrBW) / peakBW * 100;
1972
1973 totGap
1974 .name(name() + ".totGap")
1975 .desc("Total gap between requests");
1976
1977 avgGap
1978 .name(name() + ".avgGap")

--- 19 unchanged lines hidden (view full) ---

1998
1999 pageHitRate
2000 .name(name() + ".pageHitRate")
2001 .desc("Row buffer hit rate, read and write combined")
2002 .precision(2);
2003
2004 pageHitRate = (writeRowHits + readRowHits) /
2005 (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100;
2117 busUtil = (avgRdBW + avgWrBW) / peakBW * 100;
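    // (peakBW is in MB/s: SimClock::Frequency / tBURST gives bursts per
    //  second and each burst moves burstSize bytes. As a rough example,
    //  assuming a 64-byte burst and tBURST = 5 ns (DDR3-1600, BL8), peakBW
    //  is 64 B / 5 ns = 12800 MB/s; busUtil then expresses the combined
    //  read and write bandwidth as a percentage of that peak.)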
2118
2119 totGap
2120 .name(name() + ".totGap")
2121 .desc("Total gap between requests");
2122
2123 avgGap
2124 .name(name() + ".avgGap")

--- 19 unchanged lines hidden (view full) ---

2144
2145 pageHitRate
2146 .name(name() + ".pageHitRate")
2147 .desc("Row buffer hit rate, read and write combined")
2148 .precision(2);
2149
2150 pageHitRate = (writeRowHits + readRowHits) /
2151 (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100;
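    // (The denominator counts only bursts that actually accessed the DRAM:
    //  write bursts merged in the write queue and reads serviced straight
    //  from the write queue never open a row, so they are excluded before
    //  the combined row-buffer hit rate is computed.)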
2006
2007 pwrStateTime
2008 .init(5)
2009 .name(name() + ".memoryStateTime")
2010 .desc("Time in different power states");
2011 pwrStateTime.subname(0, "IDLE");
2012 pwrStateTime.subname(1, "REF");
2013 pwrStateTime.subname(2, "PRE_PDN");
2014 pwrStateTime.subname(3, "ACT");
2015 pwrStateTime.subname(4, "ACT_PDN");
2016
2017 actEnergy
2018 .init(ranksPerChannel)
2019 .name(name() + ".actEnergy")
2020 .desc("Energy for activate commands per rank (pJ)");
2021
2022 preEnergy
2023 .init(ranksPerChannel)
2024 .name(name() + ".preEnergy")
2025 .desc("Energy for precharge commands per rank (pJ)");
2026
2027 readEnergy
2028 .init(ranksPerChannel)
2029 .name(name() + ".readEnergy")
2030 .desc("Energy for read commands per rank (pJ)");
2031
2032 writeEnergy
2033 .init(ranksPerChannel)
2034 .name(name() + ".writeEnergy")
2035 .desc("Energy for write commands per rank (pJ)");
2036
2037 refreshEnergy
2038 .init(ranksPerChannel)
2039 .name(name() + ".refreshEnergy")
2040 .desc("Energy for refresh commands per rank (pJ)");
2041
2042 actBackEnergy
2043 .init(ranksPerChannel)
2044 .name(name() + ".actBackEnergy")
2045 .desc("Energy for active background per rank (pJ)");
2046
2047 preBackEnergy
2048 .init(ranksPerChannel)
2049 .name(name() + ".preBackEnergy")
2050 .desc("Energy for precharge background per rank (pJ)");
2051
2052 totalEnergy
2053 .init(ranksPerChannel)
2054 .name(name() + ".totalEnergy")
2055 .desc("Total energy per rank (pJ)");
2056
2057 averagePower
2058 .init(ranksPerChannel)
2059 .name(name() + ".averagePower")
2060 .desc("Core power per rank (mW)");
2061}
2062
2063void
2064DRAMCtrl::recvFunctional(PacketPtr pkt)
2065{
2066 // rely on the abstract memory
2067 functionalAccess(pkt);
2068}

--- 86 unchanged lines hidden ---
2152}
2153
2154void
2155DRAMCtrl::recvFunctional(PacketPtr pkt)
2156{
2157 // rely on the abstract memory
2158 functionalAccess(pkt);
2159}

--- 86 unchanged lines hidden ---