1/* 2 * Copyright (c) 2012-2018 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software --- 43 unchanged lines hidden (view full) --- 52 */ 53 54#ifndef __MEM_DRAM_CTRL_HH__ 55#define __MEM_DRAM_CTRL_HH__ 56 57#include <deque> 58#include <string> 59#include <unordered_set> |
60#include <vector> |
61 62#include "base/callback.hh" 63#include "base/statistics.hh" 64#include "enums/AddrMap.hh" 65#include "enums/MemSched.hh" 66#include "enums/PageManage.hh" |
67#include "mem/drampower.hh" 68#include "mem/qos/mem_ctrl.hh" |
69#include "mem/qport.hh" 70#include "params/DRAMCtrl.hh" 71#include "sim/eventq.hh" |
72 73/** 74 * The DRAM controller is a single-channel memory controller capturing 75 * the most important timing constraints associated with a 76 * contemporary DRAM. For multi-channel memory systems, the controller 77 * is combined with a crossbar model, with the channel address 78 * interleaving taking part in the crossbar. 79 * --- 10 unchanged lines hidden (view full) --- 90 * controllers for future system architecture exploration", 91 * Proc. ISPASS, 2014. If you use this model as part of your research 92 * please cite the paper. 93 * 94 * The low-power functionality implements a staggered powerdown 95 * similar to that described in "Optimized Active and Power-Down Mode 96 * Refresh Control in 3D-DRAMs" by Jung et al, VLSI-SoC, 2014. 97 */ |
98class DRAMCtrl : public QoS::MemCtrl |
99{ 100 101 private: 102 103 // For now, make use of a queued slave port to avoid dealing with 104 // flow control for the responses being sent back 105 class MemoryPort : public QueuedSlavePort 106 { --- 19 unchanged lines hidden (view full) --- 126 127 /** 128 * Our incoming port, for a multi-ported controller add a crossbar 129 * in front of it 130 */ 131 MemoryPort port; 132 133 /** |
134 * Remember if the memory system is in timing mode |
135 */ 136 bool isTimingMode; 137 138 /** 139 * Remember if we have to retry a request when available. 140 */ 141 bool retryRdReq; 142 bool retryWrReq; 143 |
144 /**/ |
145 |
146 /** 147 * Simple structure to hold the values needed to keep track of 148 * commands for DRAMPower 149 */ 150 struct Command { 151 Data::MemCommand::cmds type; 152 uint8_t bank; 153 Tick timeStamp; --- 261 unchanged lines hidden (view full) --- 415 Tick wakeUpAllowedAt; 416 417 /** 418 * One DRAMPower instance per rank 419 */ 420 DRAMPower power; 421 422 /** |
423 * List of commands issued, to be sent to DRAMPower at refresh |
424 * and stats dump. Keep commands here since commands to different 425 * banks are added out of order. Will only pass commands up to 426 * curTick() to DRAMPower after sorting. 427 */ 428 std::vector<Command> cmdList; 429 430 /** 431 * Vector of Banks. Each rank is made of several devices which in --- 207 unchanged lines hidden (view full) --- 639 const Tick entryTime; 640 641 /** When will request leave the controller */ 642 Tick readyTime; 643 644 /** This comes from the outside world */ 645 const PacketPtr pkt; 646 |
647 /** MasterID associated with the packet */ 648 const MasterID _masterId; |
649 |
650 const bool read; 651 |
652 /** Will be populated by address decoder */ 653 const uint8_t rank; 654 const uint8_t bank; 655 const uint32_t row; 656 657 /** 658 * Bank id is calculated considering banks in all the ranks 659 * eg: 2 ranks each with 8 banks, then bankId = 0 --> rank0, bank0 and --- 18 unchanged lines hidden (view full) --- 678 /** 679 * A pointer to the BurstHelper if this DRAMPacket is a split packet 680 * If not a split packet (common case), this is set to NULL 681 */ 682 BurstHelper* burstHelper; 683 Bank& bankRef; 684 Rank& rankRef; 685 |
686 /** 687 * QoS value of the encapsulated packet read at queuing time 688 */ 689 uint8_t _qosValue; 690 691 /** 692 * Set the packet QoS value 693 * (interface compatibility with Packet) 694 */ 695 inline void qosValue(const uint8_t qv) { _qosValue = qv; } 696 697 /** 698 * Get the packet QoS value 699 * (interface compatibility with Packet) 700 */ 701 inline uint8_t qosValue() const { return _qosValue; } 702 703 /** 704 * Get the packet MasterID 705 * (interface compatibility with Packet) 706 */ 707 inline MasterID masterId() const { return _masterId; } 708 709 /** 710 * Get the packet size 711 * (interface compatibility with Packet) 712 */ 713 inline unsigned int getSize() const { return size; } 714 715 /** 716 * Get the packet address 717 * (interface compatibility with Packet) 718 */ 719 inline Addr getAddr() const { return addr; } 720 721 /** 722 * Return true if its a read packet 723 * (interface compatibility with Packet) 724 */ 725 inline bool isRead() const { return read; } 726 727 /** 728 * Return true if its a write packet 729 * (interface compatibility with Packet) 730 */ 731 inline bool isWrite() const { return !read; } 732 733 |
734 DRAMPacket(PacketPtr _pkt, bool is_read, uint8_t _rank, uint8_t _bank, 735 uint32_t _row, uint16_t bank_id, Addr _addr, 736 unsigned int _size, Bank& bank_ref, Rank& rank_ref) |
737 : entryTime(curTick()), readyTime(curTick()), pkt(_pkt), 738 _masterId(pkt->masterId()), 739 read(is_read), rank(_rank), bank(_bank), row(_row), |
740 bankId(bank_id), addr(_addr), size(_size), burstHelper(NULL), |
741 bankRef(bank_ref), rankRef(rank_ref), _qosValue(_pkt->qosValue()) |
742 { } 743 744 }; 745 |
746 // The DRAM packets are stored in multiple deque structures, 747 // based on their QoS priority 748 typedef std::deque<DRAMPacket*> DRAMPacketQueue; 749 |
750 /** 751 * Bunch of things required to set up "events" in gem5 752 * When event "respondEvent" occurs for example, the method 753 * processRespondEvent is called; no parameters are allowed 754 * in these methods 755 */ 756 void processNextReqEvent(); 757 EventFunctionWrapper nextReqEvent; --- 88 unchanged lines hidden (view full) --- 846 * The memory scheduler/arbiter - picks which request needs to 847 * go next, based on the specified policy such as FCFS or FR-FCFS 848 * and moves it to the head of the queue. 849 * Prioritizes accesses to the same rank as previous burst unless 850 * controller is switching command type. 851 * 852 * @param queue Queued requests to consider 853 * @param extra_col_delay Any extra delay due to a read/write switch |
854 * @return an iterator to the selected packet, else queue.end() |
855 */ |
856 DRAMPacketQueue::iterator chooseNext(DRAMPacketQueue& queue, 857 Tick extra_col_delay); |
858 859 /** 860 * For FR-FCFS policy reorder the read/write queue depending on row buffer 861 * hits and earliest bursts available in DRAM 862 * 863 * @param queue Queued requests to consider 864 * @param extra_col_delay Any extra delay due to a read/write switch |
865 * @return an iterator to the selected packet, else queue.end() |
866 */ |
867 DRAMPacketQueue::iterator chooseNextFRFCFS(DRAMPacketQueue& queue, 868 Tick extra_col_delay); |
869 870 /** 871 * Find which are the earliest banks ready to issue an activate 872 * for the enqueued requests. Assumes maximum of 32 banks per rank 873 * Also checks if the bank is already prepped. 874 * 875 * @param queue Queued requests to consider 876 * @param min_col_at time of seamless burst command 877 * @return One-hot encoded mask of bank indices 878 * @return boolean indicating burst can issue seamlessly, with no gaps 879 */ |
880 std::pair<std::vector<uint32_t>, bool> 881 minBankPrep(const DRAMPacketQueue& queue, Tick min_col_at) const; |
882 883 /** 884 * Keep track of when row activations happen, in order to enforce 885 * the maximum number of activations in the activation window. The 886 * method updates the time that the banks become available based 887 * on the current limits. 888 * 889 * @param rank_ref Reference to the rank --- 27 unchanged lines hidden (view full) --- 917 * 918 * @param addr The potentially unaligned address 919 * 920 * @return An address aligned to a DRAM burst 921 */ 922 Addr burstAlign(Addr addr) const { return (addr & ~(Addr(burstSize - 1))); } 923 924 /** |
925 * The controller's main read and write queues, with support for QoS reordering |
926 */ |
927 std::vector<DRAMPacketQueue> readQueue; 928 std::vector<DRAMPacketQueue> writeQueue; |
929 930 /** 931 * To avoid iterating over the write queue to check for 932 * overlapping transactions, maintain a set of burst addresses 933 * that are currently queued. Since we merge writes to the same 934 * location we never have more than one address to the same burst 935 * address. 936 */ 937 std::unordered_set<Addr> isInWriteQueue; 938 939 /** 940 * Response queue where read packets wait after we're done working 941 * with them, but it's not time to send the response yet. The |
942 * responses are stored separately mostly to keep the code clean |
943 * and help with events scheduling. For all logical purposes such 944 * as sizing the read queue, this and the main read queue need to 945 * be added together. 946 */ 947 std::deque<DRAMPacket*> respQueue; 948 949 /** 950 * Vector of ranks --- 61 unchanged lines hidden (view full) --- 1012 * Memory controller configuration initialized based on parameter 1013 * values. 1014 */ 1015 Enums::MemSched memSchedPolicy; 1016 Enums::AddrMap addrMapping; 1017 Enums::PageManage pageMgmt; 1018 1019 /** |
1020 * Max column accesses (read and write) per row, before forcefully |
1021 * closing it. 1022 */ 1023 const uint32_t maxAccessesPerRow; 1024 1025 /** 1026 * Pipeline latency of the controller frontend. The frontend 1027 * contribution is added to writes (that complete when they are in 1028 * the write buffer) and reads that are serviced the write buffer. --- 43 unchanged lines hidden (view full) --- 1072 Stats::Vector readPktSize; 1073 Stats::Vector writePktSize; 1074 Stats::Vector rdQLenPdf; 1075 Stats::Vector wrQLenPdf; 1076 Stats::Histogram bytesPerActivate; 1077 Stats::Histogram rdPerTurnAround; 1078 Stats::Histogram wrPerTurnAround; 1079 |
1080 // per-master bytes read and written to memory 1081 Stats::Vector masterReadBytes; 1082 Stats::Vector masterWriteBytes; 1083 1084 // per-master bytes read and written to memory rate 1085 Stats::Formula masterReadRate; 1086 Stats::Formula masterWriteRate; 1087 1088 // per-master read and write serviced memory accesses 1089 Stats::Vector masterReadAccesses; 1090 Stats::Vector masterWriteAccesses; 1091 1092 // per-master read and write total memory access latency 1093 Stats::Vector masterReadTotalLat; 1094 Stats::Vector masterWriteTotalLat; 1095 1096 // per-master read and write average memory access latency 1097 Stats::Formula masterReadAvgLat; 1098 Stats::Formula masterWriteAvgLat; 1099 |
1100 // Latencies summed over all requests 1101 Stats::Scalar totQLat; 1102 Stats::Scalar totMemAccLat; 1103 Stats::Scalar totBusLat; 1104 1105 // Average latencies per request 1106 Stats::Formula avgQLat; 1107 Stats::Formula avgBusLat; --- 40 unchanged lines hidden (view full) --- 1148 1149 /** 1150 * This function increments the energy when called. If stats are 1151 * dumped periodically, note accumulated energy values will 1152 * appear in the stats (even if the stats are reset). This is a 1153 * result of the energy values coming from DRAMPower, and there 1154 * is currently no support for resetting the state. 1155 * |
1156 * @param rank Current rank |
1157 */ 1158 void updatePowerStats(Rank& rank_ref); 1159 1160 /** 1161 * Function for sorting Command structures based on timeStamp 1162 * 1163 * @param a Memory Command 1164 * @param next Memory Command --- 41 unchanged lines hidden --- |