fetch2.hh (10259:ebb376f73dd2) fetch2.hh (11567:560d7fbbddd1)
/*
 * Copyright (c) 2013-2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software

--- 64 unchanged lines hidden (view full) ---

73
74 /** Output port carrying predictions back to Fetch1 */
75 Latch<BranchData>::Input predictionOut;
76
77 /** Output port carrying instructions into Decode */
78 Latch<ForwardInstData>::Input out;
79
80 /** Interface to reserve space in the next stage */
/*
 * Copyright (c) 2013-2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software

--- 64 unchanged lines hidden (view full) ---

73
74 /** Output port carrying predictions back to Fetch1 */
75 Latch<BranchData>::Input predictionOut;
76
77 /** Output port carrying instructions into Decode */
78 Latch<ForwardInstData>::Input out;
79
80 /** Interface to reserve space in the next stage */
81 Reservable &nextStageReserve;
81 std::vector<InputBuffer<ForwardInstData>> &nextStageReserve;
82
83 /** Width of output of this stage/input of next in instructions */
84 unsigned int outputWidth;
85
86 /** If true, more than one input word can be processed each cycle if
87 * there is room in the output to contain its processed data */
88 bool processMoreThanOneInput;
89
90 /** Branch predictor passed from Python configuration */
91 BPredUnit &branchPredictor;
92
93 public:
94 /* Public so that Pipeline can pass it to Fetch1 */
82
83 /** Width of output of this stage/input of next in instructions */
84 unsigned int outputWidth;
85
86 /** If true, more than one input word can be processed each cycle if
87 * there is room in the output to contain its processed data */
88 bool processMoreThanOneInput;
89
90 /** Branch predictor passed from Python configuration */
91 BPredUnit &branchPredictor;
92
93 public:
94 /* Public so that Pipeline can pass it to Fetch1 */
95 InputBuffer<ForwardLineData> inputBuffer;
95 std::vector<InputBuffer<ForwardLineData>> inputBuffer;
96
97 protected:
98 /** Data members after this line are cycle-to-cycle state */
99
96
97 protected:
98 /** Data members after this line are cycle-to-cycle state */
99
100 /** Index into an incompletely processed input line that instructions
101 * are to be extracted from */
102 unsigned int inputIndex;
100 struct Fetch2ThreadInfo {
103
101
104 /** Remembered program counter value. Between contiguous lines, this
105 * is just updated with advancePC. For lines following changes of
106 * stream, a new PC must be loaded and havePC be set.
107 * havePC is needed to accomodate instructions which span across
108 * lines meaning that Fetch2 and the decoder need to remember a PC
109 * value and a partially-offered instruction from the previous line */
110 TheISA::PCState pc;
102 /** Default constructor */
103 Fetch2ThreadInfo() :
104 inputIndex(0),
105 pc(TheISA::PCState(0)),
106 havePC(false),
107 lastStreamSeqNum(InstId::firstStreamSeqNum),
108 fetchSeqNum(InstId::firstFetchSeqNum),
109 expectedStreamSeqNum(InstId::firstStreamSeqNum),
110 predictionSeqNum(InstId::firstPredictionSeqNum),
111 blocked(false)
112 { }
111
113
112 /** PC is currently valid. Initially false, gets set to true when a
113 * change-of-stream line is received and false again when lines are
114 * discarded for any reason */
115 bool havePC;
114 Fetch2ThreadInfo(const Fetch2ThreadInfo& other) :
115 inputIndex(other.inputIndex),
116 pc(other.pc),
117 havePC(other.havePC),
118 lastStreamSeqNum(other.lastStreamSeqNum),
119 expectedStreamSeqNum(other.expectedStreamSeqNum),
120 predictionSeqNum(other.predictionSeqNum),
121 blocked(other.blocked)
122 { }
116
123
117 /** Stream sequence number of the last seen line used to identify changes
118 * of instruction stream */
119 InstSeqNum lastStreamSeqNum;
124 /** Index into an incompletely processed input line that instructions
125 * are to be extracted from */
126 unsigned int inputIndex;
120
127
121 /** Fetch2 is the source of fetch sequence numbers. These represent the
122 * sequence that instructions were extracted from fetched lines. */
123 InstSeqNum fetchSeqNum;
124
128
125 /** Stream sequence number remembered from last time the predictionSeqNum
126 * changed. Lines should only be discarded when their predictionSeqNums
127 * disagree with Fetch2::predictionSeqNum *and* they are from the same
128 * stream that bore that prediction number */
129 InstSeqNum expectedStreamSeqNum;
129 /** Remembered program counter value. Between contiguous lines, this
130 * is just updated with advancePC. For lines following changes of
131 * stream, a new PC must be loaded and havePC be set.
132 * havePC is needed to accomodate instructions which span across
133 * lines meaning that Fetch2 and the decoder need to remember a PC
134 * value and a partially-offered instruction from the previous line */
135 TheISA::PCState pc;
130
136
131 /** Fetch2 is the source of prediction sequence numbers. These represent
132 * predicted changes of control flow sources from branch prediction in
133 * Fetch2. */
134 InstSeqNum predictionSeqNum;
137 /** PC is currently valid. Initially false, gets set to true when a
138 * change-of-stream line is received and false again when lines are
139 * discarded for any reason */
140 bool havePC;
135
141
136 /** Blocked indication for report */
137 bool blocked;
142 /** Stream sequence number of the last seen line used to identify
143 * changes of instruction stream */
144 InstSeqNum lastStreamSeqNum;
138
145
146 /** Fetch2 is the source of fetch sequence numbers. These represent the
147 * sequence that instructions were extracted from fetched lines. */
148 InstSeqNum fetchSeqNum;
149
150 /** Stream sequence number remembered from last time the
151 * predictionSeqNum changed. Lines should only be discarded when their
152 * predictionSeqNums disagree with Fetch2::predictionSeqNum *and* they
153 * are from the same stream that bore that prediction number */
154 InstSeqNum expectedStreamSeqNum;
155
156 /** Fetch2 is the source of prediction sequence numbers. These
157 * represent predicted changes of control flow sources from branch
158 * prediction in Fetch2. */
159 InstSeqNum predictionSeqNum;
160
161 /** Blocked indication for report */
162 bool blocked;
163 };
164
165 std::vector<Fetch2ThreadInfo> fetchInfo;
166 ThreadID threadPriority;
167
139 protected:
140 /** Get a piece of data to work on from the inputBuffer, or 0 if there
141 * is no data. */
168 protected:
169 /** Get a piece of data to work on from the inputBuffer, or 0 if there
170 * is no data. */
142 const ForwardLineData *getInput();
171 const ForwardLineData *getInput(ThreadID tid);
143
144 /** Pop an element off the input buffer, if there are any */
172
173 /** Pop an element off the input buffer, if there are any */
145 void popInput();
174 void popInput(ThreadID tid);
146
147 /** Dump the whole contents of the input buffer. Useful after a
148 * prediction changes control flow */
175
176 /** Dump the whole contents of the input buffer. Useful after a
177 * prediction changes control flow */
149 void dumpAllInput();
178 void dumpAllInput(ThreadID tid);
150
151 /** Update local branch prediction structures from feedback from
152 * Execute. */
153 void updateBranchPrediction(const BranchData &branch);
154
155 /** Predicts branches for the given instruction. Updates the
156 * instruction's predicted... fields and also the branch which
157 * carries the prediction to Fetch1 */
158 void predictBranch(MinorDynInstPtr inst, BranchData &branch);
159
179
180 /** Update local branch prediction structures from feedback from
181 * Execute. */
182 void updateBranchPrediction(const BranchData &branch);
183
184 /** Predicts branches for the given instruction. Updates the
185 * instruction's predicted... fields and also the branch which
186 * carries the prediction to Fetch1 */
187 void predictBranch(MinorDynInstPtr inst, BranchData &branch);
188
189 /** Use the current threading policy to determine the next thread to
190 * fetch from. */
191 ThreadID getScheduledThread();
192
160 public:
161 Fetch2(const std::string &name,
162 MinorCPU &cpu_,
163 MinorCPUParams &params,
164 Latch<ForwardLineData>::Output inp_,
165 Latch<BranchData>::Output branchInp_,
166 Latch<BranchData>::Input predictionOut_,
167 Latch<ForwardInstData>::Input out_,
193 public:
194 Fetch2(const std::string &name,
195 MinorCPU &cpu_,
196 MinorCPUParams &params,
197 Latch<ForwardLineData>::Output inp_,
198 Latch<BranchData>::Output branchInp_,
199 Latch<BranchData>::Input predictionOut_,
200 Latch<ForwardInstData>::Input out_,
168 Reservable &next_stage_input_buffer);
201 std::vector<InputBuffer<ForwardInstData>> &next_stage_input_buffer);
169
170 public:
171 /** Pass on input/buffer data to the output if you can */
172 void evaluate();
173
174 void minorTrace() const;
175
176 /** Is this stage drained? For Fetch2, draining is initiated by
177 * Execute halting Fetch1 causing Fetch2 to naturally drain.
178 * Branch predictions are ignored by Fetch1 during halt */
179 bool isDrained();
180};
181
182}
183
184#endif /* __CPU_MINOR_FETCH2_HH__ */
202
203 public:
204 /** Pass on input/buffer data to the output if you can */
205 void evaluate();
206
207 void minorTrace() const;
208
209 /** Is this stage drained? For Fetch2, draining is initiated by
210 * Execute halting Fetch1 causing Fetch2 to naturally drain.
211 * Branch predictions are ignored by Fetch1 during halt */
212 bool isDrained();
213};
214
215}
216
217#endif /* __CPU_MINOR_FETCH2_HH__ */