fetch.hh (2654:9559cfa91b9d) fetch.hh (2665:a124942bacb8)
/*
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the

--- 8 unchanged lines hidden ---

 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 */

#ifndef __CPU_O3_FETCH_HH__
#define __CPU_O3_FETCH_HH__

#include "base/statistics.hh"
#include "base/timebuf.hh"
#include "cpu/pc_event.hh"
#include "mem/mem_interface.hh"
#include "sim/eventq.hh"

class Sampler;

/**
 * DefaultFetch class handles both single-threaded and SMT fetch. Its
 * width is specified by the parameters; each cycle it tries to fetch
 * that many instructions. It supports using a branch predictor to
 * predict direction and targets.
 * It supports the idling functionality of the CPU by indicating to
 * the CPU when it is active and inactive.
 */
template <class Impl>
class DefaultFetch
{
  public:
    /** Typedefs from Impl. */
    typedef typename Impl::CPUPol CPUPol;
    typedef typename Impl::DynInst DynInst;
    typedef typename Impl::DynInstPtr DynInstPtr;
    typedef typename Impl::FullCPU FullCPU;
    typedef typename Impl::Params Params;

    /** Typedefs from the CPU policy. */
    typedef typename CPUPol::BPredUnit BPredUnit;
    typedef typename CPUPol::FetchStruct FetchStruct;
    typedef typename CPUPol::TimeStruct TimeStruct;

    /** Typedefs from ISA. */
    typedef TheISA::MachInst MachInst;
    typedef TheISA::ExtMachInst ExtMachInst;

  public:
    /** Overall fetch status. Used to determine if the CPU can
     * deschedule itself due to a lack of activity.
     */
    enum FetchStatus {
        Active,
        Inactive
    };

    /** Individual thread status. */
    enum ThreadStatus {
        Running,
        Idle,
        Squashing,
        Blocked,
        Fetching,
        TrapPending,
        QuiescePending,
        SwitchOut,
        IcacheMissStall,
        IcacheMissComplete
    };

    /** Fetching policy; add new policies here. */
    enum FetchPriority {
        SingleThread,
        RoundRobin,
        Branch,
        IQ,
        LSQ
    };

  private:
    /** Fetch status. */
    FetchStatus _status;

    /** Per-thread status. */
    ThreadStatus fetchStatus[Impl::MaxThreads];

    /** Fetch policy. */
    FetchPriority fetchPolicy;

    /** List that has the threads organized by priority. */
    std::list<unsigned> priorityList;

  public:
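    // Rough life cycle of an icache miss, as suggested by the declarations
    // in this file and the ThreadStatus values above (not quoted from the
    // .cc): fetchCacheLine() issues the access and the thread sits in
    // IcacheMissStall; when the miss returns, this event's process() calls
    // processCacheCompletion(), which can move the thread to
    // IcacheMissComplete so fetching may resume.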
    class CacheCompletionEvent : public Event
    {
      private:
        /** Memory request tied to this completion event. */
        MemReqPtr req;
        /** Pointer to fetch. */
        DefaultFetch *fetch;
        /** Thread id. */
//      unsigned threadId;

      public:
        /** Constructs a cache completion event, which tells fetch when the
         * cache miss is complete.
         */
        CacheCompletionEvent(MemReqPtr &_req, DefaultFetch *_fetch);

        /** Processes cache completion event. */
        virtual void process();
        /** Returns the description of the cache completion event. */
        virtual const char *description();
    };

  public:
    /** DefaultFetch constructor. */
    DefaultFetch(Params *params);

    /** Returns the name of fetch. */
    std::string name() const;

    /** Registers statistics. */
    void regStats();

    /** Sets CPU pointer. */
    void setCPU(FullCPU *cpu_ptr);

    /** Sets the main backwards communication time buffer pointer. */
    void setTimeBuffer(TimeBuffer<TimeStruct> *time_buffer);

    /** Sets pointer to list of active threads. */
    void setActiveThreads(std::list<unsigned> *at_ptr);

    /** Sets pointer to time buffer used to communicate to the next stage. */
    void setFetchQueue(TimeBuffer<FetchStruct> *fq_ptr);

    /** Sets pointer to page table. */
//  void setPageTable(PageTable *pt_ptr);

    /** Initialize stage. */
    void initStage();

    /** Processes cache completion event. */
    void processCacheCompletion(MemReqPtr &req);

    /** Switches out the fetch stage. */
    void switchOut();

    /** Performs the actual switch-out of the fetch stage. */
    void doSwitchOut();

    /** Takes over from another fetch stage when switching in. */
    void takeOverFrom();

    /** Returns whether or not fetch is switched out. */
    bool isSwitchedOut() { return switchedOut; }

    /** Wakes fetch from a quiesce. */
    void wakeFromQuiesce();

  private:
    /** Changes the status of this stage to active, and indicates this
     * to the CPU.
     */
    inline void switchToActive();

    /** Changes the status of this stage to inactive, and indicates
     * this to the CPU.
     */
    inline void switchToInactive();

    /**
     * Looks up in the branch predictor to see if the next PC should be
     * either next PC+=MachInst or a branch target.
     * @param next_PC Next PC variable passed in by reference. It is
     * expected to be set to the current PC; it will be updated with what
     * the next PC will be.
     * @return Whether or not a branch was predicted as taken.
     */
    bool lookupAndUpdateNextPC(DynInstPtr &inst, Addr &next_PC);

    /**
     * Fetches the cache line that contains fetch_PC. Returns any
     * fault that happened. Puts the data into the class variable
     * cacheData.
     * @param fetch_PC The PC address that is being fetched from.
     * @param ret_fault The fault reference that will be set to the result of
     * the icache access.
     * @param tid Thread id.
     * @return Any fault that occurred.
     */
    bool fetchCacheLine(Addr fetch_PC, Fault &ret_fault, unsigned tid);

    /** Squashes a specific thread and resets the PC. */
    inline void doSquash(const Addr &new_PC, unsigned tid);

    /** Squashes a specific thread and resets the PC. Also tells the CPU to
     * remove any instructions between fetch and decode that should be
     * squashed.
     */
    void squashFromDecode(const Addr &new_PC, const InstSeqNum &seq_num,
                          unsigned tid);

    /** Checks if a thread is stalled. */
    bool checkStall(unsigned tid) const;

    /** Updates overall fetch stage status; to be called at the end of each
     * cycle. */
    FetchStatus updateFetchStatus();

  public:
    /** Squashes a specific thread and resets the PC. Also tells the CPU to
     * remove any instructions that are not in the ROB. The source of this
     * squash should be the commit stage.
     */
    void squash(const Addr &new_PC, unsigned tid);

    /** Ticks the fetch stage, processing all input signals and fetching
     * as many instructions as possible.
     */
    void tick();

    /** Checks all input signals and updates the status as necessary.
     * @return Whether the status has changed due to input signals.
     */
    bool checkSignalsAndUpdate(unsigned tid);

    /** Does the actual fetching of instructions and passing them on to the
     * next stage.
     * @param status_change fetch() sets this variable if there was a status
     * change (i.e. switching to IcacheMissStall).
     */
    void fetch(bool &status_change);

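    // A rough per-cycle flow, inferred from the doc comments above rather
    // than copied from the implementation: tick() first reads the backwards
    // time buffer and calls checkSignalsAndUpdate() for each active thread,
    // then calls fetch(status_change) to bring in instructions, and finally
    // refreshes _status if anything changed, e.g.:
    //
    //     bool status_change = false;
    //     for each tid in *activeThreads:
    //         status_change |= checkSignalsAndUpdate(tid);
    //     fetch(status_change);
    //     if (status_change)
    //         _status = updateFetchStatus();
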
    /** Align a PC to the start of an I-cache block, folding in the ISA's
     * real-PC to fetch-PC conversion as well.
     */
    Addr icacheBlockAlignPC(Addr addr)
    {
        addr = TheISA::realPCToFetchPC(addr);
        return (addr & ~(cacheBlkMask));
    }
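    // For example, assuming cacheBlkMask == cacheBlkSize - 1 (as the masking
    // above implies), a 64-byte block gives cacheBlkMask == 0x3f, so a PC of
    // 0x1043 aligns down to the block starting at 0x1040.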

  private:
    /** Returns the appropriate thread to fetch, given the fetch policy. */
    int getFetchingThread(FetchPriority &fetch_priority);

    /** Returns the appropriate thread to fetch using a round robin policy. */
    int roundRobin();

    /** Returns the appropriate thread to fetch using the IQ count policy. */
    int iqCount();

    /** Returns the appropriate thread to fetch using the LSQ count policy. */
    int lsqCount();

    /** Returns the appropriate thread to fetch using the branch count
     * policy. */
    int branchCount();

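    // For instance, with fetchPolicy == RoundRobin, getFetchingThread()
    // would be expected to rotate through priorityList so each active thread
    // gets a turn, while IQ and LSQ would favor the thread with the fewest
    // instruction-queue or load/store-queue entries. (Interpretation of the
    // policy names above; the actual selection logic lives in the .cc file.)
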
  private:
    /** Pointer to the FullCPU. */
    FullCPU *cpu;

    /** Time buffer interface. */
    TimeBuffer<TimeStruct> *timeBuffer;

    /** Wire to get decode's information from backwards time buffer. */
    typename TimeBuffer<TimeStruct>::wire fromDecode;

--- 15 unchanged lines hidden ---

    typename TimeBuffer<FetchStruct>::wire toDecode;

    /** Icache interface. */
    MemInterface *icacheInterface;

    /** BPredUnit. */
    BPredUnit branchPred;

    /** Per-thread fetch PC. */
    Addr PC[Impl::MaxThreads];

    /** Per-thread next PC. */
    Addr nextPC[Impl::MaxThreads];

    /** Memory request used to access cache. */
    MemReqPtr memReq[Impl::MaxThreads];

    /** Variable that tracks if fetch has written to the time buffer this
     * cycle. Used to tell CPU if there is activity this cycle.
     */
    bool wroteToTimeBuffer;

    /** Tracks how many instructions have been fetched this cycle. */
    int numInst;

    /** Source of possible stalls. */
    struct Stalls {
        bool decode;
        bool rename;
        bool iew;
        bool commit;
    };

    /** Tracks which stages are telling fetch to stall. */
    Stalls stalls[Impl::MaxThreads];

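    // These flags are presumably set from the backwards time buffer
    // (fromDecode and the corresponding wires for the later stages) and
    // cleared once the stage unblocks; checkStall(tid) would then report a
    // stall while any of stalls[tid].{decode,rename,iew,commit} is set.
    // (Inferred from the declarations in this file, not from the .cc.)
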
    /** Decode to fetch delay, in ticks. */
    unsigned decodeToFetchDelay;

    /** Rename to fetch delay, in ticks. */
    unsigned renameToFetchDelay;

    /** IEW to fetch delay, in ticks. */
    unsigned iewToFetchDelay;

--- 6 unchanged lines hidden ---


    /** Cache block size. */
    int cacheBlkSize;

    /** Mask to get a cache block's address. */
    Addr cacheBlkMask;

    /** The cache line being fetched. */
    uint8_t *cacheData[Impl::MaxThreads];

    /** Size of instructions. */
    int instSize;

    /** Icache stall statistics. */
    Counter lastIcacheStall[Impl::MaxThreads];

    /** List of active threads. */
    std::list<unsigned> *activeThreads;

    /** Number of threads. */
    unsigned numThreads;

    /** Number of threads that are actively fetching. */
    unsigned numFetchingThreads;

    /** Thread ID being fetched. */
    int threadFetched;

    /** Whether an interrupt is pending. */
    bool interruptPending;

    /** Whether fetch is currently switched out. */
    bool switchedOut;

#if !FULL_SYSTEM
    /** Page table pointer. */
//  PageTable *pTable;
#endif

    // @todo: Consider making these vectors and tracking on a per-thread basis.
    /** Stat for total number of cycles stalled due to an icache miss. */
    Stats::Scalar<> icacheStallCycles;
    /** Stat for total number of fetched instructions. */
    Stats::Scalar<> fetchedInsts;
    /** Stat for total number of fetched branches. */
    Stats::Scalar<> fetchedBranches;
    /** Stat for total number of predicted branches. */
    Stats::Scalar<> predictedBranches;
    /** Stat for total number of cycles spent fetching. */
    Stats::Scalar<> fetchCycles;
    /** Stat for total number of cycles spent squashing. */
    Stats::Scalar<> fetchSquashCycles;
    /** Stat for total number of cycles spent idle. */
    Stats::Scalar<> fetchIdleCycles;
    /** Stat for total number of cycles spent blocked due to other stages in
     * the pipeline.
     */
    Stats::Scalar<> fetchBlockedCycles;
    /** Stat for total number of cycles spent in miscellaneous stalls. */
    Stats::Scalar<> fetchMiscStallCycles;
    /** Stat for total number of fetched cache lines. */
    Stats::Scalar<> fetchedCacheLines;

    /** Stat for number of outstanding icache accesses that were squashed. */
    Stats::Scalar<> fetchIcacheSquashes;
    /** Distribution of number of instructions fetched each cycle. */
    Stats::Distribution<> fetchNisnDist;
    /** Derived rate statistics (idle, branch, and overall fetch rates). */
    Stats::Formula idleRate;
    Stats::Formula branchRate;
    Stats::Formula fetchRate;
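    // These formulas are presumably wired up in regStats() as ratios over
    // total CPU cycles, e.g. idleRate ~ fetchIdleCycles / total cycles and
    // fetchRate ~ fetchedInsts / total cycles (illustrative only; the exact
    // definitions live in the .cc file).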
};

#endif //__CPU_O3_FETCH_HH__