iew_impl.hh: revision 10231:cb2e6950956d vs. revision 10239:592f0bb6bd6f
Revision 10239 adds a 2013 Advanced Micro Devices copyright line and splits the combined load/store-queue (LSQ) accounting reported back to rename into separate load-queue (LQ) and store-queue (SQ) counts (freeLQEntries/freeSQEntries and dispatchedToLQ/dispatchedToSQ).
1/*
2 * Copyright (c) 2010-2013 ARM Limited
3 * Copyright (c) 2013 Advanced Micro Devices, Inc.
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2004-2006 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Kevin Lim
41 */
42
43#ifndef __CPU_O3_IEW_IMPL_IMPL_HH__
44#define __CPU_O3_IEW_IMPL_IMPL_HH__
45
46// @todo: Fix the instantaneous communication among all the stages within
47// iew. There's a clear delay between issue and execute, yet backwards
48// communication happens simultaneously.
49
50#include <queue>
51
52#include "arch/utility.hh"
53#include "config/the_isa.hh"
54#include "cpu/checker/cpu.hh"
55#include "cpu/o3/fu_pool.hh"
56#include "cpu/o3/iew.hh"
57#include "cpu/timebuf.hh"
58#include "debug/Activity.hh"
59#include "debug/Drain.hh"
60#include "debug/IEW.hh"
61#include "debug/O3PipeView.hh"
62#include "params/DerivO3CPU.hh"
63
64using namespace std;
65
66template<class Impl>
67DefaultIEW<Impl>::DefaultIEW(O3CPU *_cpu, DerivO3CPUParams *params)
68 : issueToExecQueue(params->backComSize, params->forwardComSize),
69 cpu(_cpu),
70 instQueue(_cpu, this, params),
71 ldstQueue(_cpu, this, params),
72 fuPool(params->fuPool),
73 commitToIEWDelay(params->commitToIEWDelay),
74 renameToIEWDelay(params->renameToIEWDelay),
75 issueToExecuteDelay(params->issueToExecuteDelay),
76 dispatchWidth(params->dispatchWidth),
77 issueWidth(params->issueWidth),
78 wbOutstanding(0),
79 wbWidth(params->wbWidth),
80 numThreads(params->numThreads)
81{
82 if (dispatchWidth > Impl::MaxWidth)
83 fatal("dispatchWidth (%d) is larger than compiled limit (%d),\n"
84 "\tincrease MaxWidth in src/cpu/o3/impl.hh\n",
85 dispatchWidth, static_cast<int>(Impl::MaxWidth));
86 if (issueWidth > Impl::MaxWidth)
87 fatal("issueWidth (%d) is larger than compiled limit (%d),\n"
88 "\tincrease MaxWidth in src/cpu/o3/impl.hh\n",
89 issueWidth, static_cast<int>(Impl::MaxWidth));
90 if (wbWidth > Impl::MaxWidth)
91 fatal("wbWidth (%d) is larger than compiled limit (%d),\n"
92 "\tincrease MaxWidth in src/cpu/o3/impl.hh\n",
93 wbWidth, static_cast<int>(Impl::MaxWidth));
94
95 _status = Active;
96 exeStatus = Running;
97 wbStatus = Idle;
98
99 // Setup wire to read instructions coming from issue.
100 fromIssue = issueToExecQueue.getWire(-issueToExecuteDelay);
101
102 // Instruction queue needs the queue between issue and execute.
103 instQueue.setIssueToExecuteQueue(&issueToExecQueue);
104
105 for (ThreadID tid = 0; tid < numThreads; tid++) {
106 dispatchStatus[tid] = Running;
107 stalls[tid].commit = false;
108 fetchRedirect[tid] = false;
109 }
110
111 wbMax = wbWidth * params->wbDepth;
112
113 updateLSQNextCycle = false;
114
115 ableToIssue = true;
116
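// [note] skidBufferMax sizes the skid buffer to cover the renamed instructions that can
// already be in flight while IEW is blocking (renameToIEWDelay cycles of renameWidth,
// with a 3x headroom factor) plus one issueWidth group, so nothing sent by rename is lost.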
117 skidBufferMax = (3 * (renameToIEWDelay * params->renameWidth)) + issueWidth;
118}
119
120template <class Impl>
121std::string
122DefaultIEW<Impl>::name() const
123{
124 return cpu->name() + ".iew";
125}
126
127template <class Impl>
128void
129DefaultIEW<Impl>::regProbePoints()
130{
131 ppDispatch = new ProbePointArg<DynInstPtr>(cpu->getProbeManager(), "Dispatch");
132 ppMispredict = new ProbePointArg<DynInstPtr>(cpu->getProbeManager(), "Mispredict");
133}
134
135template <class Impl>
136void
137DefaultIEW<Impl>::regStats()
138{
139 using namespace Stats;
140
141 instQueue.regStats();
142 ldstQueue.regStats();
143
144 iewIdleCycles
145 .name(name() + ".iewIdleCycles")
146 .desc("Number of cycles IEW is idle");
147
148 iewSquashCycles
149 .name(name() + ".iewSquashCycles")
150 .desc("Number of cycles IEW is squashing");
151
152 iewBlockCycles
153 .name(name() + ".iewBlockCycles")
154 .desc("Number of cycles IEW is blocking");
155
156 iewUnblockCycles
157 .name(name() + ".iewUnblockCycles")
158 .desc("Number of cycles IEW is unblocking");
159
160 iewDispatchedInsts
161 .name(name() + ".iewDispatchedInsts")
162 .desc("Number of instructions dispatched to IQ");
163
164 iewDispSquashedInsts
165 .name(name() + ".iewDispSquashedInsts")
166 .desc("Number of squashed instructions skipped by dispatch");
167
168 iewDispLoadInsts
169 .name(name() + ".iewDispLoadInsts")
170 .desc("Number of dispatched load instructions");
171
172 iewDispStoreInsts
173 .name(name() + ".iewDispStoreInsts")
174 .desc("Number of dispatched store instructions");
175
176 iewDispNonSpecInsts
177 .name(name() + ".iewDispNonSpecInsts")
178 .desc("Number of dispatched non-speculative instructions");
179
180 iewIQFullEvents
181 .name(name() + ".iewIQFullEvents")
182 .desc("Number of times the IQ has become full, causing a stall");
183
184 iewLSQFullEvents
185 .name(name() + ".iewLSQFullEvents")
186 .desc("Number of times the LSQ has become full, causing a stall");
187
188 memOrderViolationEvents
189 .name(name() + ".memOrderViolationEvents")
190 .desc("Number of memory order violations");
191
192 predictedTakenIncorrect
193 .name(name() + ".predictedTakenIncorrect")
194 .desc("Number of branches that were predicted taken incorrectly");
195
196 predictedNotTakenIncorrect
197 .name(name() + ".predictedNotTakenIncorrect")
198 .desc("Number of branches that were predicted not taken incorrectly");
199
200 branchMispredicts
201 .name(name() + ".branchMispredicts")
202 .desc("Number of branch mispredicts detected at execute");
203
204 branchMispredicts = predictedTakenIncorrect + predictedNotTakenIncorrect;
205
206 iewExecutedInsts
207 .name(name() + ".iewExecutedInsts")
208 .desc("Number of executed instructions");
209
210 iewExecLoadInsts
211 .init(cpu->numThreads)
212 .name(name() + ".iewExecLoadInsts")
213 .desc("Number of load instructions executed")
214 .flags(total);
215
216 iewExecSquashedInsts
217 .name(name() + ".iewExecSquashedInsts")
218 .desc("Number of squashed instructions skipped in execute");
219
220 iewExecutedSwp
221 .init(cpu->numThreads)
222 .name(name() + ".exec_swp")
223 .desc("number of swp insts executed")
224 .flags(total);
225
226 iewExecutedNop
227 .init(cpu->numThreads)
228 .name(name() + ".exec_nop")
229 .desc("number of nop insts executed")
230 .flags(total);
231
232 iewExecutedRefs
233 .init(cpu->numThreads)
234 .name(name() + ".exec_refs")
235 .desc("number of memory reference insts executed")
236 .flags(total);
237
238 iewExecutedBranches
239 .init(cpu->numThreads)
240 .name(name() + ".exec_branches")
241 .desc("Number of branches executed")
242 .flags(total);
243
244 iewExecStoreInsts
245 .name(name() + ".exec_stores")
246 .desc("Number of stores executed")
247 .flags(total);
248 iewExecStoreInsts = iewExecutedRefs - iewExecLoadInsts;
249
250 iewExecRate
251 .name(name() + ".exec_rate")
252 .desc("Inst execution rate")
253 .flags(total);
254
255 iewExecRate = iewExecutedInsts / cpu->numCycles;
256
257 iewInstsToCommit
258 .init(cpu->numThreads)
259 .name(name() + ".wb_sent")
260 .desc("cumulative count of insts sent to commit")
261 .flags(total);
262
263 writebackCount
264 .init(cpu->numThreads)
265 .name(name() + ".wb_count")
266 .desc("cumulative count of insts written-back")
267 .flags(total);
268
269 producerInst
270 .init(cpu->numThreads)
271 .name(name() + ".wb_producers")
272 .desc("num instructions producing a value")
273 .flags(total);
274
275 consumerInst
276 .init(cpu->numThreads)
277 .name(name() + ".wb_consumers")
278 .desc("num instructions consuming a value")
279 .flags(total);
280
281 wbPenalized
282 .init(cpu->numThreads)
283 .name(name() + ".wb_penalized")
284        .desc("number of instructions required to write to 'other' IQ")
285 .flags(total);
286
287 wbPenalizedRate
288 .name(name() + ".wb_penalized_rate")
289 .desc ("fraction of instructions written-back that wrote to 'other' IQ")
290 .flags(total);
291
292 wbPenalizedRate = wbPenalized / writebackCount;
293
294 wbFanout
295 .name(name() + ".wb_fanout")
296 .desc("average fanout of values written-back")
297 .flags(total);
298
299 wbFanout = producerInst / consumerInst;
300
301 wbRate
302 .name(name() + ".wb_rate")
303 .desc("insts written-back per cycle")
304 .flags(total);
305 wbRate = writebackCount / cpu->numCycles;
306}
307
308template<class Impl>
309void
310DefaultIEW<Impl>::startupStage()
311{
312 for (ThreadID tid = 0; tid < numThreads; tid++) {
313 toRename->iewInfo[tid].usedIQ = true;
314 toRename->iewInfo[tid].freeIQEntries =
315 instQueue.numFreeEntries(tid);
316
317 toRename->iewInfo[tid].usedLSQ = true;
318 toRename->iewInfo[tid].freeLSQEntries =
319 ldstQueue.numFreeEntries(tid);
319 toRename->iewInfo[tid].freeLQEntries = ldstQueue.numFreeLoadEntries(tid);
320 toRename->iewInfo[tid].freeSQEntries = ldstQueue.numFreeStoreEntries(tid);
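// [10239] Rename is now told about free load-queue and store-queue entries separately
// (freeLQEntries/freeSQEntries) instead of the single freeLSQEntries count used in 10231.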
320 }
321
322 // Initialize the checker's dcache port here
323 if (cpu->checker) {
324 cpu->checker->setDcachePort(&cpu->getDataPort());
325 }
326
327 cpu->activateStage(O3CPU::IEWIdx);
328}
329
330template<class Impl>
331void
332DefaultIEW<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
333{
334 timeBuffer = tb_ptr;
335
336 // Setup wire to read information from time buffer, from commit.
337 fromCommit = timeBuffer->getWire(-commitToIEWDelay);
338
339 // Setup wire to write information back to previous stages.
340 toRename = timeBuffer->getWire(0);
341
342 toFetch = timeBuffer->getWire(0);
343
344 // Instruction queue also needs main time buffer.
345 instQueue.setTimeBuffer(tb_ptr);
346}
347
348template<class Impl>
349void
350DefaultIEW<Impl>::setRenameQueue(TimeBuffer<RenameStruct> *rq_ptr)
351{
352 renameQueue = rq_ptr;
353
354 // Setup wire to read information from rename queue.
355 fromRename = renameQueue->getWire(-renameToIEWDelay);
356}
357
358template<class Impl>
359void
360DefaultIEW<Impl>::setIEWQueue(TimeBuffer<IEWStruct> *iq_ptr)
361{
362 iewQueue = iq_ptr;
363
364 // Setup wire to write instructions to commit.
365 toCommit = iewQueue->getWire(0);
366}
367
368template<class Impl>
369void
370DefaultIEW<Impl>::setActiveThreads(list<ThreadID> *at_ptr)
371{
372 activeThreads = at_ptr;
373
374 ldstQueue.setActiveThreads(at_ptr);
375 instQueue.setActiveThreads(at_ptr);
376}
377
378template<class Impl>
379void
380DefaultIEW<Impl>::setScoreboard(Scoreboard *sb_ptr)
381{
382 scoreboard = sb_ptr;
383}
384
385template <class Impl>
386bool
387DefaultIEW<Impl>::isDrained() const
388{
389 bool drained(ldstQueue.isDrained());
390
391 for (ThreadID tid = 0; tid < numThreads; tid++) {
392 if (!insts[tid].empty()) {
393 DPRINTF(Drain, "%i: Insts not empty.\n", tid);
394 drained = false;
395 }
396 if (!skidBuffer[tid].empty()) {
397 DPRINTF(Drain, "%i: Skid buffer not empty.\n", tid);
398 drained = false;
399 }
400 }
401
402 // Also check the FU pool as instructions are "stored" in FU
403 // completion events until they are done and not accounted for
404 // above
405 if (drained && !fuPool->isDrained()) {
406 DPRINTF(Drain, "FU pool still busy.\n");
407 drained = false;
408 }
409
410 return drained;
411}
412
413template <class Impl>
414void
415DefaultIEW<Impl>::drainSanityCheck() const
416{
417 assert(isDrained());
418
419 instQueue.drainSanityCheck();
420 ldstQueue.drainSanityCheck();
421}
422
423template <class Impl>
424void
425DefaultIEW<Impl>::takeOverFrom()
426{
427 // Reset all state.
428 _status = Active;
429 exeStatus = Running;
430 wbStatus = Idle;
431
432 instQueue.takeOverFrom();
433 ldstQueue.takeOverFrom();
434 fuPool->takeOverFrom();
435
436 startupStage();
437 cpu->activityThisCycle();
438
439 for (ThreadID tid = 0; tid < numThreads; tid++) {
440 dispatchStatus[tid] = Running;
441 stalls[tid].commit = false;
442 fetchRedirect[tid] = false;
443 }
444
445 updateLSQNextCycle = false;
446
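// [note] Advancing the issue-to-execute queue through its full depth below presumably
// flushes any stale entries left over from before the takeover.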
447 for (int i = 0; i < issueToExecQueue.getSize(); ++i) {
448 issueToExecQueue.advance();
449 }
450}
451
452template<class Impl>
453void
454DefaultIEW<Impl>::squash(ThreadID tid)
455{
456 DPRINTF(IEW, "[tid:%i]: Squashing all instructions.\n", tid);
457
458 // Tell the IQ to start squashing.
459 instQueue.squash(tid);
460
461 // Tell the LDSTQ to start squashing.
462 ldstQueue.squash(fromCommit->commitInfo[tid].doneSeqNum, tid);
463 updatedQueues = true;
464
465 // Clear the skid buffer in case it has any data in it.
466 DPRINTF(IEW, "[tid:%i]: Removing skidbuffer instructions until [sn:%i].\n",
467 tid, fromCommit->commitInfo[tid].doneSeqNum);
468
469 while (!skidBuffer[tid].empty()) {
470        if (skidBuffer[tid].front()->isLoad() ||
471            skidBuffer[tid].front()->isStore() ) {
472            toRename->iewInfo[tid].dispatchedToLSQ++;
473        }
471        if (skidBuffer[tid].front()->isLoad()) {
472            toRename->iewInfo[tid].dispatchedToLQ++;
473        }
474        if (skidBuffer[tid].front()->isStore()) {
475            toRename->iewInfo[tid].dispatchedToSQ++;
476        }
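// [10239] Squashed skid-buffer instructions are now counted per queue (dispatchedToLQ for
// loads, dispatchedToSQ for stores), replacing the combined dispatchedToLSQ count from 10231.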
474
475 toRename->iewInfo[tid].dispatched++;
476
477 skidBuffer[tid].pop();
478 }
479
480 emptyRenameInsts(tid);
481}
482
483template<class Impl>
484void
485DefaultIEW<Impl>::squashDueToBranch(DynInstPtr &inst, ThreadID tid)
486{
487 DPRINTF(IEW, "[tid:%i]: Squashing from a specific instruction, PC: %s "
488 "[sn:%i].\n", tid, inst->pcState(), inst->seqNum);
489
490 if (!toCommit->squash[tid] ||
491 inst->seqNum < toCommit->squashedSeqNum[tid]) {
492 toCommit->squash[tid] = true;
493 toCommit->squashedSeqNum[tid] = inst->seqNum;
494 toCommit->branchTaken[tid] = inst->pcState().branching();
495
496 TheISA::PCState pc = inst->pcState();
497 TheISA::advancePC(pc, inst->staticInst);
498
499 toCommit->pc[tid] = pc;
500 toCommit->mispredictInst[tid] = inst;
501 toCommit->includeSquashInst[tid] = false;
502
503 wroteToTimeBuffer = true;
504 }
505
506}
507
508template<class Impl>
509void
510DefaultIEW<Impl>::squashDueToMemOrder(DynInstPtr &inst, ThreadID tid)
511{
512 DPRINTF(IEW, "[tid:%i]: Memory violation, squashing violator and younger "
513 "insts, PC: %s [sn:%i].\n", tid, inst->pcState(), inst->seqNum);
514 // Need to include inst->seqNum in the following comparison to cover the
515 // corner case when a branch misprediction and a memory violation for the
516 // same instruction (e.g. load PC) are detected in the same cycle. In this
517 // case the memory violator should take precedence over the branch
518 // misprediction because it requires the violator itself to be included in
519 // the squash.
520 if (!toCommit->squash[tid] ||
521 inst->seqNum <= toCommit->squashedSeqNum[tid]) {
522 toCommit->squash[tid] = true;
523
524 toCommit->squashedSeqNum[tid] = inst->seqNum;
525 toCommit->pc[tid] = inst->pcState();
526 toCommit->mispredictInst[tid] = NULL;
527
528 // Must include the memory violator in the squash.
529 toCommit->includeSquashInst[tid] = true;
530
531 wroteToTimeBuffer = true;
532 }
533}
534
535template<class Impl>
536void
537DefaultIEW<Impl>::squashDueToMemBlocked(DynInstPtr &inst, ThreadID tid)
538{
539 DPRINTF(IEW, "[tid:%i]: Memory blocked, squashing load and younger insts, "
540 "PC: %s [sn:%i].\n", tid, inst->pcState(), inst->seqNum);
541 if (!toCommit->squash[tid] ||
542 inst->seqNum < toCommit->squashedSeqNum[tid]) {
543 toCommit->squash[tid] = true;
544
545 toCommit->squashedSeqNum[tid] = inst->seqNum;
546 toCommit->pc[tid] = inst->pcState();
547 toCommit->mispredictInst[tid] = NULL;
548
549 // Must include the broadcasted SN in the squash.
550 toCommit->includeSquashInst[tid] = true;
551
552 ldstQueue.setLoadBlockedHandled(tid);
553
554 wroteToTimeBuffer = true;
555 }
556}
557
558template<class Impl>
559void
560DefaultIEW<Impl>::block(ThreadID tid)
561{
562 DPRINTF(IEW, "[tid:%u]: Blocking.\n", tid);
563
564 if (dispatchStatus[tid] != Blocked &&
565 dispatchStatus[tid] != Unblocking) {
566 toRename->iewBlock[tid] = true;
567 wroteToTimeBuffer = true;
568 }
569
570 // Add the current inputs to the skid buffer so they can be
571 // reprocessed when this stage unblocks.
572 skidInsert(tid);
573
574 dispatchStatus[tid] = Blocked;
575}
576
577template<class Impl>
578void
579DefaultIEW<Impl>::unblock(ThreadID tid)
580{
581 DPRINTF(IEW, "[tid:%i]: Reading instructions out of the skid "
582 "buffer %u.\n",tid, tid);
583
584    // If the skid buffer is empty, signal back to previous stages to unblock.
585 // Also switch status to running.
586 if (skidBuffer[tid].empty()) {
587 toRename->iewUnblock[tid] = true;
588 wroteToTimeBuffer = true;
589 DPRINTF(IEW, "[tid:%i]: Done unblocking.\n",tid);
590 dispatchStatus[tid] = Running;
591 }
592}
593
594template<class Impl>
595void
596DefaultIEW<Impl>::wakeDependents(DynInstPtr &inst)
597{
598 instQueue.wakeDependents(inst);
599}
600
601template<class Impl>
602void
603DefaultIEW<Impl>::rescheduleMemInst(DynInstPtr &inst)
604{
605 instQueue.rescheduleMemInst(inst);
606}
607
608template<class Impl>
609void
610DefaultIEW<Impl>::replayMemInst(DynInstPtr &inst)
611{
612 instQueue.replayMemInst(inst);
613}
614
615template<class Impl>
616void
617DefaultIEW<Impl>::instToCommit(DynInstPtr &inst)
618{
619 // This function should not be called after writebackInsts in a
620 // single cycle. That will cause problems with an instruction
621 // being added to the queue to commit without being processed by
622 // writebackInsts prior to being sent to commit.
623
624 // First check the time slot that this instruction will write
625 // to. If there are free write ports at the time, then go ahead
626 // and write the instruction to that time. If there are not,
627 // keep looking back to see where's the first time there's a
628 // free slot.
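// [note] wbMax = wbWidth * wbDepth (set in the constructor) bounds how far ahead this search may go.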
629 while ((*iewQueue)[wbCycle].insts[wbNumInst]) {
630 ++wbNumInst;
631 if (wbNumInst == wbWidth) {
632 ++wbCycle;
633 wbNumInst = 0;
634 }
635
636 assert((wbCycle * wbWidth + wbNumInst) <= wbMax);
637 }
638
639 DPRINTF(IEW, "Current wb cycle: %i, width: %i, numInst: %i\nwbActual:%i\n",
640 wbCycle, wbWidth, wbNumInst, wbCycle * wbWidth + wbNumInst);
641 // Add finished instruction to queue to commit.
642 (*iewQueue)[wbCycle].insts[wbNumInst] = inst;
643 (*iewQueue)[wbCycle].size++;
644}
645
646template <class Impl>
647unsigned
648DefaultIEW<Impl>::validInstsFromRename()
649{
650 unsigned inst_count = 0;
651
652 for (int i=0; i<fromRename->size; i++) {
653 if (!fromRename->insts[i]->isSquashed())
654 inst_count++;
655 }
656
657 return inst_count;
658}
659
660template<class Impl>
661void
662DefaultIEW<Impl>::skidInsert(ThreadID tid)
663{
664 DynInstPtr inst = NULL;
665
666 while (!insts[tid].empty()) {
667 inst = insts[tid].front();
668
669 insts[tid].pop();
670
671 DPRINTF(IEW,"[tid:%i]: Inserting [sn:%lli] PC:%s into "
672 "dispatch skidBuffer %i\n",tid, inst->seqNum,
673 inst->pcState(),tid);
674
675 skidBuffer[tid].push(inst);
676 }
677
678 assert(skidBuffer[tid].size() <= skidBufferMax &&
679 "Skidbuffer Exceeded Max Size");
680}
681
682template<class Impl>
683int
684DefaultIEW<Impl>::skidCount()
685{
686 int max=0;
687
688 list<ThreadID>::iterator threads = activeThreads->begin();
689 list<ThreadID>::iterator end = activeThreads->end();
690
691 while (threads != end) {
692 ThreadID tid = *threads++;
693 unsigned thread_count = skidBuffer[tid].size();
694 if (max < thread_count)
695 max = thread_count;
696 }
697
698 return max;
699}
700
701template<class Impl>
702bool
703DefaultIEW<Impl>::skidsEmpty()
704{
705 list<ThreadID>::iterator threads = activeThreads->begin();
706 list<ThreadID>::iterator end = activeThreads->end();
707
708 while (threads != end) {
709 ThreadID tid = *threads++;
710
711 if (!skidBuffer[tid].empty())
712 return false;
713 }
714
715 return true;
716}
717
718template <class Impl>
719void
720DefaultIEW<Impl>::updateStatus()
721{
722 bool any_unblocking = false;
723
724 list<ThreadID>::iterator threads = activeThreads->begin();
725 list<ThreadID>::iterator end = activeThreads->end();
726
727 while (threads != end) {
728 ThreadID tid = *threads++;
729
730 if (dispatchStatus[tid] == Unblocking) {
731 any_unblocking = true;
732 break;
733 }
734 }
735
736 // If there are no ready instructions waiting to be scheduled by the IQ,
737 // and there's no stores waiting to write back, and dispatch is not
738 // unblocking, then there is no internal activity for the IEW stage.
739 instQueue.intInstQueueReads++;
740 if (_status == Active && !instQueue.hasReadyInsts() &&
741 !ldstQueue.willWB() && !any_unblocking) {
742 DPRINTF(IEW, "IEW switching to idle\n");
743
744 deactivateStage();
745
746 _status = Inactive;
747 } else if (_status == Inactive && (instQueue.hasReadyInsts() ||
748 ldstQueue.willWB() ||
749 any_unblocking)) {
750 // Otherwise there is internal activity. Set to active.
751 DPRINTF(IEW, "IEW switching to active\n");
752
753 activateStage();
754
755 _status = Active;
756 }
757}
758
759template <class Impl>
760void
761DefaultIEW<Impl>::resetEntries()
762{
763 instQueue.resetEntries();
764 ldstQueue.resetEntries();
765}
766
767template <class Impl>
768void
769DefaultIEW<Impl>::readStallSignals(ThreadID tid)
770{
771 if (fromCommit->commitBlock[tid]) {
772 stalls[tid].commit = true;
773 }
774
775 if (fromCommit->commitUnblock[tid]) {
776 assert(stalls[tid].commit);
777 stalls[tid].commit = false;
778 }
779}
780
781template <class Impl>
782bool
783DefaultIEW<Impl>::checkStall(ThreadID tid)
784{
785 bool ret_val(false);
786
787 if (stalls[tid].commit) {
788 DPRINTF(IEW,"[tid:%i]: Stall from Commit stage detected.\n",tid);
789 ret_val = true;
790 } else if (instQueue.isFull(tid)) {
791 DPRINTF(IEW,"[tid:%i]: Stall: IQ is full.\n",tid);
792 ret_val = true;
793 } else if (ldstQueue.isFull(tid)) {
794 DPRINTF(IEW,"[tid:%i]: Stall: LSQ is full\n",tid);
795
796 if (ldstQueue.numLoads(tid) > 0 ) {
797
798 DPRINTF(IEW,"[tid:%i]: LSQ oldest load: [sn:%i] \n",
799 tid,ldstQueue.getLoadHeadSeqNum(tid));
800 }
801
802 if (ldstQueue.numStores(tid) > 0) {
803
804 DPRINTF(IEW,"[tid:%i]: LSQ oldest store: [sn:%i] \n",
805 tid,ldstQueue.getStoreHeadSeqNum(tid));
806 }
807
808 ret_val = true;
809 } else if (ldstQueue.isStalled(tid)) {
810 DPRINTF(IEW,"[tid:%i]: Stall: LSQ stall detected.\n",tid);
811 ret_val = true;
812 }
813
814 return ret_val;
815}
816
817template <class Impl>
818void
819DefaultIEW<Impl>::checkSignalsAndUpdate(ThreadID tid)
820{
821 // Check if there's a squash signal, squash if there is
822 // Check stall signals, block if there is.
823 // If status was Blocked
824 // if so then go to unblocking
825 // If status was Squashing
826 // check if squashing is not high. Switch to running this cycle.
827
828 readStallSignals(tid);
829
830 if (fromCommit->commitInfo[tid].squash) {
831 squash(tid);
832
833 if (dispatchStatus[tid] == Blocked ||
834 dispatchStatus[tid] == Unblocking) {
835 toRename->iewUnblock[tid] = true;
836 wroteToTimeBuffer = true;
837 }
838
839 dispatchStatus[tid] = Squashing;
840 fetchRedirect[tid] = false;
841 return;
842 }
843
844 if (fromCommit->commitInfo[tid].robSquashing) {
845 DPRINTF(IEW, "[tid:%i]: ROB is still squashing.\n", tid);
846
847 dispatchStatus[tid] = Squashing;
848 emptyRenameInsts(tid);
849 wroteToTimeBuffer = true;
850 return;
851 }
852
853 if (checkStall(tid)) {
854 block(tid);
855 dispatchStatus[tid] = Blocked;
856 return;
857 }
858
859 if (dispatchStatus[tid] == Blocked) {
860 // Status from previous cycle was blocked, but there are no more stall
861 // conditions. Switch over to unblocking.
862 DPRINTF(IEW, "[tid:%i]: Done blocking, switching to unblocking.\n",
863 tid);
864
865 dispatchStatus[tid] = Unblocking;
866
867 unblock(tid);
868
869 return;
870 }
871
872 if (dispatchStatus[tid] == Squashing) {
873 // Switch status to running if rename isn't being told to block or
874 // squash this cycle.
875 DPRINTF(IEW, "[tid:%i]: Done squashing, switching to running.\n",
876 tid);
877
878 dispatchStatus[tid] = Running;
879
880 return;
881 }
882}
883
884template <class Impl>
885void
886DefaultIEW<Impl>::sortInsts()
887{
888 int insts_from_rename = fromRename->size;
889#ifdef DEBUG
890 for (ThreadID tid = 0; tid < numThreads; tid++)
891 assert(insts[tid].empty());
892#endif
893 for (int i = 0; i < insts_from_rename; ++i) {
894 insts[fromRename->insts[i]->threadNumber].push(fromRename->insts[i]);
895 }
896}
897
898template <class Impl>
899void
900DefaultIEW<Impl>::emptyRenameInsts(ThreadID tid)
901{
902 DPRINTF(IEW, "[tid:%i]: Removing incoming rename instructions\n", tid);
903
904 while (!insts[tid].empty()) {
905
906        if (insts[tid].front()->isLoad() ||
907            insts[tid].front()->isStore() ) {
908            toRename->iewInfo[tid].dispatchedToLSQ++;
909        }
909        if (insts[tid].front()->isLoad()) {
910            toRename->iewInfo[tid].dispatchedToLQ++;
911        }
912        if (insts[tid].front()->isStore()) {
913            toRename->iewInfo[tid].dispatchedToSQ++;
914        }
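// [10239] Same LQ/SQ split as above, applied when draining the incoming rename
// instructions in emptyRenameInsts().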
910
911 toRename->iewInfo[tid].dispatched++;
912
913 insts[tid].pop();
914 }
915}
916
917template <class Impl>
918void
919DefaultIEW<Impl>::wakeCPU()
920{
921 cpu->wakeCPU();
922}
923
924template <class Impl>
925void
926DefaultIEW<Impl>::activityThisCycle()
927{
928 DPRINTF(Activity, "Activity this cycle.\n");
929 cpu->activityThisCycle();
930}
931
932template <class Impl>
933inline void
934DefaultIEW<Impl>::activateStage()
935{
936 DPRINTF(Activity, "Activating stage.\n");
937 cpu->activateStage(O3CPU::IEWIdx);
938}
939
940template <class Impl>
941inline void
942DefaultIEW<Impl>::deactivateStage()
943{
944 DPRINTF(Activity, "Deactivating stage.\n");
945 cpu->deactivateStage(O3CPU::IEWIdx);
946}
947
948template<class Impl>
949void
950DefaultIEW<Impl>::dispatch(ThreadID tid)
951{
952 // If status is Running or idle,
953 // call dispatchInsts()
954 // If status is Unblocking,
955 // buffer any instructions coming from rename
956 // continue trying to empty skid buffer
957 // check if stall conditions have passed
958
959 if (dispatchStatus[tid] == Blocked) {
960 ++iewBlockCycles;
961
962 } else if (dispatchStatus[tid] == Squashing) {
963 ++iewSquashCycles;
964 }
965
966 // Dispatch should try to dispatch as many instructions as its bandwidth
967 // will allow, as long as it is not currently blocked.
968 if (dispatchStatus[tid] == Running ||
969 dispatchStatus[tid] == Idle) {
970 DPRINTF(IEW, "[tid:%i] Not blocked, so attempting to run "
971 "dispatch.\n", tid);
972
973 dispatchInsts(tid);
974 } else if (dispatchStatus[tid] == Unblocking) {
975 // Make sure that the skid buffer has something in it if the
976 // status is unblocking.
977 assert(!skidsEmpty());
978
979 // If the status was unblocking, then instructions from the skid
980 // buffer were used. Remove those instructions and handle
981 // the rest of unblocking.
982 dispatchInsts(tid);
983
984 ++iewUnblockCycles;
985
986 if (validInstsFromRename()) {
987 // Add the current inputs to the skid buffer so they can be
988 // reprocessed when this stage unblocks.
989 skidInsert(tid);
990 }
991
992 unblock(tid);
993 }
994}
995
996template <class Impl>
997void
998DefaultIEW<Impl>::dispatchInsts(ThreadID tid)
999{
1000 // Obtain instructions from skid buffer if unblocking, or queue from rename
1001 // otherwise.
1002 std::queue<DynInstPtr> &insts_to_dispatch =
1003 dispatchStatus[tid] == Unblocking ?
1004 skidBuffer[tid] : insts[tid];
1005
1006 int insts_to_add = insts_to_dispatch.size();
1007
1008 DynInstPtr inst;
1009 bool add_to_iq = false;
1010 int dis_num_inst = 0;
1011
1012 // Loop through the instructions, putting them in the instruction
1013 // queue.
1014 for ( ; dis_num_inst < insts_to_add &&
1015 dis_num_inst < dispatchWidth;
1016 ++dis_num_inst)
1017 {
1018 inst = insts_to_dispatch.front();
1019
1020 if (dispatchStatus[tid] == Unblocking) {
1021 DPRINTF(IEW, "[tid:%i]: Issue: Examining instruction from skid "
1022 "buffer\n", tid);
1023 }
1024
1025 // Make sure there's a valid instruction there.
1026 assert(inst);
1027
1028 DPRINTF(IEW, "[tid:%i]: Issue: Adding PC %s [sn:%lli] [tid:%i] to "
1029 "IQ.\n",
1030 tid, inst->pcState(), inst->seqNum, inst->threadNumber);
1031
1032 // Be sure to mark these instructions as ready so that the
1033 // commit stage can go ahead and execute them, and mark
1034 // them as issued so the IQ doesn't reprocess them.
1035
1036 // Check for squashed instructions.
1037 if (inst->isSquashed()) {
1038 DPRINTF(IEW, "[tid:%i]: Issue: Squashed instruction encountered, "
1039 "not adding to IQ.\n", tid);
1040
1041 ++iewDispSquashedInsts;
1042
1043 insts_to_dispatch.pop();
1044
1045 //Tell Rename That An Instruction has been processed
915
916 toRename->iewInfo[tid].dispatched++;
917
918 insts[tid].pop();
919 }
920}
921
922template <class Impl>
923void
924DefaultIEW<Impl>::wakeCPU()
925{
926 cpu->wakeCPU();
927}
928
929template <class Impl>
930void
931DefaultIEW<Impl>::activityThisCycle()
932{
933 DPRINTF(Activity, "Activity this cycle.\n");
934 cpu->activityThisCycle();
935}
936
937template <class Impl>
938inline void
939DefaultIEW<Impl>::activateStage()
940{
941 DPRINTF(Activity, "Activating stage.\n");
942 cpu->activateStage(O3CPU::IEWIdx);
943}
944
945template <class Impl>
946inline void
947DefaultIEW<Impl>::deactivateStage()
948{
949 DPRINTF(Activity, "Deactivating stage.\n");
950 cpu->deactivateStage(O3CPU::IEWIdx);
951}
952
953template<class Impl>
954void
955DefaultIEW<Impl>::dispatch(ThreadID tid)
956{
957 // If status is Running or idle,
958 // call dispatchInsts()
959 // If status is Unblocking,
960 // buffer any instructions coming from rename
961 // continue trying to empty skid buffer
962 // check if stall conditions have passed
963
964 if (dispatchStatus[tid] == Blocked) {
965 ++iewBlockCycles;
966
967 } else if (dispatchStatus[tid] == Squashing) {
968 ++iewSquashCycles;
969 }
970
971 // Dispatch should try to dispatch as many instructions as its bandwidth
972 // will allow, as long as it is not currently blocked.
973 if (dispatchStatus[tid] == Running ||
974 dispatchStatus[tid] == Idle) {
975 DPRINTF(IEW, "[tid:%i] Not blocked, so attempting to run "
976 "dispatch.\n", tid);
977
978 dispatchInsts(tid);
979 } else if (dispatchStatus[tid] == Unblocking) {
980 // Make sure that the skid buffer has something in it if the
981 // status is unblocking.
982 assert(!skidsEmpty());
983
984 // If the status was unblocking, then instructions from the skid
985 // buffer were used. Remove those instructions and handle
986 // the rest of unblocking.
987 dispatchInsts(tid);
988
989 ++iewUnblockCycles;
990
991 if (validInstsFromRename()) {
992 // Add the current inputs to the skid buffer so they can be
993 // reprocessed when this stage unblocks.
994 skidInsert(tid);
995 }
996
997 unblock(tid);
998 }
999}
1000
1001template <class Impl>
1002void
1003DefaultIEW<Impl>::dispatchInsts(ThreadID tid)
1004{
1005 // Obtain instructions from skid buffer if unblocking, or queue from rename
1006 // otherwise.
1007 std::queue<DynInstPtr> &insts_to_dispatch =
1008 dispatchStatus[tid] == Unblocking ?
1009 skidBuffer[tid] : insts[tid];
1010
1011 int insts_to_add = insts_to_dispatch.size();
1012
1013 DynInstPtr inst;
1014 bool add_to_iq = false;
1015 int dis_num_inst = 0;
1016
1017 // Loop through the instructions, putting them in the instruction
1018 // queue.
1019 for ( ; dis_num_inst < insts_to_add &&
1020 dis_num_inst < dispatchWidth;
1021 ++dis_num_inst)
1022 {
1023 inst = insts_to_dispatch.front();
1024
1025 if (dispatchStatus[tid] == Unblocking) {
1026 DPRINTF(IEW, "[tid:%i]: Issue: Examining instruction from skid "
1027 "buffer\n", tid);
1028 }
1029
1030 // Make sure there's a valid instruction there.
1031 assert(inst);
1032
1033 DPRINTF(IEW, "[tid:%i]: Issue: Adding PC %s [sn:%lli] [tid:%i] to "
1034 "IQ.\n",
1035 tid, inst->pcState(), inst->seqNum, inst->threadNumber);
1036
1037 // Be sure to mark these instructions as ready so that the
1038 // commit stage can go ahead and execute them, and mark
1039 // them as issued so the IQ doesn't reprocess them.
1040
1041 // Check for squashed instructions.
1042 if (inst->isSquashed()) {
1043 DPRINTF(IEW, "[tid:%i]: Issue: Squashed instruction encountered, "
1044 "not adding to IQ.\n", tid);
1045
1046 ++iewDispSquashedInsts;
1047
1048 insts_to_dispatch.pop();
1049
1050 // Tell rename that an instruction has been processed.
1046 if (inst->isLoad() || inst->isStore()) {
1047 toRename->iewInfo[tid].dispatchedToLSQ++;
1048 }
1051 if (inst->isLoad()) {
1052 toRename->iewInfo[tid].dispatchedToLQ++;
1053 }
1054 if (inst->isStore()) {
1055 toRename->iewInfo[tid].dispatchedToSQ++;
1056 }
1057
1049 toRename->iewInfo[tid].dispatched++;
1050
1051 continue;
1052 }
1053
1054 // Check for full conditions.
1055 if (instQueue.isFull(tid)) {
1056 DPRINTF(IEW, "[tid:%i]: Issue: IQ has become full.\n", tid);
1057
1058 // Call function to start blocking.
1059 block(tid);
1060
1061 // Set unblock to false. Special case where we are using
1062 // skid-buffer (unblocking) instructions but the IQ still
1063 // becomes full.
1064 toRename->iewUnblock[tid] = false;
1065
1066 ++iewIQFullEvents;
1067 break;
1068 } else if (ldstQueue.isFull(tid)) {
1069 DPRINTF(IEW, "[tid:%i]: Issue: LSQ has become full.\n",tid);
1070
1071 // Call function to start blocking.
1072 block(tid);
1073
1074 // Set unblock to false. Special case where we are using
1075 // skid-buffer (unblocking) instructions but the LSQ still
1076 // becomes full.
1077 toRename->iewUnblock[tid] = false;
1078
1079 ++iewLSQFullEvents;
1080 break;
1081 }
1082
1083 // Otherwise issue the instruction just fine.
1084 if (inst->isLoad()) {
1085 DPRINTF(IEW, "[tid:%i]: Issue: Memory instruction "
1086 "encountered, adding to LSQ.\n", tid);
1087
1088 // Reserve a spot in the load store queue for this
1089 // memory access.
1090 ldstQueue.insertLoad(inst);
1091
1092 ++iewDispLoadInsts;
1093
1094 add_to_iq = true;
1095
1096 toRename->iewInfo[tid].dispatchedToLSQ++;
1105 toRename->iewInfo[tid].dispatchedToLQ++;
1097 } else if (inst->isStore()) {
1098 DPRINTF(IEW, "[tid:%i]: Issue: Memory instruction "
1099 "encountered, adding to LSQ.\n", tid);
1100
1101 ldstQueue.insertStore(inst);
1102
1103 ++iewDispStoreInsts;
1104
1105 if (inst->isStoreConditional()) {
1106 // Store conditionals need to be set as "canCommit()"
1107 // so that commit can process them when they reach the
1108 // head of commit.
1109 // @todo: This is somewhat specific to Alpha.
1110 inst->setCanCommit();
1111 instQueue.insertNonSpec(inst);
1112 add_to_iq = false;
1113
1114 ++iewDispNonSpecInsts;
1115 } else {
1116 add_to_iq = true;
1117 }
1118
1119 toRename->iewInfo[tid].dispatchedToLSQ++;
1128 toRename->iewInfo[tid].dispatchedToSQ++;
1120 } else if (inst->isMemBarrier() || inst->isWriteBarrier()) {
1121 // Same as non-speculative stores.
1122 inst->setCanCommit();
1123 instQueue.insertBarrier(inst);
1124 add_to_iq = false;
1125 } else if (inst->isNop()) {
1126 DPRINTF(IEW, "[tid:%i]: Issue: Nop instruction encountered, "
1127 "skipping.\n", tid);
1128
1129 inst->setIssued();
1130 inst->setExecuted();
1131 inst->setCanCommit();
1132
1133 instQueue.recordProducer(inst);
1134
1135 iewExecutedNop[tid]++;
1136
1137 add_to_iq = false;
1138 } else if (inst->isExecuted()) {
1139 assert(0 && "Instruction shouldn't be executed.\n");
1140 DPRINTF(IEW, "Issue: Executed branch encountered, "
1141 "skipping.\n");
1142
1143 inst->setIssued();
1144 inst->setCanCommit();
1145
1146 instQueue.recordProducer(inst);
1147
1148 add_to_iq = false;
1149 } else {
1150 add_to_iq = true;
1151 }
1152 if (inst->isNonSpeculative()) {
1153 DPRINTF(IEW, "[tid:%i]: Issue: Nonspeculative instruction "
1154 "encountered, skipping.\n", tid);
1155
1156 // Same as non-speculative stores.
1157 inst->setCanCommit();
1158
1159 // Specifically insert it as nonspeculative.
1160 instQueue.insertNonSpec(inst);
1161
1162 ++iewDispNonSpecInsts;
1163
1164 add_to_iq = false;
1165 }
1166
1167 // If the instruction queue is not full, then add the
1168 // instruction.
1169 if (add_to_iq) {
1170 instQueue.insert(inst);
1171 }
1172
1173 insts_to_dispatch.pop();
1174
1175 toRename->iewInfo[tid].dispatched++;
1176
1177 ++iewDispatchedInsts;
1178
1179#if TRACING_ON
1180 inst->dispatchTick = curTick() - inst->fetchTick;
1181#endif
1182 ppDispatch->notify(inst);
1183 }
1184
1185 if (!insts_to_dispatch.empty()) {
1186 DPRINTF(IEW,"[tid:%i]: Issue: Bandwidth Full. Blocking.\n", tid);
1187 block(tid);
1188 toRename->iewUnblock[tid] = false;
1189 }
1190
1191 if (dispatchStatus[tid] == Idle && dis_num_inst) {
1192 dispatchStatus[tid] = Running;
1193
1194 updatedQueues = true;
1195 }
1196
1197 dis_num_inst = 0;
1198}
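// One visible change in this revision of dispatchInsts() is that the credit
// dispatch reports back to rename is split: the old single dispatchedToLSQ
// count becomes separate dispatchedToLQ and dispatchedToSQ counts. A minimal
// sketch of that bookkeeping follows, assuming hypothetical Inst and IewInfo
// stand-ins for the real DynInst and time-buffer structures; it is an
// illustration, not the gem5 code.

#include <vector>

namespace credit_sketch {

struct Inst {
    bool load = false;
    bool store = false;
};

// Per-thread counts sent back to rename so it can free queue credits.
struct IewInfo {
    int dispatched = 0;
    int dispatchedToLQ = 0;   // previously folded into one dispatchedToLSQ
    int dispatchedToSQ = 0;
};

inline void
accountDispatch(const std::vector<Inst> &group, IewInfo &info)
{
    for (const Inst &inst : group) {
        if (inst.load)
            ++info.dispatchedToLQ;
        if (inst.store)
            ++info.dispatchedToSQ;
        ++info.dispatched;
    }
}

} // namespace credit_sketch

// With separate counts, rename can track load-queue and store-queue
// occupancy independently rather than throttling on one combined LSQ total.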
1199
1200template <class Impl>
1201void
1202DefaultIEW<Impl>::printAvailableInsts()
1203{
1204 int inst = 0;
1205
1206 std::cout << "Available Instructions: ";
1207
1208 while (fromIssue->insts[inst]) {
1209
1210 if (inst%3==0) std::cout << "\n\t";
1211
1212 std::cout << "PC: " << fromIssue->insts[inst]->pcState()
1213 << " TN: " << fromIssue->insts[inst]->threadNumber
1214 << " SN: " << fromIssue->insts[inst]->seqNum << " | ";
1215
1216 inst++;
1217
1218 }
1219
1220 std::cout << "\n";
1221}
1222
1223template <class Impl>
1224void
1225DefaultIEW<Impl>::executeInsts()
1226{
1227 wbNumInst = 0;
1228 wbCycle = 0;
1229
1230 list<ThreadID>::iterator threads = activeThreads->begin();
1231 list<ThreadID>::iterator end = activeThreads->end();
1232
1233 while (threads != end) {
1234 ThreadID tid = *threads++;
1235 fetchRedirect[tid] = false;
1236 }
1237
1238 // Uncomment this if you want to see all available instructions.
1239 // @todo This doesn't actually work anymore, we should fix it.
1240// printAvailableInsts();
1241
1242 // Execute/writeback any instructions that are available.
1243 int insts_to_execute = fromIssue->size;
1244 int inst_num = 0;
1245 for (; inst_num < insts_to_execute;
1246 ++inst_num) {
1247
1248 DPRINTF(IEW, "Execute: Executing instructions from IQ.\n");
1249
1250 DynInstPtr inst = instQueue.getInstToExecute();
1251
1252 DPRINTF(IEW, "Execute: Processing PC %s, [tid:%i] [sn:%i].\n",
1253 inst->pcState(), inst->threadNumber,inst->seqNum);
1254
1255 // Check if the instruction is squashed; if so then skip it
1256 if (inst->isSquashed()) {
1257 DPRINTF(IEW, "Execute: Instruction was squashed. PC: %s, [tid:%i]"
1258 " [sn:%i]\n", inst->pcState(), inst->threadNumber,
1259 inst->seqNum);
1260
1261 // Consider this instruction executed so that commit can go
1262 // ahead and retire the instruction.
1263 inst->setExecuted();
1264
1265 // Not sure if I should set this here or just let commit try to
1266 // commit any squashed instructions. I like the latter a bit more.
1267 inst->setCanCommit();
1268
1269 ++iewExecSquashedInsts;
1270
1271 decrWb(inst->seqNum);
1272 continue;
1273 }
1274
1275 Fault fault = NoFault;
1276
1277 // Execute instruction.
1278 // Note that if the instruction faults, it will be handled
1279 // at the commit stage.
1280 if (inst->isMemRef()) {
1281 DPRINTF(IEW, "Execute: Calculating address for memory "
1282 "reference.\n");
1283
1284 // Tell the LDSTQ to execute this instruction (if it is a load).
1285 if (inst->isLoad()) {
1286 // Loads will mark themselves as executed, and their writeback
1287 // event adds the instruction to the queue to commit
1288 fault = ldstQueue.executeLoad(inst);
1289
1290 if (inst->isTranslationDelayed() &&
1291 fault == NoFault) {
1292 // A hw page table walk is currently going on; the
1293 // instruction must be deferred.
1294 DPRINTF(IEW, "Execute: Delayed translation, deferring "
1295 "load.\n");
1296 instQueue.deferMemInst(inst);
1297 continue;
1298 }
1299
1300 if (inst->isDataPrefetch() || inst->isInstPrefetch()) {
1301 inst->fault = NoFault;
1302 }
1303 } else if (inst->isStore()) {
1304 fault = ldstQueue.executeStore(inst);
1305
1306 if (inst->isTranslationDelayed() &&
1307 fault == NoFault) {
1308 // A hw page table walk is currently going on; the
1309 // instruction must be deferred.
1310 DPRINTF(IEW, "Execute: Delayed translation, deferring "
1311 "store.\n");
1312 instQueue.deferMemInst(inst);
1313 continue;
1314 }
1315
1316 // If the store had a fault then it may not have a mem req
1317 if (fault != NoFault || !inst->readPredicate() ||
1318 !inst->isStoreConditional()) {
1319 // If the instruction faulted, then we need to send it along
1320 // to commit without the instruction completing.
1321 // Send this instruction to commit; also make sure the IEW stage
1322 // realizes there is activity.
1323 inst->setExecuted();
1324 instToCommit(inst);
1325 activityThisCycle();
1326 }
1327
1328 // Store conditionals will mark themselves as
1329 // executed, and their writeback event will add the
1330 // instruction to the queue to commit.
1331 } else {
1332 panic("Unexpected memory type!\n");
1333 }
1334
1335 } else {
1336 // If the instruction has already faulted, then skip executing it.
1337 // Such a case can happen when it faulted during ITLB translation.
1338 // If we execute the instruction (even if it's a nop) the fault
1339 // will be replaced and we will lose it.
1340 if (inst->getFault() == NoFault) {
1341 inst->execute();
1342 if (!inst->readPredicate())
1343 inst->forwardOldRegs();
1344 }
1345
1346 inst->setExecuted();
1347
1348 instToCommit(inst);
1349 }
1350
1351 updateExeInstStats(inst);
1352
1353 // Check if the branch prediction was correct; if not, then we need
1354 // to tell commit to squash in-flight instructions. Only
1355 // handle this if there hasn't already been something that
1356 // redirects fetch in this group of instructions.
1357
1358 // This probably needs to prioritize the redirects if a different
1359 // scheduler is used. Currently the scheduler schedules the oldest
1360 // instruction first, so the branch resolution order will be correct.
1361 ThreadID tid = inst->threadNumber;
1362
1363 if (!fetchRedirect[tid] ||
1364 !toCommit->squash[tid] ||
1365 toCommit->squashedSeqNum[tid] > inst->seqNum) {
1366
1367 // Prevent testing for misprediction on load instructions
1368 // that have not been executed.
1369 bool loadNotExecuted = !inst->isExecuted() && inst->isLoad();
1370
1371 if (inst->mispredicted() && !loadNotExecuted) {
1372 fetchRedirect[tid] = true;
1373
1374 DPRINTF(IEW, "Execute: Branch mispredict detected.\n");
1375 DPRINTF(IEW, "Predicted target was PC: %s.\n",
1376 inst->readPredTarg());
1377 DPRINTF(IEW, "Execute: Redirecting fetch to PC: %s.\n",
1378 inst->pcState());
1379 // If incorrect, then signal the ROB that it must be squashed.
1380 squashDueToBranch(inst, tid);
1381
1382 ppMispredict->notify(inst);
1383
1384 if (inst->readPredTaken()) {
1385 predictedTakenIncorrect++;
1386 } else {
1387 predictedNotTakenIncorrect++;
1388 }
1389 } else if (ldstQueue.violation(tid)) {
1390 assert(inst->isMemRef());
1391 // If there was an ordering violation, then get the
1392 // DynInst that caused the violation. Note that this
1393 // clears the violation signal.
1394 DynInstPtr violator;
1395 violator = ldstQueue.getMemDepViolator(tid);
1396
1397 DPRINTF(IEW, "LDSTQ detected a violation. Violator PC: %s "
1398 "[sn:%lli], inst PC: %s [sn:%lli]. Addr is: %#x.\n",
1399 violator->pcState(), violator->seqNum,
1400 inst->pcState(), inst->seqNum, inst->physEffAddr);
1401
1402 fetchRedirect[tid] = true;
1403
1404 // Tell the instruction queue that a violation has occurred.
1405 instQueue.violation(inst, violator);
1406
1407 // Squash.
1408 squashDueToMemOrder(violator, tid);
1409
1410 ++memOrderViolationEvents;
1411 } else if (ldstQueue.loadBlocked(tid) &&
1412 !ldstQueue.isLoadBlockedHandled(tid)) {
1413 fetchRedirect[tid] = true;
1414
1415 DPRINTF(IEW, "Load operation couldn't execute because the "
1416 "memory system is blocked. PC: %s [sn:%lli]\n",
1417 inst->pcState(), inst->seqNum);
1418
1419 squashDueToMemBlocked(inst, tid);
1420 }
1421 } else {
1422 // Reset any state associated with redirects that will not
1423 // be used.
1424 if (ldstQueue.violation(tid)) {
1425 assert(inst->isMemRef());
1426
1427 DynInstPtr violator = ldstQueue.getMemDepViolator(tid);
1428
1429 DPRINTF(IEW, "LDSTQ detected a violation. Violator PC: "
1430 "%s, inst PC: %s. Addr is: %#x.\n",
1431 violator->pcState(), inst->pcState(),
1432 inst->physEffAddr);
1433 DPRINTF(IEW, "Violation will not be handled because "
1434 "already squashing\n");
1435
1436 ++memOrderViolationEvents;
1437 }
1438 if (ldstQueue.loadBlocked(tid) &&
1439 !ldstQueue.isLoadBlockedHandled(tid)) {
1440 DPRINTF(IEW, "Load operation couldn't execute because the "
1441 "memory system is blocked. PC: %s [sn:%lli]\n",
1442 inst->pcState(), inst->seqNum);
1443 DPRINTF(IEW, "Blocked load will not be handled because "
1444 "already squashing\n");
1445
1446 ldstQueue.setLoadBlockedHandled(tid);
1447 }
1448
1449 }
1450 }
1451
1452 // Update and record activity if we processed any instructions.
1453 if (inst_num) {
1454 if (exeStatus == Idle) {
1455 exeStatus = Running;
1456 }
1457
1458 updatedQueues = true;
1459
1460 cpu->activityThisCycle();
1461 }
1462
1463 // Need to reset this in case a writeback event needs to write into the
1464 // iew queue. That way the writeback event will write into the correct
1465 // spot in the queue.
1466 wbNumInst = 0;
1467
1468}
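// executeInsts() only records a new fetch redirect (branch mispredict,
// memory-order violation, or blocked load) if no older squash is already
// pending for that thread; the sequence-number comparison above makes the
// oldest offender win. A small sketch of that policy, assuming a
// hypothetical Redirect record in place of the real time-buffer fields:

#include <cstdint>
#include <optional>

namespace redirect_sketch {

using SeqNum = std::uint64_t;
using Addr = std::uint64_t;

struct Redirect {
    SeqNum seqNum;   // sequence number of the offending instruction
    Addr targetPC;   // where fetch should restart (illustrative)
};

inline void
recordRedirect(std::optional<Redirect> &pending, SeqNum seqNum, Addr targetPC)
{
    // Record a redirect if nothing is pending, or if this one is older
    // (smaller sequence number) than the one already recorded.
    if (!pending || seqNum < pending->seqNum)
        pending = Redirect{seqNum, targetPC};
}

} // namespace redirect_sketch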
1469
1470template <class Impl>
1471void
1472DefaultIEW<Impl>::writebackInsts()
1473{
1474 // Loop through the head of the time buffer and wake any
1475 // dependents. These instructions are about to write back. Also
1476 // mark in the scoreboard that each instruction is finally complete.
1477 // Either give IEW direct access to the scoreboard, or make this
1478 // part of the backwards communication.
1479 for (int inst_num = 0; inst_num < wbWidth &&
1480 toCommit->insts[inst_num]; inst_num++) {
1481 DynInstPtr inst = toCommit->insts[inst_num];
1482 ThreadID tid = inst->threadNumber;
1483
1484 DPRINTF(IEW, "Sending instructions to commit, [sn:%lli] PC %s.\n",
1485 inst->seqNum, inst->pcState());
1486
1487 iewInstsToCommit[tid]++;
1488
1489 // Some instructions will be sent to commit without having
1490 // executed because they need commit to handle them.
1491 // E.g. Uncached loads have not actually executed when they
1492 // are first sent to commit. Instead commit must tell the LSQ
1493 // when it's ready to execute the uncached load.
1494 if (!inst->isSquashed() && inst->isExecuted() && inst->getFault() == NoFault) {
1495 int dependents = instQueue.wakeDependents(inst);
1496
1497 for (int i = 0; i < inst->numDestRegs(); i++) {
1498 //mark as Ready
1499 DPRINTF(IEW,"Setting Destination Register %i\n",
1500 inst->renamedDestRegIdx(i));
1501 scoreboard->setReg(inst->renamedDestRegIdx(i));
1502 }
1503
1504 if (dependents) {
1505 producerInst[tid]++;
1506 consumerInst[tid]+= dependents;
1507 }
1508 writebackCount[tid]++;
1509 }
1510
1511 decrWb(inst->seqNum);
1512 }
1513}
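// writebackInsts() marks each destination register ready in the scoreboard
// and asks the IQ to wake instructions that were waiting on it. A minimal
// sketch of that dependency wakeup, assuming a plain bit-vector scoreboard
// and integer register indices (the real code uses renamed physical
// register ids); illustrative only.

#include <cstddef>
#include <vector>

namespace scoreboard_sketch {

struct Scoreboard {
    std::vector<bool> ready;

    explicit Scoreboard(std::size_t numRegs) : ready(numRegs, false) {}

    void setReg(std::size_t reg) { ready[reg] = true; }
    bool isReady(std::size_t reg) const { return ready[reg]; }
};

// A waiting instruction becomes schedulable once every source register it
// reads has been marked ready by some producer's writeback.
inline bool
canIssue(const Scoreboard &sb, const std::vector<std::size_t> &srcRegs)
{
    for (std::size_t reg : srcRegs)
        if (!sb.isReady(reg))
            return false;
    return true;
}

} // namespace scoreboard_sketch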
1514
1515template<class Impl>
1516void
1517DefaultIEW<Impl>::tick()
1518{
1519 wbNumInst = 0;
1520 wbCycle = 0;
1521
1522 wroteToTimeBuffer = false;
1523 updatedQueues = false;
1524
1525 sortInsts();
1526
1527 // Free function units marked as being freed this cycle.
1528 fuPool->processFreeUnits();
1529
1530 list<ThreadID>::iterator threads = activeThreads->begin();
1531 list<ThreadID>::iterator end = activeThreads->end();
1532
1533 // Check stall and squash signals, dispatch any instructions.
1534 while (threads != end) {
1535 ThreadID tid = *threads++;
1536
1537 DPRINTF(IEW,"Issue: Processing [tid:%i]\n",tid);
1538
1539 checkSignalsAndUpdate(tid);
1540 dispatch(tid);
1541 }
1542
1543 if (exeStatus != Squashing) {
1544 executeInsts();
1545
1546 writebackInsts();
1547
1548 // Have the instruction queue try to schedule any ready instructions.
1549 // (In actuality, this scheduling is for instructions that will
1550 // be executed next cycle.)
1551 instQueue.scheduleReadyInsts();
1552
1553 // Also should advance its own time buffers if the stage ran.
1554 // Not the best place for it, but this works (hopefully).
1555 issueToExecQueue.advance();
1556 }
1557
1558 bool broadcast_free_entries = false;
1559
1560 if (updatedQueues || exeStatus == Running || updateLSQNextCycle) {
1561 exeStatus = Idle;
1562 updateLSQNextCycle = false;
1563
1564 broadcast_free_entries = true;
1565 }
1566
1567 // Writeback any stores using any leftover bandwidth.
1568 ldstQueue.writebackStores();
1569
1570 // Check the committed load/store signals to see if there's a load
1571 // or store to commit. Also check if it's being told to execute a
1572 // nonspeculative instruction.
1573 // This is pretty inefficient...
1574
1575 threads = activeThreads->begin();
1576 while (threads != end) {
1577 ThreadID tid = (*threads++);
1578
1579 DPRINTF(IEW,"Processing [tid:%i]\n",tid);
1580
1581 // Update structures based on instructions committed.
1582 if (fromCommit->commitInfo[tid].doneSeqNum != 0 &&
1583 !fromCommit->commitInfo[tid].squash &&
1584 !fromCommit->commitInfo[tid].robSquashing) {
1585
1586 ldstQueue.commitStores(fromCommit->commitInfo[tid].doneSeqNum,tid);
1587
1588 ldstQueue.commitLoads(fromCommit->commitInfo[tid].doneSeqNum,tid);
1589
1590 updateLSQNextCycle = true;
1591 instQueue.commit(fromCommit->commitInfo[tid].doneSeqNum,tid);
1592 }
1593
1594 if (fromCommit->commitInfo[tid].nonSpecSeqNum != 0) {
1595
1596 //DPRINTF(IEW,"NonspecInst from thread %i",tid);
1597 if (fromCommit->commitInfo[tid].uncached) {
1598 instQueue.replayMemInst(fromCommit->commitInfo[tid].uncachedLoad);
1599 fromCommit->commitInfo[tid].uncachedLoad->setAtCommit();
1600 } else {
1601 instQueue.scheduleNonSpec(
1602 fromCommit->commitInfo[tid].nonSpecSeqNum);
1603 }
1604 }
1605
1606 if (broadcast_free_entries) {
1607 toFetch->iewInfo[tid].iqCount =
1608 instQueue.getCount(tid);
1609 toFetch->iewInfo[tid].ldstqCount =
1610 ldstQueue.getCount(tid);
1611
1612 toRename->iewInfo[tid].usedIQ = true;
1613 toRename->iewInfo[tid].freeIQEntries =
1614 instQueue.numFreeEntries(tid);
1615 toRename->iewInfo[tid].usedLSQ = true;
1616 toRename->iewInfo[tid].freeLSQEntries =
1617 ldstQueue.numFreeEntries(tid);
1618
1625
1626 toRename->iewInfo[tid].freeLQEntries =
1627 ldstQueue.numFreeLoadEntries(tid);
1628 toRename->iewInfo[tid].freeSQEntries =
1629 ldstQueue.numFreeStoreEntries(tid);
1630
1619 wroteToTimeBuffer = true;
1620 }
1621
1622 DPRINTF(IEW, "[tid:%i], Dispatch dispatched %i instructions.\n",
1623 tid, toRename->iewInfo[tid].dispatched);
1624 }
1625
1626 DPRINTF(IEW, "IQ has %i free entries (Can schedule: %i). "
1627 "LSQ has %i free entries.\n",
1628 instQueue.numFreeEntries(), instQueue.hasReadyInsts(),
1629 ldstQueue.numFreeEntries());
1638 DPRINTF(IEW, "IQ has %i free entries (Can schedule: %i). "
1639 "LQ has %i free entries. SQ has %i free entries.\n",
1640 instQueue.numFreeEntries(), instQueue.hasReadyInsts(),
1641 ldstQueue.numFreeLoadEntries(), ldstQueue.numFreeStoreEntries());
1630
1631 updateStatus();
1632
1633 if (wroteToTimeBuffer) {
1634 DPRINTF(Activity, "Activity this cycle.\n");
1635 cpu->activityThisCycle();
1636 }
1637}
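// At the end of tick(), IEW broadcasts how many IQ, LQ and SQ entries are
// free so that rename can throttle itself; in this revision the load-queue
// and store-queue space is reported separately instead of one freeLSQEntries
// total. A small sketch of that accounting, assuming a hypothetical
// fixed-capacity queue model rather than the real IQ/LSQ classes:

namespace free_entries_sketch {

struct QueueModel {
    int capacity = 0;
    int occupied = 0;

    int freeEntries() const { return capacity - occupied; }
};

struct FreeEntryReport {
    int freeIQEntries;
    int freeLQEntries;   // previously folded into a single freeLSQEntries
    int freeSQEntries;
};

inline FreeEntryReport
broadcastFreeEntries(const QueueModel &iq, const QueueModel &lq,
                     const QueueModel &sq)
{
    return {iq.freeEntries(), lq.freeEntries(), sq.freeEntries()};
}

} // namespace free_entries_sketch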
1638
1639template <class Impl>
1640void
1641DefaultIEW<Impl>::updateExeInstStats(DynInstPtr &inst)
1642{
1643 ThreadID tid = inst->threadNumber;
1644
1645 iewExecutedInsts++;
1646
1647#if TRACING_ON
1648 if (DTRACE(O3PipeView)) {
1649 inst->completeTick = curTick() - inst->fetchTick;
1650 }
1651#endif
1652
1653 //
1654 // Control operations
1655 //
1656 if (inst->isControl())
1657 iewExecutedBranches[tid]++;
1658
1659 //
1660 // Memory operations
1661 //
1662 if (inst->isMemRef()) {
1663 iewExecutedRefs[tid]++;
1664
1665 if (inst->isLoad()) {
1666 iewExecLoadInsts[tid]++;
1667 }
1668 }
1669}
1670
1671template <class Impl>
1672void
1673DefaultIEW<Impl>::checkMisprediction(DynInstPtr &inst)
1674{
1675 ThreadID tid = inst->threadNumber;
1676
1677 if (!fetchRedirect[tid] ||
1678 !toCommit->squash[tid] ||
1679 toCommit->squashedSeqNum[tid] > inst->seqNum) {
1680
1681 if (inst->mispredicted()) {
1682 fetchRedirect[tid] = true;
1683
1684 DPRINTF(IEW, "Execute: Branch mispredict detected.\n");
1685 DPRINTF(IEW, "Predicted target was PC:%#x, NPC:%#x.\n",
1686 inst->predInstAddr(), inst->predNextInstAddr());
1687 DPRINTF(IEW, "Execute: Redirecting fetch to PC: %#x,"
1688 " NPC: %#x.\n", inst->nextInstAddr(),
1689 inst->nextInstAddr());
1690 // If incorrect, then signal the ROB that it must be squashed.
1691 squashDueToBranch(inst, tid);
1692
1693 if (inst->readPredTaken()) {
1694 predictedTakenIncorrect++;
1695 } else {
1696 predictedNotTakenIncorrect++;
1697 }
1698 }
1699 }
1700}
1701
1702#endif//__CPU_O3_IEW_IMPL_IMPL_HH__