inst_queue_impl.hh: comparison of revisions 9184:a1a8f137b796 (old) and 9444:ab47fe7f03f0 (new)
1/*
2 * Copyright (c) 2011 ARM Limited                        (9184:a1a8f137b796)
2 * Copyright (c) 2011-2012 ARM Limited                   (9444:ab47fe7f03f0)
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2004-2006 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Kevin Lim
41 * Korey Sewell
42 */
43
44#include <limits>
45#include <vector>
46
47#include "cpu/o3/fu_pool.hh"
48#include "cpu/o3/inst_queue.hh"
49#include "debug/IQ.hh"
50#include "enums/OpClass.hh"
51#include "params/DerivO3CPU.hh"
52#include "sim/core.hh"
53
54// clang complains about std::set being overloaded with Packet::set if
55// we open up the entire namespace std
56using std::list;
57
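// FUCompletion is a self-deleting event used for multi-cycle functional
// units: when it fires, processFUCompletion() pushes the instruction onto
// instsToExecute for the execute stage and, if setFreeFU() was called,
// also frees the unit that produced it.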
58template <class Impl>
59InstructionQueue<Impl>::FUCompletion::FUCompletion(DynInstPtr &_inst,
60 int fu_idx, InstructionQueue<Impl> *iq_ptr)
61 : Event(Stat_Event_Pri, AutoDelete),
62 inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false)
63{
64}
65
66template <class Impl>
67void
68InstructionQueue<Impl>::FUCompletion::process()
69{
70 iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1);
71 inst = NULL;
72}
73
74
75template <class Impl>
76const char *
77InstructionQueue<Impl>::FUCompletion::description() const
78{
79 return "Functional unit completion";
80}
81
82template <class Impl>
83InstructionQueue<Impl>::InstructionQueue(O3CPU *cpu_ptr, IEW *iew_ptr,
84 DerivO3CPUParams *params)
85 : cpu(cpu_ptr),
86 iewStage(iew_ptr),
87 fuPool(params->fuPool),
88 numEntries(params->numIQEntries),
89 totalWidth(params->issueWidth),
90 numPhysIntRegs(params->numPhysIntRegs),
91 numPhysFloatRegs(params->numPhysFloatRegs),
92 commitToIEWDelay(params->commitToIEWDelay)
93{
94 assert(fuPool);
95
96    switchedOut = false;                        (removed in 9444:ab47fe7f03f0)
97
98 numThreads = params->numThreads;
99
100 // Set the number of physical registers as the number of int + float
101 numPhysRegs = numPhysIntRegs + numPhysFloatRegs;
102
103 //Create an entry for each physical register within the
104 //dependency graph.
105 dependGraph.resize(numPhysRegs);
106
107 // Resize the register scoreboard.
108 regScoreboard.resize(numPhysRegs);
109
110 //Initialize Mem Dependence Units
111 for (ThreadID tid = 0; tid < numThreads; tid++) {
112 memDepUnit[tid].init(params, tid);
113 memDepUnit[tid].setIQ(this);
114 }
115
116 resetState();
117
118 std::string policy = params->smtIQPolicy;
119
120 //Convert string to lowercase
121 std::transform(policy.begin(), policy.end(), policy.begin(),
122 (int(*)(int)) tolower);
123
124 //Figure out resource sharing policy
125 if (policy == "dynamic") {
126 iqPolicy = Dynamic;
127
128        //Set Max Entries to Total IQ Capacity
129 for (ThreadID tid = 0; tid < numThreads; tid++) {
130 maxEntries[tid] = numEntries;
131 }
132
133 } else if (policy == "partitioned") {
134 iqPolicy = Partitioned;
135
136        //@todo: Make this work when part_amt doesn't divide numEntries evenly.
137 int part_amt = numEntries / numThreads;
138
139        //Divide the IQ up evenly among threads
140 for (ThreadID tid = 0; tid < numThreads; tid++) {
141 maxEntries[tid] = part_amt;
142 }
143
144        DPRINTF(IQ, "IQ sharing policy set to Partitioned: "
145                "%i entries per thread.\n", part_amt);
146 } else if (policy == "threshold") {
147 iqPolicy = Threshold;
148
149 double threshold = (double)params->smtIQThreshold / 100;
150
151 int thresholdIQ = (int)((double)threshold * numEntries);
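        // e.g., smtIQThreshold = 50 with a 64-entry IQ gives
        // thresholdIQ = 32, capping each thread at 32 entries.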
152
153 //Divide up by threshold amount
154 for (ThreadID tid = 0; tid < numThreads; tid++) {
155 maxEntries[tid] = thresholdIQ;
156 }
157
158        DPRINTF(IQ, "IQ sharing policy set to Threshold: "
159                "%i entries per thread.\n", thresholdIQ);
160 } else {
161        assert(0 && "Invalid IQ Sharing Policy. Options Are: {Dynamic, "
162 "Partitioned, Threshold}");
163 }
164}
165
166template <class Impl>
167InstructionQueue<Impl>::~InstructionQueue()
168{
169 dependGraph.reset();
170#ifdef DEBUG
171 cprintf("Nodes traversed: %i, removed: %i\n",
172 dependGraph.nodesTraversed, dependGraph.nodesRemoved);
173#endif
174}
175
176template <class Impl>
177std::string
178InstructionQueue<Impl>::name() const
179{
180 return cpu->name() + ".iq";
181}
182
183template <class Impl>
184void
185InstructionQueue<Impl>::regStats()
186{
187 using namespace Stats;
188 iqInstsAdded
189 .name(name() + ".iqInstsAdded")
190 .desc("Number of instructions added to the IQ (excludes non-spec)")
191 .prereq(iqInstsAdded);
192
193 iqNonSpecInstsAdded
194 .name(name() + ".iqNonSpecInstsAdded")
195 .desc("Number of non-speculative instructions added to the IQ")
196 .prereq(iqNonSpecInstsAdded);
197
198 iqInstsIssued
199 .name(name() + ".iqInstsIssued")
200 .desc("Number of instructions issued")
201 .prereq(iqInstsIssued);
202
203 iqIntInstsIssued
204 .name(name() + ".iqIntInstsIssued")
205 .desc("Number of integer instructions issued")
206 .prereq(iqIntInstsIssued);
207
208 iqFloatInstsIssued
209 .name(name() + ".iqFloatInstsIssued")
210 .desc("Number of float instructions issued")
211 .prereq(iqFloatInstsIssued);
212
213 iqBranchInstsIssued
214 .name(name() + ".iqBranchInstsIssued")
215 .desc("Number of branch instructions issued")
216 .prereq(iqBranchInstsIssued);
217
218 iqMemInstsIssued
219 .name(name() + ".iqMemInstsIssued")
220 .desc("Number of memory instructions issued")
221 .prereq(iqMemInstsIssued);
222
223 iqMiscInstsIssued
224 .name(name() + ".iqMiscInstsIssued")
225 .desc("Number of miscellaneous instructions issued")
226 .prereq(iqMiscInstsIssued);
227
228 iqSquashedInstsIssued
229 .name(name() + ".iqSquashedInstsIssued")
230 .desc("Number of squashed instructions issued")
231 .prereq(iqSquashedInstsIssued);
232
233 iqSquashedInstsExamined
234 .name(name() + ".iqSquashedInstsExamined")
235 .desc("Number of squashed instructions iterated over during squash;"
236 " mainly for profiling")
237 .prereq(iqSquashedInstsExamined);
238
239 iqSquashedOperandsExamined
240 .name(name() + ".iqSquashedOperandsExamined")
241 .desc("Number of squashed operands that are examined and possibly "
242 "removed from graph")
243 .prereq(iqSquashedOperandsExamined);
244
245 iqSquashedNonSpecRemoved
246 .name(name() + ".iqSquashedNonSpecRemoved")
247 .desc("Number of squashed non-spec instructions that were removed")
248 .prereq(iqSquashedNonSpecRemoved);
249/*
250 queueResDist
251 .init(Num_OpClasses, 0, 99, 2)
252 .name(name() + ".IQ:residence:")
253 .desc("cycles from dispatch to issue")
254 .flags(total | pdf | cdf )
255 ;
256 for (int i = 0; i < Num_OpClasses; ++i) {
257 queueResDist.subname(i, opClassStrings[i]);
258 }
259*/
260 numIssuedDist
261 .init(0,totalWidth,1)
262 .name(name() + ".issued_per_cycle")
263 .desc("Number of insts issued each cycle")
264 .flags(pdf)
265 ;
266/*
267 dist_unissued
268 .init(Num_OpClasses+2)
269 .name(name() + ".unissued_cause")
270 .desc("Reason ready instruction not issued")
271 .flags(pdf | dist)
272 ;
273 for (int i=0; i < (Num_OpClasses + 2); ++i) {
274 dist_unissued.subname(i, unissued_names[i]);
275 }
276*/
277 statIssuedInstType
278 .init(numThreads,Enums::Num_OpClass)
279 .name(name() + ".FU_type")
280 .desc("Type of FU issued")
281 .flags(total | pdf | dist)
282 ;
283 statIssuedInstType.ysubnames(Enums::OpClassStrings);
284
285 //
286 // How long did instructions for a particular FU type wait prior to issue
287 //
288/*
289 issueDelayDist
290 .init(Num_OpClasses,0,99,2)
291 .name(name() + ".")
292 .desc("cycles from operands ready to issue")
293 .flags(pdf | cdf)
294 ;
295
296 for (int i=0; i<Num_OpClasses; ++i) {
297 std::stringstream subname;
298 subname << opClassStrings[i] << "_delay";
299 issueDelayDist.subname(i, subname.str());
300 }
301*/
302 issueRate
303 .name(name() + ".rate")
304 .desc("Inst issue rate")
305 .flags(total)
306 ;
307 issueRate = iqInstsIssued / cpu->numCycles;
308
309 statFuBusy
310 .init(Num_OpClasses)
311 .name(name() + ".fu_full")
312 .desc("attempts to use FU when none available")
313 .flags(pdf | dist)
314 ;
315 for (int i=0; i < Num_OpClasses; ++i) {
316 statFuBusy.subname(i, Enums::OpClassStrings[i]);
317 }
318
319 fuBusy
320 .init(numThreads)
321 .name(name() + ".fu_busy_cnt")
322 .desc("FU busy when requested")
323 .flags(total)
324 ;
325
326 fuBusyRate
327 .name(name() + ".fu_busy_rate")
328 .desc("FU busy rate (busy events/executed inst)")
329 .flags(total)
330 ;
331 fuBusyRate = fuBusy / iqInstsIssued;
332
333 for (ThreadID tid = 0; tid < numThreads; tid++) {
334 // Tell mem dependence unit to reg stats as well.
335 memDepUnit[tid].regStats();
336 }
337
338 intInstQueueReads
339 .name(name() + ".int_inst_queue_reads")
340 .desc("Number of integer instruction queue reads")
341 .flags(total);
342
343 intInstQueueWrites
344 .name(name() + ".int_inst_queue_writes")
345 .desc("Number of integer instruction queue writes")
346 .flags(total);
347
348 intInstQueueWakeupAccesses
349 .name(name() + ".int_inst_queue_wakeup_accesses")
350 .desc("Number of integer instruction queue wakeup accesses")
351 .flags(total);
352
353 fpInstQueueReads
354 .name(name() + ".fp_inst_queue_reads")
355 .desc("Number of floating instruction queue reads")
356 .flags(total);
357
358 fpInstQueueWrites
359 .name(name() + ".fp_inst_queue_writes")
360 .desc("Number of floating instruction queue writes")
361 .flags(total);
362
363 fpInstQueueWakeupQccesses
364 .name(name() + ".fp_inst_queue_wakeup_accesses")
365 .desc("Number of floating instruction queue wakeup accesses")
366 .flags(total);
367
368 intAluAccesses
369 .name(name() + ".int_alu_accesses")
370 .desc("Number of integer alu accesses")
371 .flags(total);
372
373 fpAluAccesses
374 .name(name() + ".fp_alu_accesses")
375 .desc("Number of floating point alu accesses")
376 .flags(total);
377
378}
379
380template <class Impl>
381void
382InstructionQueue<Impl>::resetState()
383{
384 //Initialize thread IQ counts
385 for (ThreadID tid = 0; tid <numThreads; tid++) {
386 count[tid] = 0;
387 instList[tid].clear();
388 }
389
390 // Initialize the number of free IQ entries.
391 freeEntries = numEntries;
392
393    // Note that, in reality, the registers corresponding to the logical
394    // registers start off as ready.  That does not matter to the IQ,
395    // however, because rename has already told each instruction which of
396    // its source registers are ready, so the scoreboard can safely be
397    // initialized as all unready.
398 for (int i = 0; i < numPhysRegs; ++i) {
399 regScoreboard[i] = false;
400 }
401
402 for (ThreadID tid = 0; tid < numThreads; ++tid) {
403 squashedSeqNum[tid] = 0;
404 }
405
406 for (int i = 0; i < Num_OpClasses; ++i) {
407 while (!readyInsts[i].empty())
408 readyInsts[i].pop();
409 queueOnList[i] = false;
410 readyIt[i] = listOrder.end();
411 }
412 nonSpecInsts.clear();
413 listOrder.clear();
414 deferredMemInsts.clear();
415}
416
417template <class Impl>
418void
419InstructionQueue<Impl>::setActiveThreads(list<ThreadID> *at_ptr)
420{
421 activeThreads = at_ptr;
422}
423
424template <class Impl>
425void
426InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
427{
428 issueToExecuteQueue = i2e_ptr;
429}
430
431template <class Impl>
432void
433InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
434{
435 timeBuffer = tb_ptr;
436
437 fromCommit = timeBuffer->getWire(-commitToIEWDelay);
438}
439
440template <class Impl>
441void
442InstructionQueue<Impl>::switchOut()
443{
444/*
445    if (!instList[0].empty() || (numEntries != freeEntries) ||
446        !readyInsts[0].empty() || !nonSpecInsts.empty() || !listOrder.empty()) {
447        dumpInsts();
448//        assert(0);
449    }
450*/
451    resetState();
452    dependGraph.reset();
453    instsToExecute.clear();
454    switchedOut = true;
455    for (ThreadID tid = 0; tid < numThreads; ++tid) {
456        memDepUnit[tid].switchOut();
457    }
458}
459
460template <class Impl>
461void
462InstructionQueue<Impl>::takeOverFrom()
463{
464    switchedOut = false;
465}
466

(9444:ab47fe7f03f0)
438template <class Impl>
439void
440InstructionQueue<Impl>::drainSanityCheck() const
441{
442    assert(dependGraph.empty());
443    assert(instsToExecute.empty());
444    for (ThreadID tid = 0; tid < numThreads; ++tid)
445        memDepUnit[tid].drainSanityCheck();
446}
447
448template <class Impl>
449void
450InstructionQueue<Impl>::takeOverFrom()
451{
452    resetState();
453}
454
467template <class Impl>
468int
469InstructionQueue<Impl>::entryAmount(ThreadID num_threads)
470{
471 if (iqPolicy == Partitioned) {
472 return numEntries / num_threads;
473 } else {
474 return 0;
475 }
476}
477
478
479template <class Impl>
480void
481InstructionQueue<Impl>::resetEntries()
482{
483 if (iqPolicy != Dynamic || numThreads > 1) {
484 int active_threads = activeThreads->size();
485
486 list<ThreadID>::iterator threads = activeThreads->begin();
487 list<ThreadID>::iterator end = activeThreads->end();
488
489 while (threads != end) {
490 ThreadID tid = *threads++;
491
492 if (iqPolicy == Partitioned) {
493 maxEntries[tid] = numEntries / active_threads;
494 } else if(iqPolicy == Threshold && active_threads == 1) {
495 maxEntries[tid] = numEntries;
496 }
497 }
498 }
499}
500
501template <class Impl>
502unsigned
503InstructionQueue<Impl>::numFreeEntries()
504{
505 return freeEntries;
506}
507
508template <class Impl>
509unsigned
510InstructionQueue<Impl>::numFreeEntries(ThreadID tid)
511{
512 return maxEntries[tid] - count[tid];
513}
514
515// Might want to do something more complex if it knows how many instructions
516// will be issued this cycle.
517template <class Impl>
518bool
519InstructionQueue<Impl>::isFull()
520{
521 if (freeEntries == 0) {
522 return(true);
523 } else {
524 return(false);
525 }
526}
527
528template <class Impl>
529bool
530InstructionQueue<Impl>::isFull(ThreadID tid)
531{
532 if (numFreeEntries(tid) == 0) {
533 return(true);
534 } else {
535 return(false);
536 }
537}
538
539template <class Impl>
540bool
541InstructionQueue<Impl>::hasReadyInsts()
542{
543 if (!listOrder.empty()) {
544 return true;
545 }
546
547 for (int i = 0; i < Num_OpClasses; ++i) {
548 if (!readyInsts[i].empty()) {
549 return true;
550 }
551 }
552
553 return false;
554}
555
556template <class Impl>
557void
558InstructionQueue<Impl>::insert(DynInstPtr &new_inst)
559{
560 new_inst->isFloating() ? fpInstQueueWrites++ : intInstQueueWrites++;
561 // Make sure the instruction is valid
562 assert(new_inst);
563
564 DPRINTF(IQ, "Adding instruction [sn:%lli] PC %s to the IQ.\n",
565 new_inst->seqNum, new_inst->pcState());
566
567 assert(freeEntries != 0);
568
569 instList[new_inst->threadNumber].push_back(new_inst);
570
571 --freeEntries;
572
573 new_inst->setInIQ();
574
575 // Look through its source registers (physical regs), and mark any
576 // dependencies.
577 addToDependents(new_inst);
578
579 // Have this instruction set itself as the producer of its destination
580 // register(s).
581 addToProducers(new_inst);
582
583 if (new_inst->isMemRef()) {
584 memDepUnit[new_inst->threadNumber].insert(new_inst);
585 } else {
586 addIfReady(new_inst);
587 }
588
589 ++iqInstsAdded;
590
591 count[new_inst->threadNumber]++;
592
593 assert(freeEntries == (numEntries - countInsts()));
594}
595
596template <class Impl>
597void
598InstructionQueue<Impl>::insertNonSpec(DynInstPtr &new_inst)
599{
600 // @todo: Clean up this code; can do it by setting inst as unable
601 // to issue, then calling normal insert on the inst.
602 new_inst->isFloating() ? fpInstQueueWrites++ : intInstQueueWrites++;
603
604 assert(new_inst);
605
606 nonSpecInsts[new_inst->seqNum] = new_inst;
607
608 DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %s "
609 "to the IQ.\n",
610 new_inst->seqNum, new_inst->pcState());
611
612 assert(freeEntries != 0);
613
614 instList[new_inst->threadNumber].push_back(new_inst);
615
616 --freeEntries;
617
618 new_inst->setInIQ();
619
620 // Have this instruction set itself as the producer of its destination
621 // register(s).
622 addToProducers(new_inst);
623
624 // If it's a memory instruction, add it to the memory dependency
625 // unit.
626 if (new_inst->isMemRef()) {
627 memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
628 }
629
630 ++iqNonSpecInstsAdded;
631
632 count[new_inst->threadNumber]++;
633
634 assert(freeEntries == (numEntries - countInsts()));
635}
636
637template <class Impl>
638void
639InstructionQueue<Impl>::insertBarrier(DynInstPtr &barr_inst)
640{
641 memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);
642
643 insertNonSpec(barr_inst);
644}
645
646template <class Impl>
647typename Impl::DynInstPtr
648InstructionQueue<Impl>::getInstToExecute()
649{
650 assert(!instsToExecute.empty());
651 DynInstPtr inst = instsToExecute.front();
652 instsToExecute.pop_front();
653 if (inst->isFloating()){
654 fpInstQueueReads++;
655 } else {
656 intInstQueueReads++;
657 }
658 return inst;
659}
660
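// listOrder holds at most one entry per op class, kept sorted by the
// sequence number of the oldest ready instruction of that class, so
// scheduleReadyInsts() can walk the op classes oldest-first.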
661template <class Impl>
662void
663InstructionQueue<Impl>::addToOrderList(OpClass op_class)
664{
665 assert(!readyInsts[op_class].empty());
666
667 ListOrderEntry queue_entry;
668
669 queue_entry.queueType = op_class;
670
671 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
672
673 ListOrderIt list_it = listOrder.begin();
674 ListOrderIt list_end_it = listOrder.end();
675
676 while (list_it != list_end_it) {
677 if ((*list_it).oldestInst > queue_entry.oldestInst) {
678 break;
679 }
680
681 list_it++;
682 }
683
684 readyIt[op_class] = listOrder.insert(list_it, queue_entry);
685 queueOnList[op_class] = true;
686}
687
688template <class Impl>
689void
690InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
691{
692    // Build a replacement listOrder entry keyed on this op class's new
693    // oldest ready instruction (the caller erases the old entry).
694    // Walk forward from the old position until the next entry is younger
695    // than the new oldest instruction, or the end of the list is reached,
696    // and insert the replacement entry there.
697 ListOrderEntry queue_entry;
698 OpClass op_class = (*list_order_it).queueType;
699 ListOrderIt next_it = list_order_it;
700
701 ++next_it;
702
703 queue_entry.queueType = op_class;
704 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
705
706 while (next_it != listOrder.end() &&
707 (*next_it).oldestInst < queue_entry.oldestInst) {
708 ++next_it;
709 }
710
711 readyIt[op_class] = listOrder.insert(next_it, queue_entry);
712}
713
714template <class Impl>
715void
716InstructionQueue<Impl>::processFUCompletion(DynInstPtr &inst, int fu_idx)
717{
718 DPRINTF(IQ, "Processing FU completion [sn:%lli]\n", inst->seqNum);
719    // The CPU could have been sleeping until this op completed (*extremely*
720    // long latency op). Wake it if it was. This may be overkill.
721    if (isSwitchedOut()) {
722        DPRINTF(IQ, "FU completion not processed, IQ is switched out [sn:%lli]\n",
723                inst->seqNum);
724        return;
725    }
726

(9444:ab47fe7f03f0)
707    assert(!cpu->switchedOut());
708    // The CPU could have been sleeping until this op completed (*extremely*
709    // long latency op). Wake it if it was. This may be overkill.
727 iewStage->wakeCPU();
728
729 if (fu_idx > -1)
730 fuPool->freeUnitNextCycle(fu_idx);
731
732 // @todo: Ensure that these FU Completions happen at the beginning
733 // of a cycle, otherwise they could add too many instructions to
734 // the queue.
735 issueToExecuteQueue->access(-1)->size++;
736 instsToExecute.push_back(inst);
737}
738
739// @todo: Figure out a better way to remove the squashed items from the
740// lists. Checking the top item of each list to see if it's squashed
741// wastes time and forces jumps.
742template <class Impl>
743void
744InstructionQueue<Impl>::scheduleReadyInsts()
745{
746 DPRINTF(IQ, "Attempting to schedule ready instructions from "
747 "the IQ.\n");
748
749 IssueStruct *i2e_info = issueToExecuteQueue->access(0);
750
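    // Only the issue count travels through the issue-to-execute time
    // buffer; the instructions themselves are appended to instsToExecute
    // and pulled back out through getInstToExecute().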
751 DynInstPtr deferred_mem_inst;
752 int total_deferred_mem_issued = 0;
753 while (total_deferred_mem_issued < totalWidth &&
754 (deferred_mem_inst = getDeferredMemInstToExecute()) != 0) {
755 issueToExecuteQueue->access(0)->size++;
756 instsToExecute.push_back(deferred_mem_inst);
757 total_deferred_mem_issued++;
758 }
759
760 // Have iterator to head of the list
761 // While I haven't exceeded bandwidth or reached the end of the list,
762 // Try to get a FU that can do what this op needs.
763 // If successful, change the oldestInst to the new top of the list, put
764 // the queue in the proper place in the list.
765 // Increment the iterator.
766 // This will avoid trying to schedule a certain op class if there are no
767 // FUs that handle it.
768 ListOrderIt order_it = listOrder.begin();
769 ListOrderIt order_end_it = listOrder.end();
770 int total_issued = 0;
771
772 while (total_issued < (totalWidth - total_deferred_mem_issued) &&
773 iewStage->canIssue() &&
774 order_it != order_end_it) {
775 OpClass op_class = (*order_it).queueType;
776
777 assert(!readyInsts[op_class].empty());
778
779 DynInstPtr issuing_inst = readyInsts[op_class].top();
780
781 issuing_inst->isFloating() ? fpInstQueueReads++ : intInstQueueReads++;
782
783 assert(issuing_inst->seqNum == (*order_it).oldestInst);
784
785 if (issuing_inst->isSquashed()) {
786 readyInsts[op_class].pop();
787
788 if (!readyInsts[op_class].empty()) {
789 moveToYoungerInst(order_it);
790 } else {
791 readyIt[op_class] = listOrder.end();
792 queueOnList[op_class] = false;
793 }
794
795 listOrder.erase(order_it++);
796
797 ++iqSquashedInstsIssued;
798
799 continue;
800 }
801
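        // idx == -2 means the op needs no FU at all (No_OpClass);
        // fuPool->getUnit() returns -1 when every unit of this class is
        // busy this cycle, otherwise the index of the unit it claimed.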
802 int idx = -2;
803 Cycles op_latency = Cycles(1);
804 ThreadID tid = issuing_inst->threadNumber;
805
806 if (op_class != No_OpClass) {
807 idx = fuPool->getUnit(op_class);
808 issuing_inst->isFloating() ? fpAluAccesses++ : intAluAccesses++;
809 if (idx > -1) {
810 op_latency = fuPool->getOpLatency(op_class);
811 }
812 }
813
814 // If we have an instruction that doesn't require a FU, or a
815 // valid FU, then schedule for execution.
816 if (idx == -2 || idx != -1) {
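            // Single-cycle ops go straight onto the execute queue; longer
            // ops are handed to an FUCompletion event that delivers them
            // once the unit's op latency has elapsed.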
817 if (op_latency == Cycles(1)) {
818 i2e_info->size++;
819 instsToExecute.push_back(issuing_inst);
820
821 // Add the FU onto the list of FU's to be freed next
822 // cycle if we used one.
823 if (idx >= 0)
824 fuPool->freeUnitNextCycle(idx);
825 } else {
826 Cycles issue_latency = fuPool->getIssueLatency(op_class);
827 // Generate completion event for the FU
828 FUCompletion *execution = new FUCompletion(issuing_inst,
829 idx, this);
830
831 cpu->schedule(execution,
832 cpu->clockEdge(Cycles(op_latency - 1)));
833
834 // @todo: Enforce that issue_latency == 1 or op_latency
835 if (issue_latency > Cycles(1)) {
836 // If FU isn't pipelined, then it must be freed
837 // upon the execution completing.
838 execution->setFreeFU();
839 } else {
840 // Add the FU onto the list of FU's to be freed next cycle.
841 fuPool->freeUnitNextCycle(idx);
842 }
843 }
844
845 DPRINTF(IQ, "Thread %i: Issuing instruction PC %s "
846 "[sn:%lli]\n",
847 tid, issuing_inst->pcState(),
848 issuing_inst->seqNum);
849
850 readyInsts[op_class].pop();
851
852 if (!readyInsts[op_class].empty()) {
853 moveToYoungerInst(order_it);
854 } else {
855 readyIt[op_class] = listOrder.end();
856 queueOnList[op_class] = false;
857 }
858
859 issuing_inst->setIssued();
860 ++total_issued;
861
862#if TRACING_ON
863 issuing_inst->issueTick = curTick() - issuing_inst->fetchTick;
864#endif
865
866 if (!issuing_inst->isMemRef()) {
867 // Memory instructions can not be freed from the IQ until they
868 // complete.
869 ++freeEntries;
870 count[tid]--;
871 issuing_inst->clearInIQ();
872 } else {
873 memDepUnit[tid].issue(issuing_inst);
874 }
875
876 listOrder.erase(order_it++);
877 statIssuedInstType[tid][op_class]++;
878 iewStage->incrWb(issuing_inst->seqNum);
879 } else {
880 statFuBusy[op_class]++;
881 fuBusy[tid]++;
882 ++order_it;
883 }
884 }
885
886 numIssuedDist.sample(total_issued);
887 iqInstsIssued+= total_issued;
888
889 // If we issued any instructions, tell the CPU we had activity.
890    // @todo If the way deferred memory instructions are handled due to
891    // translation changes, then the deferredMemInsts condition should be
892    // removed from the code below.
893 if (total_issued || total_deferred_mem_issued || deferredMemInsts.size()) {
894 cpu->activityThisCycle();
895 } else {
896 DPRINTF(IQ, "Not able to schedule any instructions.\n");
897 }
898}
899
900template <class Impl>
901void
902InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
903{
904 DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
905 "to execute.\n", inst);
906
907 NonSpecMapIt inst_it = nonSpecInsts.find(inst);
908
909 assert(inst_it != nonSpecInsts.end());
910
911 ThreadID tid = (*inst_it).second->threadNumber;
912
913 (*inst_it).second->setAtCommit();
914
915 (*inst_it).second->setCanIssue();
916
917 if (!(*inst_it).second->isMemRef()) {
918 addIfReady((*inst_it).second);
919 } else {
920 memDepUnit[tid].nonSpecInstReady((*inst_it).second);
921 }
922
923 (*inst_it).second = NULL;
924
925 nonSpecInsts.erase(inst_it);
926}
927
928template <class Impl>
929void
930InstructionQueue<Impl>::commit(const InstSeqNum &inst, ThreadID tid)
931{
932 DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n",
933 tid,inst);
934
935 ListIt iq_it = instList[tid].begin();
936
937 while (iq_it != instList[tid].end() &&
938 (*iq_it)->seqNum <= inst) {
939 ++iq_it;
940 instList[tid].pop_front();
941 }
942
943 assert(freeEntries == (numEntries - countInsts()));
944}
945
946template <class Impl>
947int
948InstructionQueue<Impl>::wakeDependents(DynInstPtr &completed_inst)
949{
950 int dependents = 0;
951
952 // The instruction queue here takes care of both floating and int ops
953 if (completed_inst->isFloating()) {
954 fpInstQueueWakeupQccesses++;
955 } else {
956 intInstQueueWakeupAccesses++;
957 }
958
959 DPRINTF(IQ, "Waking dependents of completed instruction.\n");
960
961 assert(!completed_inst->isSquashed());
962
963 // Tell the memory dependence unit to wake any dependents on this
964 // instruction if it is a memory instruction. Also complete the memory
965 // instruction at this point since we know it executed without issues.
966 // @todo: Might want to rename "completeMemInst" to something that
967 // indicates that it won't need to be replayed, and call this
968 // earlier. Might not be a big deal.
969 if (completed_inst->isMemRef()) {
970 memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
971 completeMemInst(completed_inst);
972 } else if (completed_inst->isMemBarrier() ||
973 completed_inst->isWriteBarrier()) {
974 memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
975 }
976
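    // For each destination register, drain the chain of consumers that
    // were parked on it in the dependency graph: mark that source ready
    // in each waiting instruction and move any newly ready ones onto the
    // ready lists.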
977 for (int dest_reg_idx = 0;
978 dest_reg_idx < completed_inst->numDestRegs();
979 dest_reg_idx++)
980 {
981 PhysRegIndex dest_reg =
982 completed_inst->renamedDestRegIdx(dest_reg_idx);
983
984 // Special case of uniq or control registers. They are not
985 // handled by the IQ and thus have no dependency graph entry.
986 // @todo Figure out a cleaner way to handle this.
987 if (dest_reg >= numPhysRegs) {
988 DPRINTF(IQ, "dest_reg :%d, numPhysRegs: %d\n", dest_reg,
989 numPhysRegs);
990 continue;
991 }
992
993 DPRINTF(IQ, "Waking any dependents on register %i.\n",
994 (int) dest_reg);
995
996 //Go through the dependency chain, marking the registers as
997 //ready within the waiting instructions.
998 DynInstPtr dep_inst = dependGraph.pop(dest_reg);
999
1000 while (dep_inst) {
1001 DPRINTF(IQ, "Waking up a dependent instruction, [sn:%lli] "
1002 "PC %s.\n", dep_inst->seqNum, dep_inst->pcState());
1003
1004 // Might want to give more information to the instruction
1005 // so that it knows which of its source registers is
1006 // ready. However that would mean that the dependency
1007 // graph entries would need to hold the src_reg_idx.
1008 dep_inst->markSrcRegReady();
1009
1010 addIfReady(dep_inst);
1011
1012 dep_inst = dependGraph.pop(dest_reg);
1013
1014 ++dependents;
1015 }
1016
1017 // Reset the head node now that all of its dependents have
1018 // been woken up.
1019 assert(dependGraph.empty(dest_reg));
1020 dependGraph.clearInst(dest_reg);
1021
1022 // Mark the scoreboard as having that register ready.
1023 regScoreboard[dest_reg] = true;
1024 }
1025 return dependents;
1026}
1027
1028template <class Impl>
1029void
1030InstructionQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
1031{
1032 OpClass op_class = ready_inst->opClass();
1033
1034 readyInsts[op_class].push(ready_inst);
1035
1036 // Will need to reorder the list if either a queue is not on the list,
1037 // or it has an older instruction than last time.
1038 if (!queueOnList[op_class]) {
1039 addToOrderList(op_class);
1040 } else if (readyInsts[op_class].top()->seqNum <
1041 (*readyIt[op_class]).oldestInst) {
1042 listOrder.erase(readyIt[op_class]);
1043 addToOrderList(op_class);
1044 }
1045
1046 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
1047 "the ready list, PC %s opclass:%i [sn:%lli].\n",
1048 ready_inst->pcState(), op_class, ready_inst->seqNum);
1049}
1050
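// rescheduleMemInst() hands a memory op back to the memory dependence
// unit so it can be replayed later; clearing the translation flags
// forces the address to be retranslated when it is replayed.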
1051template <class Impl>
1052void
1053InstructionQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
1054{
1055 DPRINTF(IQ, "Rescheduling mem inst [sn:%lli]\n", resched_inst->seqNum);
1056
1057 // Reset DTB translation state
1058 resched_inst->translationStarted(false);
1059 resched_inst->translationCompleted(false);
1060
1061 resched_inst->clearCanIssue();
1062 memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
1063}
1064
1065template <class Impl>
1066void
1067InstructionQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
1068{
1069 memDepUnit[replay_inst->threadNumber].replay(replay_inst);
1070}
1071
1072template <class Impl>
1073void
1074InstructionQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
1075{
1076 ThreadID tid = completed_inst->threadNumber;
1077
1078 DPRINTF(IQ, "Completing mem instruction PC: %s [sn:%lli]\n",
1079 completed_inst->pcState(), completed_inst->seqNum);
1080
1081 ++freeEntries;
1082
1083 completed_inst->memOpDone(true);
1084
1085 memDepUnit[tid].completed(completed_inst);
1086 count[tid]--;
1087}
1088
1089template <class Impl>
1090void
1091InstructionQueue<Impl>::deferMemInst(DynInstPtr &deferred_inst)
1092{
1093 deferredMemInsts.push_back(deferred_inst);
1094}
1095
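// Deferred memory instructions are ops whose address translation had not
// completed when they first tried to execute; scheduleReadyInsts() retries
// them once the translation finishes (or they are squashed).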
1096template <class Impl>
1097typename Impl::DynInstPtr
1098InstructionQueue<Impl>::getDeferredMemInstToExecute()
1099{
1100 for (ListIt it = deferredMemInsts.begin(); it != deferredMemInsts.end();
1101 ++it) {
1102 if ((*it)->translationCompleted() || (*it)->isSquashed()) {
1103 DynInstPtr ret = *it;
1104 deferredMemInsts.erase(it);
1105 return ret;
1106 }
1107 }
1108 return NULL;
1109}
1110
1111template <class Impl>
1112void
1113InstructionQueue<Impl>::violation(DynInstPtr &store,
1114 DynInstPtr &faulting_load)
1115{
1116 intInstQueueWrites++;
1117 memDepUnit[store->threadNumber].violation(store, faulting_load);
1118}
1119
1120template <class Impl>
1121void
1122InstructionQueue<Impl>::squash(ThreadID tid)
1123{
1124 DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
1125 "the IQ.\n", tid);
1126
1127 // Read instruction sequence number of last instruction out of the
1128 // time buffer.
1129 squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;
1130
1131 // Call doSquash if there are insts in the IQ
1132 if (count[tid] > 0) {
1133 doSquash(tid);
1134 }
1135
1136 // Also tell the memory dependence unit to squash.
1137 memDepUnit[tid].squash(squashedSeqNum[tid], tid);
1138}
1139
1140template <class Impl>
1141void
1142InstructionQueue<Impl>::doSquash(ThreadID tid)
1143{
1144 // Start at the tail.
1145 ListIt squash_it = instList[tid].end();
1146 --squash_it;
1147
1148 DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
1149 tid, squashedSeqNum[tid]);
1150
1151 // Squash any instructions younger than the squashed sequence number
1152 // given.
1153 while (squash_it != instList[tid].end() &&
1154 (*squash_it)->seqNum > squashedSeqNum[tid]) {
1155
1156 DynInstPtr squashed_inst = (*squash_it);
1157 squashed_inst->isFloating() ? fpInstQueueWrites++ : intInstQueueWrites++;
1158
1159 // Only handle the instruction if it actually is in the IQ and
1160 // hasn't already been squashed in the IQ.
1161 if (squashed_inst->threadNumber != tid ||
1162 squashed_inst->isSquashedInIQ()) {
1163 --squash_it;
1164 continue;
1165 }
1166
1167 if (!squashed_inst->isIssued() ||
1168 (squashed_inst->isMemRef() &&
1169 !squashed_inst->memOpDone())) {
1170
1171 DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %s squashed.\n",
1172 tid, squashed_inst->seqNum, squashed_inst->pcState());
1173
1174 // Remove the instruction from the dependency list.
1175 if (!squashed_inst->isNonSpeculative() &&
1176 !squashed_inst->isStoreConditional() &&
1177 !squashed_inst->isMemBarrier() &&
1178 !squashed_inst->isWriteBarrier()) {
1179
1180 for (int src_reg_idx = 0;
1181 src_reg_idx < squashed_inst->numSrcRegs();
1182 src_reg_idx++)
1183 {
1184 PhysRegIndex src_reg =
1185 squashed_inst->renamedSrcRegIdx(src_reg_idx);
1186
1187 // Only remove it from the dependency graph if it
1188 // was placed there in the first place.
1189
1190 // Instead of doing a linked list traversal, we
1191 // can just remove these squashed instructions
1192 // either at issue time, or when the register is
1193 // overwritten. The only downside to this is it
1194 // leaves more room for error.
1195
1196 if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
1197 src_reg < numPhysRegs) {
1198 dependGraph.remove(src_reg, squashed_inst);
1199 }
1200
1201
1202 ++iqSquashedOperandsExamined;
1203 }
1204 } else if (!squashed_inst->isStoreConditional() ||
1205 !squashed_inst->isCompleted()) {
1206 NonSpecMapIt ns_inst_it =
1207 nonSpecInsts.find(squashed_inst->seqNum);
1208
1209 if (ns_inst_it == nonSpecInsts.end()) {
1210 assert(squashed_inst->getFault() != NoFault);
1211 } else {
1212
1213 (*ns_inst_it).second = NULL;
1214
1215 nonSpecInsts.erase(ns_inst_it);
1216
1217 ++iqSquashedNonSpecRemoved;
1218 }
1219 }
1220
1221 // Might want to also clear out the head of the dependency graph.
1222
1223 // Mark it as squashed within the IQ.
1224 squashed_inst->setSquashedInIQ();
1225
1226 // @todo: Remove this hack where several statuses are set so the
1227 // inst will flow through the rest of the pipeline.
1228 squashed_inst->setIssued();
1229 squashed_inst->setCanCommit();
1230 squashed_inst->clearInIQ();
1231
1232 //Update Thread IQ Count
1233 count[squashed_inst->threadNumber]--;
1234
1235 ++freeEntries;
1236 }
1237
1238 instList[tid].erase(squash_it--);
1239 ++iqSquashedInstsExamined;
1240 }
1241}
1242
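// addToDependents() and addToProducers() are the two halves of dispatch
// bookkeeping: the first parks the new instruction on any source register
// that is not yet ready, the second installs it as the producer at the
// head of each destination register's dependency chain.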
1243template <class Impl>
1244bool
1245InstructionQueue<Impl>::addToDependents(DynInstPtr &new_inst)
1246{
1247 // Loop through the instruction's source registers, adding
1248 // them to the dependency list if they are not ready.
1249 int8_t total_src_regs = new_inst->numSrcRegs();
1250 bool return_val = false;
1251
1252 for (int src_reg_idx = 0;
1253 src_reg_idx < total_src_regs;
1254 src_reg_idx++)
1255 {
1256 // Only add it to the dependency graph if it's not ready.
1257 if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
1258 PhysRegIndex src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);
1259
1260 // Check the IQ's scoreboard to make sure the register
1261 // hasn't become ready while the instruction was in flight
1262 // between stages. Only if it really isn't ready should
1263 // it be added to the dependency graph.
1264 if (src_reg >= numPhysRegs) {
1265 continue;
1266 } else if (regScoreboard[src_reg] == false) {
1267 DPRINTF(IQ, "Instruction PC %s has src reg %i that "
1268 "is being added to the dependency chain.\n",
1269 new_inst->pcState(), src_reg);
1270
1271 dependGraph.insert(src_reg, new_inst);
1272
1273 // Change the return value to indicate that something
1274 // was added to the dependency graph.
1275 return_val = true;
1276 } else {
1277 DPRINTF(IQ, "Instruction PC %s has src reg %i that "
1278 "became ready before it reached the IQ.\n",
1279 new_inst->pcState(), src_reg);
1280 // Mark a register ready within the instruction.
1281 new_inst->markSrcRegReady(src_reg_idx);
1282 }
1283 }
1284 }
1285
1286 return return_val;
1287}
1288
1289template <class Impl>
1290void
1291InstructionQueue<Impl>::addToProducers(DynInstPtr &new_inst)
1292{
1293 // Nothing really needs to be marked when an instruction becomes
1294 // the producer of a register's value, but for convenience a ptr
1295 // to the producing instruction will be placed in the head node of
1296 // the dependency links.
1297 int8_t total_dest_regs = new_inst->numDestRegs();
1298
1299 for (int dest_reg_idx = 0;
1300 dest_reg_idx < total_dest_regs;
1301 dest_reg_idx++)
1302 {
1303 PhysRegIndex dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);
1304
1305 // Instructions that use the misc regs will have a reg number
1306 // higher than the normal physical registers. In this case these
1307 // registers are not renamed, and there is no need to track
1308 // dependencies as these instructions must be executed at commit.
1309 if (dest_reg >= numPhysRegs) {
1310 continue;
1311 }
1312
1313 if (!dependGraph.empty(dest_reg)) {
1314 dependGraph.dump();
1315 panic("Dependency graph %i not empty!", dest_reg);
1316 }
1317
1318 dependGraph.setInst(dest_reg, new_inst);
1319
1320 // Mark the scoreboard to say it's not yet ready.
1321 regScoreboard[dest_reg] = false;
1322 }
1323}
1324
1325template <class Impl>
1326void
1327InstructionQueue<Impl>::addIfReady(DynInstPtr &inst)
1328{
1329 // If the instruction now has all of its source registers
1330 // available, then add it to the list of ready instructions.
1331 if (inst->readyToIssue()) {
1332
1333 //Add the instruction to the proper ready list.
1334 if (inst->isMemRef()) {
1335
1336 DPRINTF(IQ, "Checking if memory instruction can issue.\n");
1337
1338 // Message to the mem dependence unit that this instruction has
1339 // its registers ready.
1340 memDepUnit[inst->threadNumber].regsReady(inst);
1341
1342 return;
1343 }
1344
1345 OpClass op_class = inst->opClass();
1346
1347 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
1348 "the ready list, PC %s opclass:%i [sn:%lli].\n",
1349 inst->pcState(), op_class, inst->seqNum);
1350
1351 readyInsts[op_class].push(inst);
1352
1353 // Will need to reorder the list if either a queue is not on the list,
1354 // or it has an older instruction than last time.
1355 if (!queueOnList[op_class]) {
1356 addToOrderList(op_class);
1357 } else if (readyInsts[op_class].top()->seqNum <
1358 (*readyIt[op_class]).oldestInst) {
1359 listOrder.erase(readyIt[op_class]);
1360 addToOrderList(op_class);
1361 }
1362 }
1363}
1364
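// countInsts() reports how many IQ entries are occupied. The active
// implementation simply returns numEntries - freeEntries; the disabled
// #if 0 block below is a brute-force recount kept for sanity checking.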
1365template <class Impl>
1366int
1367InstructionQueue<Impl>::countInsts()
1368{
1369#if 0
1370 // ksewell: This works but could definitely use a cleaner rewrite
1371 // with a more intuitive way of counting. Right now it's
1372 // just brute force.
1373 // Change the #if if you want to use this method.
1374 int total_insts = 0;
1375
1376 for (ThreadID tid = 0; tid < numThreads; ++tid) {
1377 ListIt count_it = instList[tid].begin();
1378
1379 while (count_it != instList[tid].end()) {
1380 if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
1381 if (!(*count_it)->isIssued()) {
1382 ++total_insts;
1383 } else if ((*count_it)->isMemRef() &&
1384 !(*count_it)->memOpDone()) {
1385 // Loads that have not been marked as executed still count
1386 // towards the total instructions.
1387 ++total_insts;
1388 }
1389 }
1390
1391 ++count_it;
1392 }
1393 }
1394
1395 return total_insts;
1396#else
1397 return numEntries - freeEntries;
1398#endif
1399}
1400
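// Debug helper: prints the size of each per-op-class ready list, the
// contents of the non-speculative map, and the current issue order list.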
1401template <class Impl>
1402void
1403InstructionQueue<Impl>::dumpLists()
1404{
1405 for (int i = 0; i < Num_OpClasses; ++i) {
1406 cprintf("Ready list %i size: %i\n", i, readyInsts[i].size());
1407
1408 cprintf("\n");
1409 }
1410
1411 cprintf("Non speculative list size: %i\n", nonSpecInsts.size());
1412
1413 NonSpecMapIt non_spec_it = nonSpecInsts.begin();
1414 NonSpecMapIt non_spec_end_it = nonSpecInsts.end();
1415
1416 cprintf("Non speculative list: ");
1417
1418 while (non_spec_it != non_spec_end_it) {
1419 cprintf("%s [sn:%lli] ", (*non_spec_it).second->pcState(),
1420 (*non_spec_it).second->seqNum);
1421 ++non_spec_it;
1422 }
1423
1424 cprintf("\n");
1425
1426 ListOrderIt list_order_it = listOrder.begin();
1427 ListOrderIt list_order_end_it = listOrder.end();
1428 int i = 1;
1429
1430 cprintf("List order: ");
1431
1432 while (list_order_it != list_order_end_it) {
1433 cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
1434 (*list_order_it).oldestInst);
1435
1436 ++list_order_it;
1437 ++i;
1438 }
1439
1440 cprintf("\n");
1441}
1442
1443
1444template <class Impl>
1445void
1446InstructionQueue<Impl>::dumpInsts()
1447{
1448 for (ThreadID tid = 0; tid < numThreads; ++tid) {
1449 int num = 0;
1450 int valid_num = 0;
1451 ListIt inst_list_it = instList[tid].begin();
1452
1453 while (inst_list_it != instList[tid].end()) {
1454 cprintf("Instruction:%i\n", num);
1455 if (!(*inst_list_it)->isSquashed()) {
1456 if (!(*inst_list_it)->isIssued()) {
1457 ++valid_num;
1458 cprintf("Count:%i\n", valid_num);
1459 } else if ((*inst_list_it)->isMemRef() &&
1460 !(*inst_list_it)->memOpDone()) {
1461 // Loads that have not been marked as executed
1462 // still count towards the total instructions.
1463 ++valid_num;
1464 cprintf("Count:%i\n", valid_num);
1465 }
1466 }
1467
1468 cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n"
1469 "Issued:%i\nSquashed:%i\n",
1470 (*inst_list_it)->pcState(),
1471 (*inst_list_it)->seqNum,
1472 (*inst_list_it)->threadNumber,
1473 (*inst_list_it)->isIssued(),
1474 (*inst_list_it)->isSquashed());
1475
1476 if ((*inst_list_it)->isMemRef()) {
1477 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone());
1478 }
1479
1480 cprintf("\n");
1481
1482 inst_list_it++;
1483 ++num;
1484 }
1485 }
1486
1487 cprintf("Insts to Execute list:\n");
1488
1489 int num = 0;
1490 int valid_num = 0;
1491 ListIt inst_list_it = instsToExecute.begin();
1492
1493 while (inst_list_it != instsToExecute.end())
1494 {
1495 cprintf("Instruction:%i\n",
1496 num);
1497 if (!(*inst_list_it)->isSquashed()) {
1498 if (!(*inst_list_it)->isIssued()) {
1499 ++valid_num;
1500 cprintf("Count:%i\n", valid_num);
1501 } else if ((*inst_list_it)->isMemRef() &&
1502 !(*inst_list_it)->memOpDone()) {
1503 // Loads that have not been marked as executed
1504 // still count towards the total instructions.
1505 ++valid_num;
1506 cprintf("Count:%i\n", valid_num);
1507 }
1508 }
1509
1510 cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n"
1511 "Issued:%i\nSquashed:%i\n",
1512 (*inst_list_it)->pcState(),
1513 (*inst_list_it)->seqNum,
1514 (*inst_list_it)->threadNumber,
1515 (*inst_list_it)->isIssued(),
1516 (*inst_list_it)->isSquashed());
1517
1518 if ((*inst_list_it)->isMemRef()) {
1519 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone());
1520 }
1521
1522 cprintf("\n");
1523
1524 inst_list_it++;
1525 ++num;
1526 }
1527}
710 iewStage->wakeCPU();
711
712 if (fu_idx > -1)
713 fuPool->freeUnitNextCycle(fu_idx);
714
715 // @todo: Ensure that these FU Completions happen at the beginning
716 // of a cycle, otherwise they could add too many instructions to
717 // the queue.
718 issueToExecuteQueue->access(-1)->size++;
719 instsToExecute.push_back(inst);
720}
721
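// scheduleReadyInsts() is the IQ's per-cycle issue pass. As a rough sketch
// of the loop below (a summary of the existing code, not added behavior):
//
//     drain deferred memory instructions whose translation has completed;
//     for each op class in listOrder, oldest ready instruction first:
//         if the head instruction is squashed, drop it and continue;
//         otherwise try to allocate an FU (unless the op needs none);
//         if one is available, hand the instruction to execute, account
//             for the FU's operation/issue latency, and re-sort the entry;
//         if all FUs of that class are busy, move on to the next entry;
//
// Issue stops once the issue width (less the deferred memory issues) is
// used up, the IEW stage cannot accept more, or the list is exhausted.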
722// @todo: Figure out a better way to remove the squashed items from the
723// lists. Checking the top item of each list to see if it's squashed
724// wastes time and forces jumps.
725template <class Impl>
726void
727InstructionQueue<Impl>::scheduleReadyInsts()
728{
729 DPRINTF(IQ, "Attempting to schedule ready instructions from "
730 "the IQ.\n");
731
732 IssueStruct *i2e_info = issueToExecuteQueue->access(0);
733
734 DynInstPtr deferred_mem_inst;
735 int total_deferred_mem_issued = 0;
736 while (total_deferred_mem_issued < totalWidth &&
737 (deferred_mem_inst = getDeferredMemInstToExecute()) != 0) {
738 issueToExecuteQueue->access(0)->size++;
739 instsToExecute.push_back(deferred_mem_inst);
740 total_deferred_mem_issued++;
741 }
742
743 // Walk the list-order entries from oldest to youngest. While issue
744 // bandwidth remains and the end of the list has not been reached, try
745 // to get an FU that can execute the op class at the head of the entry's
746 // ready queue.
747 // On success, update oldestInst to the queue's new head, reinsert the
748 // queue at its proper place in the order list, and move on; otherwise
749 // advance to the next entry. This avoids repeatedly trying to schedule
750 // an op class for which no free FU is available.
751 ListOrderIt order_it = listOrder.begin();
752 ListOrderIt order_end_it = listOrder.end();
753 int total_issued = 0;
754
755 while (total_issued < (totalWidth - total_deferred_mem_issued) &&
756 iewStage->canIssue() &&
757 order_it != order_end_it) {
758 OpClass op_class = (*order_it).queueType;
759
760 assert(!readyInsts[op_class].empty());
761
762 DynInstPtr issuing_inst = readyInsts[op_class].top();
763
764 issuing_inst->isFloating() ? fpInstQueueReads++ : intInstQueueReads++;
765
766 assert(issuing_inst->seqNum == (*order_it).oldestInst);
767
768 if (issuing_inst->isSquashed()) {
769 readyInsts[op_class].pop();
770
771 if (!readyInsts[op_class].empty()) {
772 moveToYoungerInst(order_it);
773 } else {
774 readyIt[op_class] = listOrder.end();
775 queueOnList[op_class] = false;
776 }
777
778 listOrder.erase(order_it++);
779
780 ++iqSquashedInstsIssued;
781
782 continue;
783 }
784
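// idx encodes the result of the FU lookup below: -2 means the op needs no
// FU (No_OpClass, so the lookup is skipped), -1 means every FU of this
// class is currently busy, and a value >= 0 is the index of the FU that
// was allocated.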
785 int idx = -2;
786 Cycles op_latency = Cycles(1);
787 ThreadID tid = issuing_inst->threadNumber;
788
789 if (op_class != No_OpClass) {
790 idx = fuPool->getUnit(op_class);
791 issuing_inst->isFloating() ? fpAluAccesses++ : intAluAccesses++;
792 if (idx > -1) {
793 op_latency = fuPool->getOpLatency(op_class);
794 }
795 }
796
797 // If we have an instruction that doesn't require a FU, or a
798 // valid FU, then schedule for execution.
799 if (idx == -2 || idx != -1) {
800 if (op_latency == Cycles(1)) {
801 i2e_info->size++;
802 instsToExecute.push_back(issuing_inst);
803
804 // Add the FU onto the list of FU's to be freed next
805 // cycle if we used one.
806 if (idx >= 0)
807 fuPool->freeUnitNextCycle(idx);
808 } else {
809 Cycles issue_latency = fuPool->getIssueLatency(op_class);
810 // Generate completion event for the FU
811 FUCompletion *execution = new FUCompletion(issuing_inst,
812 idx, this);
813
814 cpu->schedule(execution,
815 cpu->clockEdge(Cycles(op_latency - 1)));
816
817 // @todo: Enforce that issue_latency == 1 or issue_latency == op_latency
818 if (issue_latency > Cycles(1)) {
819 // If FU isn't pipelined, then it must be freed
820 // upon the execution completing.
821 execution->setFreeFU();
822 } else {
823 // Add the FU onto the list of FU's to be freed next cycle.
824 fuPool->freeUnitNextCycle(idx);
825 }
826 }
827
828 DPRINTF(IQ, "Thread %i: Issuing instruction PC %s "
829 "[sn:%lli]\n",
830 tid, issuing_inst->pcState(),
831 issuing_inst->seqNum);
832
833 readyInsts[op_class].pop();
834
835 if (!readyInsts[op_class].empty()) {
836 moveToYoungerInst(order_it);
837 } else {
838 readyIt[op_class] = listOrder.end();
839 queueOnList[op_class] = false;
840 }
841
842 issuing_inst->setIssued();
843 ++total_issued;
844
845#if TRACING_ON
846 issuing_inst->issueTick = curTick() - issuing_inst->fetchTick;
847#endif
848
849 if (!issuing_inst->isMemRef()) {
850 // Memory instructions can not be freed from the IQ until they
851 // complete.
852 ++freeEntries;
853 count[tid]--;
854 issuing_inst->clearInIQ();
855 } else {
856 memDepUnit[tid].issue(issuing_inst);
857 }
858
859 listOrder.erase(order_it++);
860 statIssuedInstType[tid][op_class]++;
861 iewStage->incrWb(issuing_inst->seqNum);
862 } else {
863 statFuBusy[op_class]++;
864 fuBusy[tid]++;
865 ++order_it;
866 }
867 }
868
869 numIssuedDist.sample(total_issued);
870 iqInstsIssued += total_issued;
871
872 // If we issued any instructions, tell the CPU we had activity.
873 // @todo If the way deferred memory instructions are handled changes due
874 // to translation changes, then the deferredMemInsts condition should be
875 // removed from the code below.
876 if (total_issued || total_deferred_mem_issued || deferredMemInsts.size()) {
877 cpu->activityThisCycle();
878 } else {
879 DPRINTF(IQ, "Not able to schedule any instructions.\n");
880 }
881}
882
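// scheduleNonSpec() is handed the sequence number of a non-speculative
// instruction that is now safe to execute: the instruction is marked
// at-commit and able to issue, then either added to the ready lists
// directly or passed to the memory dependence unit if it is a memory
// reference, and finally dropped from the nonSpecInsts map.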
883template <class Impl>
884void
885InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
886{
887 DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
888 "to execute.\n", inst);
889
890 NonSpecMapIt inst_it = nonSpecInsts.find(inst);
891
892 assert(inst_it != nonSpecInsts.end());
893
894 ThreadID tid = (*inst_it).second->threadNumber;
895
896 (*inst_it).second->setAtCommit();
897
898 (*inst_it).second->setCanIssue();
899
900 if (!(*inst_it).second->isMemRef()) {
901 addIfReady((*inst_it).second);
902 } else {
903 memDepUnit[tid].nonSpecInstReady((*inst_it).second);
904 }
905
906 (*inst_it).second = NULL;
907
908 nonSpecInsts.erase(inst_it);
909}
910
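// commit() trims instList[tid] of every instruction with a sequence number
// up to and including the given one. The IQ entries themselves were freed
// earlier (at issue for non-memory instructions, at completion for memory
// ops), so only the bookkeeping list is touched; the assert checks the
// free-entry invariant afterwards.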
911template <class Impl>
912void
913InstructionQueue<Impl>::commit(const InstSeqNum &inst, ThreadID tid)
914{
915 DPRINTF(IQ, "[tid:%i]: Committing instructions older than or equal to [sn:%lli]\n",
916 tid, inst);
917
918 ListIt iq_it = instList[tid].begin();
919
920 while (iq_it != instList[tid].end() &&
921 (*iq_it)->seqNum <= inst) {
922 ++iq_it;
923 instList[tid].pop_front();
924 }
925
926 assert(freeEntries == (numEntries - countInsts()));
927}
928
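// wakeDependents() runs when an instruction finishes executing. Memory
// references additionally notify the memory dependence unit and complete
// the access; barriers complete the barrier. Each destination register's
// dependency chain is then drained: every waiting consumer has that source
// marked ready (and is queued if it became fully ready), the producer is
// cleared from the chain head, and the scoreboard bit is set. Returns the
// number of dependents woken.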
929template <class Impl>
930int
931InstructionQueue<Impl>::wakeDependents(DynInstPtr &completed_inst)
932{
933 int dependents = 0;
934
935 // The instruction queue here takes care of both floating and int ops
936 if (completed_inst->isFloating()) {
937 fpInstQueueWakeupAccesses++;
938 } else {
939 intInstQueueWakeupAccesses++;
940 }
941
942 DPRINTF(IQ, "Waking dependents of completed instruction.\n");
943
944 assert(!completed_inst->isSquashed());
945
946 // Tell the memory dependence unit to wake any dependents on this
947 // instruction if it is a memory instruction. Also complete the memory
948 // instruction at this point since we know it executed without issues.
949 // @todo: Might want to rename "completeMemInst" to something that
950 // indicates that it won't need to be replayed, and call this
951 // earlier. Might not be a big deal.
952 if (completed_inst->isMemRef()) {
953 memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
954 completeMemInst(completed_inst);
955 } else if (completed_inst->isMemBarrier() ||
956 completed_inst->isWriteBarrier()) {
957 memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
958 }
959
960 for (int dest_reg_idx = 0;
961 dest_reg_idx < completed_inst->numDestRegs();
962 dest_reg_idx++)
963 {
964 PhysRegIndex dest_reg =
965 completed_inst->renamedDestRegIdx(dest_reg_idx);
966
967 // Special case of uniq or control registers. They are not
968 // handled by the IQ and thus have no dependency graph entry.
969 // @todo Figure out a cleaner way to handle this.
970 if (dest_reg >= numPhysRegs) {
971 DPRINTF(IQ, "dest_reg: %d, numPhysRegs: %d\n", dest_reg,
972 numPhysRegs);
973 continue;
974 }
975
976 DPRINTF(IQ, "Waking any dependents on register %i.\n",
977 (int) dest_reg);
978
979 //Go through the dependency chain, marking the registers as
980 //ready within the waiting instructions.
981 DynInstPtr dep_inst = dependGraph.pop(dest_reg);
982
983 while (dep_inst) {
984 DPRINTF(IQ, "Waking up a dependent instruction, [sn:%lli] "
985 "PC %s.\n", dep_inst->seqNum, dep_inst->pcState());
986
987 // Might want to give more information to the instruction
988 // so that it knows which of its source registers is
989 // ready. However that would mean that the dependency
990 // graph entries would need to hold the src_reg_idx.
991 dep_inst->markSrcRegReady();
992
993 addIfReady(dep_inst);
994
995 dep_inst = dependGraph.pop(dest_reg);
996
997 ++dependents;
998 }
999
1000 // Reset the head node now that all of its dependents have
1001 // been woken up.
1002 assert(dependGraph.empty(dest_reg));
1003 dependGraph.clearInst(dest_reg);
1004
1005 // Mark the scoreboard as having that register ready.
1006 regScoreboard[dest_reg] = true;
1007 }
1008 return dependents;
1009}
1010
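// addReadyMemInst() pushes a memory instruction that may now issue onto the
// ready list for its op class and, as in addIfReady(), re-sorts the issue
// order list if this class is new to it or now holds an older instruction.
// Memory instructions reach the ready lists through this path because
// addIfReady() hands them to the memory dependence unit instead.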
1011template <class Impl>
1012void
1013InstructionQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
1014{
1015 OpClass op_class = ready_inst->opClass();
1016
1017 readyInsts[op_class].push(ready_inst);
1018
1019 // Will need to reorder the list if either a queue is not on the list,
1020 // or it has an older instruction than last time.
1021 if (!queueOnList[op_class]) {
1022 addToOrderList(op_class);
1023 } else if (readyInsts[op_class].top()->seqNum <
1024 (*readyIt[op_class]).oldestInst) {
1025 listOrder.erase(readyIt[op_class]);
1026 addToOrderList(op_class);
1027 }
1028
1029 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
1030 "the ready list, PC %s opclass:%i [sn:%lli].\n",
1031 ready_inst->pcState(), op_class, ready_inst->seqNum);
1032}
1033
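// rescheduleMemInst() hands a memory instruction back to the memory
// dependence unit to be replayed later: its translation state is reset and
// it can no longer issue until it is made ready again.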
1034template <class Impl>
1035void
1036InstructionQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
1037{
1038 DPRINTF(IQ, "Rescheduling mem inst [sn:%lli]\n", resched_inst->seqNum);
1039
1040 // Reset DTB translation state
1041 resched_inst->translationStarted(false);
1042 resched_inst->translationCompleted(false);
1043
1044 resched_inst->clearCanIssue();
1045 memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
1046}
1047
1048template <class Impl>
1049void
1050InstructionQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
1051{
1052 memDepUnit[replay_inst->threadNumber].replay(replay_inst);
1053}
1054
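// completeMemInst() releases the IQ entry a memory instruction held onto
// until its access finished, marks the memory op done, and informs the
// memory dependence unit of the completion.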
1055template <class Impl>
1056void
1057InstructionQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
1058{
1059 ThreadID tid = completed_inst->threadNumber;
1060
1061 DPRINTF(IQ, "Completing mem instruction PC: %s [sn:%lli]\n",
1062 completed_inst->pcState(), completed_inst->seqNum);
1063
1064 ++freeEntries;
1065
1066 completed_inst->memOpDone(true);
1067
1068 memDepUnit[tid].completed(completed_inst);
1069 count[tid]--;
1070}
1071
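// deferMemInst() parks a memory instruction, typically one whose address
// translation is still outstanding, on the deferred list;
// scheduleReadyInsts() pulls it back out via getDeferredMemInstToExecute()
// once the translation completes or the instruction is squashed.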
1072template <class Impl>
1073void
1074InstructionQueue<Impl>::deferMemInst(DynInstPtr &deferred_inst)
1075{
1076 deferredMemInsts.push_back(deferred_inst);
1077}
1078
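// Scans the deferred list for the first instruction whose translation has
// completed (or that has been squashed), removes it from the list and
// returns it; returns NULL when nothing is ready to execute yet.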
1079template <class Impl>
1080typename Impl::DynInstPtr
1081InstructionQueue<Impl>::getDeferredMemInstToExecute()
1082{
1083 for (ListIt it = deferredMemInsts.begin(); it != deferredMemInsts.end();
1084 ++it) {
1085 if ((*it)->translationCompleted() || (*it)->isSquashed()) {
1086 DynInstPtr ret = *it;
1087 deferredMemInsts.erase(it);
1088 return ret;
1089 }
1090 }
1091 return NULL;
1092}
1093
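// violation() records an ordering violation between a store and a younger
// faulting load by forwarding the pair to the store's memory dependence
// unit.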
1094template <class Impl>
1095void
1096InstructionQueue<Impl>::violation(DynInstPtr &store,
1097 DynInstPtr &faulting_load)
1098{
1099 intInstQueueWrites++;
1100 memDepUnit[store->threadNumber].violation(store, faulting_load);
1101}
1102
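// squash(tid) latches the squash sequence number published by commit and,
// if the thread has any instructions in the IQ, removes the younger ones in
// doSquash(); the memory dependence unit is always told to squash as well.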
1103template <class Impl>
1104void
1105InstructionQueue<Impl>::squash(ThreadID tid)
1106{
1107 DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
1108 "the IQ.\n", tid);
1109
1110 // Read instruction sequence number of last instruction out of the
1111 // time buffer.
1112 squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;
1113
1114 // Call doSquash if there are insts in the IQ
1115 if (count[tid] > 0) {
1116 doSquash(tid);
1117 }
1118
1119 // Also tell the memory dependence unit to squash.
1120 memDepUnit[tid].squash(squashedSeqNum[tid], tid);
1121}
1122
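// doSquash(tid) walks the thread's instruction list from the tail, removing
// every instruction younger than squashedSeqNum[tid]. Instructions that
// still occupy an IQ entry (not yet issued, or memory ops that have not
// completed) also have their dependency-graph or non-speculative-map
// entries cleaned up, are marked issued and can-commit so they drain from
// the rest of the pipeline, and return their entry to the free pool.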
1123template <class Impl>
1124void
1125InstructionQueue<Impl>::doSquash(ThreadID tid)
1126{
1127 // Start at the tail.
1128 ListIt squash_it = instList[tid].end();
1129 --squash_it;
1130
1131 DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
1132 tid, squashedSeqNum[tid]);
1133
1134 // Squash any instructions younger than the squashed sequence number
1135 // given.
1136 while (squash_it != instList[tid].end() &&
1137 (*squash_it)->seqNum > squashedSeqNum[tid]) {
1138
1139 DynInstPtr squashed_inst = (*squash_it);
1140 squashed_inst->isFloating() ? fpInstQueueWrites++ : intInstQueueWrites++;
1141
1142 // Only handle the instruction if it actually is in the IQ and
1143 // hasn't already been squashed in the IQ.
1144 if (squashed_inst->threadNumber != tid ||
1145 squashed_inst->isSquashedInIQ()) {
1146 --squash_it;
1147 continue;
1148 }
1149
1150 if (!squashed_inst->isIssued() ||
1151 (squashed_inst->isMemRef() &&
1152 !squashed_inst->memOpDone())) {
1153
1154 DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %s squashed.\n",
1155 tid, squashed_inst->seqNum, squashed_inst->pcState());
1156
1157 // Remove the instruction from the dependency list.
1158 if (!squashed_inst->isNonSpeculative() &&
1159 !squashed_inst->isStoreConditional() &&
1160 !squashed_inst->isMemBarrier() &&
1161 !squashed_inst->isWriteBarrier()) {
1162
1163 for (int src_reg_idx = 0;
1164 src_reg_idx < squashed_inst->numSrcRegs();
1165 src_reg_idx++)
1166 {
1167 PhysRegIndex src_reg =
1168 squashed_inst->renamedSrcRegIdx(src_reg_idx);
1169
1170 // Only remove it from the dependency graph if it
1171 // was placed there in the first place.
1172
1173 // Instead of doing a linked list traversal, we
1174 // can just remove these squashed instructions
1175 // either at issue time, or when the register is
1176 // overwritten. The only downside to this is that it
1177 // leaves more room for error.
1178
1179 if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
1180 src_reg < numPhysRegs) {
1181 dependGraph.remove(src_reg, squashed_inst);
1182 }
1183
1184
1185 ++iqSquashedOperandsExamined;
1186 }
1187 } else if (!squashed_inst->isStoreConditional() ||
1188 !squashed_inst->isCompleted()) {
1189 NonSpecMapIt ns_inst_it =
1190 nonSpecInsts.find(squashed_inst->seqNum);
1191
1192 if (ns_inst_it == nonSpecInsts.end()) {
1193 assert(squashed_inst->getFault() != NoFault);
1194 } else {
1195
1196 (*ns_inst_it).second = NULL;
1197
1198 nonSpecInsts.erase(ns_inst_it);
1199
1200 ++iqSquashedNonSpecRemoved;
1201 }
1202 }
1203
1204 // Might want to also clear out the head of the dependency graph.
1205
1206 // Mark it as squashed within the IQ.
1207 squashed_inst->setSquashedInIQ();
1208
1209 // @todo: Remove this hack where several statuses are set so the
1210 // inst will flow through the rest of the pipeline.
1211 squashed_inst->setIssued();
1212 squashed_inst->setCanCommit();
1213 squashed_inst->clearInIQ();
1214
1215 //Update Thread IQ Count
1216 count[squashed_inst->threadNumber]--;
1217
1218 ++freeEntries;
1219 }
1220
1221 instList[tid].erase(squash_it--);
1222 ++iqSquashedInstsExamined;
1223 }
1224}