inst_queue_impl.hh (10510:7e54a9a9f6b2)
1/*
2 * Copyright (c) 2011-2014 ARM Limited
3 * Copyright (c) 2013 Advanced Micro Devices, Inc.
4 * All rights reserved.
5 *
6 * The license below extends only to copyright in the software and shall
7 * not be construed as granting a license to any other intellectual
8 * property including but not limited to intellectual property relating
9 * to a hardware implementation of the functionality of the software
10 * licensed hereunder. You may use the software subject to the license
11 * terms below provided that you ensure that this notice is replicated
12 * unmodified and in its entirety in all distributions of the software,
13 * modified or unmodified, in source code or in binary form.
14 *
15 * Copyright (c) 2004-2006 The Regents of The University of Michigan
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Kevin Lim
42 * Korey Sewell
43 */
44
45#ifndef __CPU_O3_INST_QUEUE_IMPL_HH__
46#define __CPU_O3_INST_QUEUE_IMPL_HH__
47
48#include <limits>
49#include <vector>
50
51#include "cpu/o3/fu_pool.hh"
52#include "cpu/o3/inst_queue.hh"
53#include "debug/IQ.hh"
54#include "enums/OpClass.hh"
55#include "params/DerivO3CPU.hh"
56#include "sim/core.hh"
57
58// clang complains about std::set being overloaded with Packet::set if
59// we open up the entire namespace std
60using std::list;
61
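// FUCompletion models the delayed completion of an instruction executing
// on a multi-cycle functional unit.  When the event fires it hands the
// instruction back to the IQ via processFUCompletion(), passing the FU
// index only if setFreeFU() was called (i.e. the unit is not pipelined
// and must be freed once execution finishes).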
62template <class Impl>
63InstructionQueue<Impl>::FUCompletion::FUCompletion(DynInstPtr &_inst,
64 int fu_idx, InstructionQueue<Impl> *iq_ptr)
65 : Event(Stat_Event_Pri, AutoDelete),
66 inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false)
67{
68}
69
70template <class Impl>
71void
72InstructionQueue<Impl>::FUCompletion::process()
73{
74 iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1);
75 inst = NULL;
76}
77
78
79template <class Impl>
80const char *
81InstructionQueue<Impl>::FUCompletion::description() const
82{
83 return "Functional unit completion";
84}
85
86template <class Impl>
87InstructionQueue<Impl>::InstructionQueue(O3CPU *cpu_ptr, IEW *iew_ptr,
88 DerivO3CPUParams *params)
89 : cpu(cpu_ptr),
90 iewStage(iew_ptr),
91 fuPool(params->fuPool),
92 numEntries(params->numIQEntries),
93 totalWidth(params->issueWidth),
94 commitToIEWDelay(params->commitToIEWDelay)
95{
96 assert(fuPool);
97
98 numThreads = params->numThreads;
99
100 // Set the number of total physical registers
101 numPhysRegs = params->numPhysIntRegs + params->numPhysFloatRegs +
102 params->numPhysCCRegs;
103
104 //Create an entry for each physical register within the
105 //dependency graph.
106 dependGraph.resize(numPhysRegs);
107
108 // Resize the register scoreboard.
109 regScoreboard.resize(numPhysRegs);
110
111 //Initialize Mem Dependence Units
112 for (ThreadID tid = 0; tid < numThreads; tid++) {
113 memDepUnit[tid].init(params, tid);
114 memDepUnit[tid].setIQ(this);
115 }
116
117 resetState();
118
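    // The SMT IQ sharing policy parsed below controls how IQ entries are
    // divided among threads: Dynamic lets every thread use the whole IQ,
    // Partitioned gives each thread an equal static share, and Threshold
    // caps every thread at smtIQThreshold percent of the entries.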
119 std::string policy = params->smtIQPolicy;
120
121 //Convert string to lowercase
122 std::transform(policy.begin(), policy.end(), policy.begin(),
123 (int(*)(int)) tolower);
124
125 //Figure out resource sharing policy
126 if (policy == "dynamic") {
127 iqPolicy = Dynamic;
128
129 //Set Max Entries to Total ROB Capacity
130 for (ThreadID tid = 0; tid < numThreads; tid++) {
131 maxEntries[tid] = numEntries;
132 }
133
134 } else if (policy == "partitioned") {
135 iqPolicy = Partitioned;
136
137        //@todo: make work if part_amt doesn't divide evenly.
138 int part_amt = numEntries / numThreads;
139
140 //Divide ROB up evenly
141 for (ThreadID tid = 0; tid < numThreads; tid++) {
142 maxEntries[tid] = part_amt;
143 }
144
145        DPRINTF(IQ, "IQ sharing policy set to Partitioned: "
146                "%i entries per thread.\n", part_amt);
147 } else if (policy == "threshold") {
148 iqPolicy = Threshold;
149
150 double threshold = (double)params->smtIQThreshold / 100;
151
152 int thresholdIQ = (int)((double)threshold * numEntries);
153
154 //Divide up by threshold amount
155 for (ThreadID tid = 0; tid < numThreads; tid++) {
156 maxEntries[tid] = thresholdIQ;
157 }
158
159        DPRINTF(IQ, "IQ sharing policy set to Threshold: "
160                "%i entries per thread.\n", thresholdIQ);
161 } else {
162        assert(0 && "Invalid IQ sharing policy. Options are: {Dynamic, "
163               "Partitioned, Threshold}");
164 }
165}
166
167template <class Impl>
168InstructionQueue<Impl>::~InstructionQueue()
169{
170 dependGraph.reset();
171#ifdef DEBUG
172 cprintf("Nodes traversed: %i, removed: %i\n",
173 dependGraph.nodesTraversed, dependGraph.nodesRemoved);
174#endif
175}
176
177template <class Impl>
178std::string
179InstructionQueue<Impl>::name() const
180{
181 return cpu->name() + ".iq";
182}
183
184template <class Impl>
185void
186InstructionQueue<Impl>::regStats()
187{
188 using namespace Stats;
189 iqInstsAdded
190 .name(name() + ".iqInstsAdded")
191 .desc("Number of instructions added to the IQ (excludes non-spec)")
192 .prereq(iqInstsAdded);
193
194 iqNonSpecInstsAdded
195 .name(name() + ".iqNonSpecInstsAdded")
196 .desc("Number of non-speculative instructions added to the IQ")
197 .prereq(iqNonSpecInstsAdded);
198
199 iqInstsIssued
200 .name(name() + ".iqInstsIssued")
201 .desc("Number of instructions issued")
202 .prereq(iqInstsIssued);
203
204 iqIntInstsIssued
205 .name(name() + ".iqIntInstsIssued")
206 .desc("Number of integer instructions issued")
207 .prereq(iqIntInstsIssued);
208
209 iqFloatInstsIssued
210 .name(name() + ".iqFloatInstsIssued")
211 .desc("Number of float instructions issued")
212 .prereq(iqFloatInstsIssued);
213
214 iqBranchInstsIssued
215 .name(name() + ".iqBranchInstsIssued")
216 .desc("Number of branch instructions issued")
217 .prereq(iqBranchInstsIssued);
218
219 iqMemInstsIssued
220 .name(name() + ".iqMemInstsIssued")
221 .desc("Number of memory instructions issued")
222 .prereq(iqMemInstsIssued);
223
224 iqMiscInstsIssued
225 .name(name() + ".iqMiscInstsIssued")
226 .desc("Number of miscellaneous instructions issued")
227 .prereq(iqMiscInstsIssued);
228
229 iqSquashedInstsIssued
230 .name(name() + ".iqSquashedInstsIssued")
231 .desc("Number of squashed instructions issued")
232 .prereq(iqSquashedInstsIssued);
233
234 iqSquashedInstsExamined
235 .name(name() + ".iqSquashedInstsExamined")
236 .desc("Number of squashed instructions iterated over during squash;"
237 " mainly for profiling")
238 .prereq(iqSquashedInstsExamined);
239
240 iqSquashedOperandsExamined
241 .name(name() + ".iqSquashedOperandsExamined")
242 .desc("Number of squashed operands that are examined and possibly "
243 "removed from graph")
244 .prereq(iqSquashedOperandsExamined);
245
246 iqSquashedNonSpecRemoved
247 .name(name() + ".iqSquashedNonSpecRemoved")
248 .desc("Number of squashed non-spec instructions that were removed")
249 .prereq(iqSquashedNonSpecRemoved);
250/*
251 queueResDist
252 .init(Num_OpClasses, 0, 99, 2)
253 .name(name() + ".IQ:residence:")
254 .desc("cycles from dispatch to issue")
255 .flags(total | pdf | cdf )
256 ;
257 for (int i = 0; i < Num_OpClasses; ++i) {
258 queueResDist.subname(i, opClassStrings[i]);
259 }
260*/
261 numIssuedDist
262 .init(0,totalWidth,1)
263 .name(name() + ".issued_per_cycle")
264 .desc("Number of insts issued each cycle")
265 .flags(pdf)
266 ;
267/*
268 dist_unissued
269 .init(Num_OpClasses+2)
270 .name(name() + ".unissued_cause")
271 .desc("Reason ready instruction not issued")
272 .flags(pdf | dist)
273 ;
274 for (int i=0; i < (Num_OpClasses + 2); ++i) {
275 dist_unissued.subname(i, unissued_names[i]);
276 }
277*/
278 statIssuedInstType
279 .init(numThreads,Enums::Num_OpClass)
280 .name(name() + ".FU_type")
281 .desc("Type of FU issued")
282 .flags(total | pdf | dist)
283 ;
284 statIssuedInstType.ysubnames(Enums::OpClassStrings);
285
286 //
287 // How long did instructions for a particular FU type wait prior to issue
288 //
289/*
290 issueDelayDist
291 .init(Num_OpClasses,0,99,2)
292 .name(name() + ".")
293 .desc("cycles from operands ready to issue")
294 .flags(pdf | cdf)
295 ;
296
297 for (int i=0; i<Num_OpClasses; ++i) {
298 std::stringstream subname;
299 subname << opClassStrings[i] << "_delay";
300 issueDelayDist.subname(i, subname.str());
301 }
302*/
303 issueRate
304 .name(name() + ".rate")
305 .desc("Inst issue rate")
306 .flags(total)
307 ;
308 issueRate = iqInstsIssued / cpu->numCycles;
309
310 statFuBusy
311 .init(Num_OpClasses)
312 .name(name() + ".fu_full")
313 .desc("attempts to use FU when none available")
314 .flags(pdf | dist)
315 ;
316 for (int i=0; i < Num_OpClasses; ++i) {
317 statFuBusy.subname(i, Enums::OpClassStrings[i]);
318 }
319
320 fuBusy
321 .init(numThreads)
322 .name(name() + ".fu_busy_cnt")
323 .desc("FU busy when requested")
324 .flags(total)
325 ;
326
327 fuBusyRate
328 .name(name() + ".fu_busy_rate")
329 .desc("FU busy rate (busy events/executed inst)")
330 .flags(total)
331 ;
332 fuBusyRate = fuBusy / iqInstsIssued;
333
334 for (ThreadID tid = 0; tid < numThreads; tid++) {
335 // Tell mem dependence unit to reg stats as well.
336 memDepUnit[tid].regStats();
337 }
338
339 intInstQueueReads
340 .name(name() + ".int_inst_queue_reads")
341 .desc("Number of integer instruction queue reads")
342 .flags(total);
343
344 intInstQueueWrites
345 .name(name() + ".int_inst_queue_writes")
346 .desc("Number of integer instruction queue writes")
347 .flags(total);
348
349 intInstQueueWakeupAccesses
350 .name(name() + ".int_inst_queue_wakeup_accesses")
351 .desc("Number of integer instruction queue wakeup accesses")
352 .flags(total);
353
354 fpInstQueueReads
355 .name(name() + ".fp_inst_queue_reads")
356        .desc("Number of floating point instruction queue reads")
357 .flags(total);
358
359 fpInstQueueWrites
360 .name(name() + ".fp_inst_queue_writes")
361        .desc("Number of floating point instruction queue writes")
362 .flags(total);
363
364 fpInstQueueWakeupQccesses
365 .name(name() + ".fp_inst_queue_wakeup_accesses")
366        .desc("Number of floating point instruction queue wakeup accesses")
367 .flags(total);
368
369 intAluAccesses
370 .name(name() + ".int_alu_accesses")
371 .desc("Number of integer alu accesses")
372 .flags(total);
373
374 fpAluAccesses
375 .name(name() + ".fp_alu_accesses")
376 .desc("Number of floating point alu accesses")
377 .flags(total);
378
379}
380
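// Restore the IQ to its freshly constructed state: per-thread counts and
// instruction lists are cleared, every scoreboard entry is marked not
// ready, and the ready, non-speculative and memory bookkeeping lists are
// emptied.  Also reused by takeOverFrom().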
381template <class Impl>
382void
383InstructionQueue<Impl>::resetState()
384{
385 //Initialize thread IQ counts
386    for (ThreadID tid = 0; tid < numThreads; tid++) {
387 count[tid] = 0;
388 instList[tid].clear();
389 }
390
391 // Initialize the number of free IQ entries.
392 freeEntries = numEntries;
393
394 // Note that in actuality, the registers corresponding to the logical
395 // registers start off as ready. However this doesn't matter for the
396 // IQ as the instruction should have been correctly told if those
397 // registers are ready in rename. Thus it can all be initialized as
398 // unready.
399 for (int i = 0; i < numPhysRegs; ++i) {
400 regScoreboard[i] = false;
401 }
402
403 for (ThreadID tid = 0; tid < numThreads; ++tid) {
404 squashedSeqNum[tid] = 0;
405 }
406
407 for (int i = 0; i < Num_OpClasses; ++i) {
408 while (!readyInsts[i].empty())
409 readyInsts[i].pop();
410 queueOnList[i] = false;
411 readyIt[i] = listOrder.end();
412 }
413 nonSpecInsts.clear();
414 listOrder.clear();
415 deferredMemInsts.clear();
416 blockedMemInsts.clear();
417 retryMemInsts.clear();
418}
419
420template <class Impl>
421void
422InstructionQueue<Impl>::setActiveThreads(list<ThreadID> *at_ptr)
423{
424 activeThreads = at_ptr;
425}
426
427template <class Impl>
428void
429InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
430{
431 issueToExecuteQueue = i2e_ptr;
432}
433
434template <class Impl>
435void
436InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
437{
438 timeBuffer = tb_ptr;
439
440 fromCommit = timeBuffer->getWire(-commitToIEWDelay);
441}
442
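// Drain support: the IQ counts as drained once the dependency graph holds
// no waiting instructions, nothing is queued for execute, and every
// thread's memory dependence unit reports drained.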
443template <class Impl>
444bool
445InstructionQueue<Impl>::isDrained() const
446{
447 bool drained = dependGraph.empty() && instsToExecute.empty();
448 for (ThreadID tid = 0; tid < numThreads; ++tid)
449 drained = drained && memDepUnit[tid].isDrained();
450
451 return drained;
452}
453
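// Sanity check run when the CPU drains: with nothing in flight the
// dependency graph, the execute list and each thread's memory dependence
// unit must all be empty.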
454template <class Impl>
444void
445InstructionQueue<Impl>::drainSanityCheck() const
446{
447 assert(dependGraph.empty());
448 assert(instsToExecute.empty());
449 for (ThreadID tid = 0; tid < numThreads; ++tid)
450 memDepUnit[tid].drainSanityCheck();
451}
452
453template <class Impl>
454void
455InstructionQueue<Impl>::takeOverFrom()
456{
457 resetState();
458}
459
460template <class Impl>
461int
462InstructionQueue<Impl>::entryAmount(ThreadID num_threads)
463{
464 if (iqPolicy == Partitioned) {
465 return numEntries / num_threads;
466 } else {
467 return 0;
468 }
469}
470
471
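// Recompute the per-thread IQ limits when the set of active threads
// changes: under Partitioned each active thread receives an equal share,
// and under Threshold a single active thread may use the entire IQ.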
472template <class Impl>
473void
474InstructionQueue<Impl>::resetEntries()
475{
476 if (iqPolicy != Dynamic || numThreads > 1) {
477 int active_threads = activeThreads->size();
478
479 list<ThreadID>::iterator threads = activeThreads->begin();
480 list<ThreadID>::iterator end = activeThreads->end();
481
482 while (threads != end) {
483 ThreadID tid = *threads++;
484
485 if (iqPolicy == Partitioned) {
486 maxEntries[tid] = numEntries / active_threads;
487            } else if (iqPolicy == Threshold && active_threads == 1) {
488 maxEntries[tid] = numEntries;
489 }
490 }
491 }
492}
493
494template <class Impl>
495unsigned
496InstructionQueue<Impl>::numFreeEntries()
497{
498 return freeEntries;
499}
500
501template <class Impl>
502unsigned
503InstructionQueue<Impl>::numFreeEntries(ThreadID tid)
504{
505 return maxEntries[tid] - count[tid];
506}
507
508// Might want to do something more complex if it knows how many instructions
509// will be issued this cycle.
510template <class Impl>
511bool
512InstructionQueue<Impl>::isFull()
513{
514 if (freeEntries == 0) {
515 return(true);
516 } else {
517 return(false);
518 }
519}
520
521template <class Impl>
522bool
523InstructionQueue<Impl>::isFull(ThreadID tid)
524{
525 if (numFreeEntries(tid) == 0) {
526 return(true);
527 } else {
528 return(false);
529 }
530}
531
532template <class Impl>
533bool
534InstructionQueue<Impl>::hasReadyInsts()
535{
536 if (!listOrder.empty()) {
537 return true;
538 }
539
540 for (int i = 0; i < Num_OpClasses; ++i) {
541 if (!readyInsts[i].empty()) {
542 return true;
543 }
544 }
545
546 return false;
547}
548
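// Dispatch-side entry point: record the instruction in its thread's list,
// chain its not-yet-ready sources into the dependency graph
// (addToDependents), advertise it as the producer of its destinations
// (addToProducers), and then either hand it to the memory dependence unit
// (memory refs) or place it on a ready list if all sources are ready
// (addIfReady).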
549template <class Impl>
550void
551InstructionQueue<Impl>::insert(DynInstPtr &new_inst)
552{
553 new_inst->isFloating() ? fpInstQueueWrites++ : intInstQueueWrites++;
554 // Make sure the instruction is valid
555 assert(new_inst);
556
557 DPRINTF(IQ, "Adding instruction [sn:%lli] PC %s to the IQ.\n",
558 new_inst->seqNum, new_inst->pcState());
559
560 assert(freeEntries != 0);
561
562 instList[new_inst->threadNumber].push_back(new_inst);
563
564 --freeEntries;
565
566 new_inst->setInIQ();
567
568 // Look through its source registers (physical regs), and mark any
569 // dependencies.
570 addToDependents(new_inst);
571
572 // Have this instruction set itself as the producer of its destination
573 // register(s).
574 addToProducers(new_inst);
575
576 if (new_inst->isMemRef()) {
577 memDepUnit[new_inst->threadNumber].insert(new_inst);
578 } else {
579 addIfReady(new_inst);
580 }
581
582 ++iqInstsAdded;
583
584 count[new_inst->threadNumber]++;
585
586 assert(freeEntries == (numEntries - countInsts()));
587}
588
589template <class Impl>
590void
591InstructionQueue<Impl>::insertNonSpec(DynInstPtr &new_inst)
592{
593 // @todo: Clean up this code; can do it by setting inst as unable
594 // to issue, then calling normal insert on the inst.
595 new_inst->isFloating() ? fpInstQueueWrites++ : intInstQueueWrites++;
596
597 assert(new_inst);
598
599 nonSpecInsts[new_inst->seqNum] = new_inst;
600
601 DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %s "
602 "to the IQ.\n",
603 new_inst->seqNum, new_inst->pcState());
604
605 assert(freeEntries != 0);
606
607 instList[new_inst->threadNumber].push_back(new_inst);
608
609 --freeEntries;
610
611 new_inst->setInIQ();
612
613 // Have this instruction set itself as the producer of its destination
614 // register(s).
615 addToProducers(new_inst);
616
617 // If it's a memory instruction, add it to the memory dependency
618 // unit.
619 if (new_inst->isMemRef()) {
620 memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
621 }
622
623 ++iqNonSpecInstsAdded;
624
625 count[new_inst->threadNumber]++;
626
627 assert(freeEntries == (numEntries - countInsts()));
628}
629
630template <class Impl>
631void
632InstructionQueue<Impl>::insertBarrier(DynInstPtr &barr_inst)
633{
634 memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);
635
636 insertNonSpec(barr_inst);
637}
638
639template <class Impl>
640typename Impl::DynInstPtr
641InstructionQueue<Impl>::getInstToExecute()
642{
643 assert(!instsToExecute.empty());
644 DynInstPtr inst = instsToExecute.front();
645 instsToExecute.pop_front();
646 if (inst->isFloating()){
647 fpInstQueueReads++;
648 } else {
649 intInstQueueReads++;
650 }
651 return inst;
652}
653
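// listOrder holds one entry per op class that currently has ready
// instructions, kept sorted by the sequence number of the oldest ready
// instruction in that class, so scheduleReadyInsts() always considers the
// op class with the oldest ready work first.  For example, if the oldest
// ready MemRead is [sn:7] and the oldest ready IntAlu is [sn:12], the
// MemRead entry sits ahead of the IntAlu entry.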
654template <class Impl>
655void
656InstructionQueue<Impl>::addToOrderList(OpClass op_class)
657{
658 assert(!readyInsts[op_class].empty());
659
660 ListOrderEntry queue_entry;
661
662 queue_entry.queueType = op_class;
663
664 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
665
666 ListOrderIt list_it = listOrder.begin();
667 ListOrderIt list_end_it = listOrder.end();
668
669 while (list_it != list_end_it) {
670 if ((*list_it).oldestInst > queue_entry.oldestInst) {
671 break;
672 }
673
674 list_it++;
675 }
676
677 readyIt[op_class] = listOrder.insert(list_it, queue_entry);
678 queueOnList[op_class] = true;
679}
680
681template <class Impl>
682void
683InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
684{
685 // Get iterator of next item on the list
686 // Delete the original iterator
687 // Determine if the next item is either the end of the list or younger
688 // than the new instruction. If so, then add in a new iterator right here.
689 // If not, then move along.
690 ListOrderEntry queue_entry;
691 OpClass op_class = (*list_order_it).queueType;
692 ListOrderIt next_it = list_order_it;
693
694 ++next_it;
695
696 queue_entry.queueType = op_class;
697 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
698
699 while (next_it != listOrder.end() &&
700 (*next_it).oldestInst < queue_entry.oldestInst) {
701 ++next_it;
702 }
703
704 readyIt[op_class] = listOrder.insert(next_it, queue_entry);
705}
706
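// Invoked by an FUCompletion event when a multi-cycle operation finishes:
// wake the CPU in case it was sleeping, free the FU if one was reserved,
// and queue the instruction for execution next cycle.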
707template <class Impl>
708void
709InstructionQueue<Impl>::processFUCompletion(DynInstPtr &inst, int fu_idx)
710{
711 DPRINTF(IQ, "Processing FU completion [sn:%lli]\n", inst->seqNum);
712 assert(!cpu->switchedOut());
713 // The CPU could have been sleeping until this op completed (*extremely*
714 // long latency op). Wake it if it was. This may be overkill.
715 iewStage->wakeCPU();
716
717 if (fu_idx > -1)
718 fuPool->freeUnitNextCycle(fu_idx);
719
720 // @todo: Ensure that these FU Completions happen at the beginning
721 // of a cycle, otherwise they could add too many instructions to
722 // the queue.
723 issueToExecuteQueue->access(-1)->size++;
724 instsToExecute.push_back(inst);
725}
726
727// @todo: Figure out a better way to remove the squashed items from the
728// lists. Checking the top item of each list to see if it's squashed
729// wastes time and forces jumps.
730template <class Impl>
731void
732InstructionQueue<Impl>::scheduleReadyInsts()
733{
734 DPRINTF(IQ, "Attempting to schedule ready instructions from "
735 "the IQ.\n");
736
737 IssueStruct *i2e_info = issueToExecuteQueue->access(0);
738
739 DynInstPtr mem_inst;
740    while ((mem_inst = getDeferredMemInstToExecute())) {
741 addReadyMemInst(mem_inst);
742 }
743
744 // See if any cache blocked instructions are able to be executed
745    while ((mem_inst = getBlockedMemInstToExecute())) {
746 addReadyMemInst(mem_inst);
747 }
748
749 // Have iterator to head of the list
750 // While I haven't exceeded bandwidth or reached the end of the list,
751 // Try to get a FU that can do what this op needs.
752 // If successful, change the oldestInst to the new top of the list, put
753 // the queue in the proper place in the list.
754 // Increment the iterator.
755 // This will avoid trying to schedule a certain op class if there are no
756 // FUs that handle it.
757 int total_issued = 0;
758 ListOrderIt order_it = listOrder.begin();
759 ListOrderIt order_end_it = listOrder.end();
760
761 while (total_issued < totalWidth && order_it != order_end_it) {
762 OpClass op_class = (*order_it).queueType;
763
764 assert(!readyInsts[op_class].empty());
765
766 DynInstPtr issuing_inst = readyInsts[op_class].top();
767
768 issuing_inst->isFloating() ? fpInstQueueReads++ : intInstQueueReads++;
769
770 assert(issuing_inst->seqNum == (*order_it).oldestInst);
771
772 if (issuing_inst->isSquashed()) {
773 readyInsts[op_class].pop();
774
775 if (!readyInsts[op_class].empty()) {
776 moveToYoungerInst(order_it);
777 } else {
778 readyIt[op_class] = listOrder.end();
779 queueOnList[op_class] = false;
780 }
781
782 listOrder.erase(order_it++);
783
784 ++iqSquashedInstsIssued;
785
786 continue;
787 }
788
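        // idx stays at -2 when the op needs no FU (No_OpClass); otherwise
        // it holds the FU pool's answer, where -1 means no suitable unit
        // could be obtained this cycle and values >= 0 name the unit that
        // will execute this instruction.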
789 int idx = -2;
790 Cycles op_latency = Cycles(1);
791 ThreadID tid = issuing_inst->threadNumber;
792
793 if (op_class != No_OpClass) {
794 idx = fuPool->getUnit(op_class);
795 issuing_inst->isFloating() ? fpAluAccesses++ : intAluAccesses++;
796 if (idx > -1) {
797 op_latency = fuPool->getOpLatency(op_class);
798 }
799 }
800
801 // If we have an instruction that doesn't require a FU, or a
802 // valid FU, then schedule for execution.
803 if (idx == -2 || idx != -1) {
804 if (op_latency == Cycles(1)) {
805 i2e_info->size++;
806 instsToExecute.push_back(issuing_inst);
807
808 // Add the FU onto the list of FU's to be freed next
809 // cycle if we used one.
810 if (idx >= 0)
811 fuPool->freeUnitNextCycle(idx);
812 } else {
813 Cycles issue_latency = fuPool->getIssueLatency(op_class);
814 // Generate completion event for the FU
815 FUCompletion *execution = new FUCompletion(issuing_inst,
816 idx, this);
817
818 cpu->schedule(execution,
819 cpu->clockEdge(Cycles(op_latency - 1)));
820
821 // @todo: Enforce that issue_latency == 1 or op_latency
822 if (issue_latency > Cycles(1)) {
823 // If FU isn't pipelined, then it must be freed
824 // upon the execution completing.
825 execution->setFreeFU();
826 } else {
827 // Add the FU onto the list of FU's to be freed next cycle.
828 fuPool->freeUnitNextCycle(idx);
829 }
830 }
831
832 DPRINTF(IQ, "Thread %i: Issuing instruction PC %s "
833 "[sn:%lli]\n",
834 tid, issuing_inst->pcState(),
835 issuing_inst->seqNum);
836
837 readyInsts[op_class].pop();
838
839 if (!readyInsts[op_class].empty()) {
840 moveToYoungerInst(order_it);
841 } else {
842 readyIt[op_class] = listOrder.end();
843 queueOnList[op_class] = false;
844 }
845
846 issuing_inst->setIssued();
847 ++total_issued;
848
849#if TRACING_ON
850 issuing_inst->issueTick = curTick() - issuing_inst->fetchTick;
851#endif
852
853 if (!issuing_inst->isMemRef()) {
854 // Memory instructions can not be freed from the IQ until they
855 // complete.
856 ++freeEntries;
857 count[tid]--;
858 issuing_inst->clearInIQ();
859 } else {
860 memDepUnit[tid].issue(issuing_inst);
861 }
862
863 listOrder.erase(order_it++);
864 statIssuedInstType[tid][op_class]++;
865 } else {
866 statFuBusy[op_class]++;
867 fuBusy[tid]++;
868 ++order_it;
869 }
870 }
871
872 numIssuedDist.sample(total_issued);
873 iqInstsIssued+= total_issued;
874
875 // If we issued any instructions, tell the CPU we had activity.
876    // @todo If the way deferred memory instructions are handled due to
877    // translation ever changes, then the deferredMemInsts condition should
878    // be removed from the code below.
879 if (total_issued || !retryMemInsts.empty() || !deferredMemInsts.empty()) {
880 cpu->activityThisCycle();
881 } else {
882 DPRINTF(IQ, "Not able to schedule any instructions.\n");
883 }
884}
885
886template <class Impl>
887void
888InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
889{
890 DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
891 "to execute.\n", inst);
892
893 NonSpecMapIt inst_it = nonSpecInsts.find(inst);
894
895 assert(inst_it != nonSpecInsts.end());
896
897 ThreadID tid = (*inst_it).second->threadNumber;
898
899 (*inst_it).second->setAtCommit();
900
901 (*inst_it).second->setCanIssue();
902
903 if (!(*inst_it).second->isMemRef()) {
904 addIfReady((*inst_it).second);
905 } else {
906 memDepUnit[tid].nonSpecInstReady((*inst_it).second);
907 }
908
909 (*inst_it).second = NULL;
910
911 nonSpecInsts.erase(inst_it);
912}
913
914template <class Impl>
915void
916InstructionQueue<Impl>::commit(const InstSeqNum &inst, ThreadID tid)
917{
918    DPRINTF(IQ, "[tid:%i]: Committing instructions up to and including "
919            "[sn:%i]\n", tid, inst);
920
921 ListIt iq_it = instList[tid].begin();
922
923 while (iq_it != instList[tid].end() &&
924 (*iq_it)->seqNum <= inst) {
925 ++iq_it;
926 instList[tid].pop_front();
927 }
928
929 assert(freeEntries == (numEntries - countInsts()));
930}
931
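// When an instruction completes, walk the dependency chain hanging off
// each of its destination registers, mark that source ready in every
// waiting consumer, move any now fully ready consumers onto the ready
// lists, and finally mark the register ready in the scoreboard.  Returns
// the number of dependents woken.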
932template <class Impl>
933int
934InstructionQueue<Impl>::wakeDependents(DynInstPtr &completed_inst)
935{
936 int dependents = 0;
937
938 // The instruction queue here takes care of both floating and int ops
939 if (completed_inst->isFloating()) {
940 fpInstQueueWakeupQccesses++;
941 } else {
942 intInstQueueWakeupAccesses++;
943 }
944
945 DPRINTF(IQ, "Waking dependents of completed instruction.\n");
946
947 assert(!completed_inst->isSquashed());
948
949 // Tell the memory dependence unit to wake any dependents on this
950 // instruction if it is a memory instruction. Also complete the memory
951 // instruction at this point since we know it executed without issues.
952 // @todo: Might want to rename "completeMemInst" to something that
953 // indicates that it won't need to be replayed, and call this
954 // earlier. Might not be a big deal.
955 if (completed_inst->isMemRef()) {
956 memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
957 completeMemInst(completed_inst);
958 } else if (completed_inst->isMemBarrier() ||
959 completed_inst->isWriteBarrier()) {
960 memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
961 }
962
963 for (int dest_reg_idx = 0;
964 dest_reg_idx < completed_inst->numDestRegs();
965 dest_reg_idx++)
966 {
967 PhysRegIndex dest_reg =
968 completed_inst->renamedDestRegIdx(dest_reg_idx);
969
970        // Special case of unique or control registers. They are not
971 // handled by the IQ and thus have no dependency graph entry.
972 // @todo Figure out a cleaner way to handle this.
973 if (dest_reg >= numPhysRegs) {
974 DPRINTF(IQ, "dest_reg :%d, numPhysRegs: %d\n", dest_reg,
975 numPhysRegs);
976 continue;
977 }
978
979 DPRINTF(IQ, "Waking any dependents on register %i.\n",
980 (int) dest_reg);
981
982 //Go through the dependency chain, marking the registers as
983 //ready within the waiting instructions.
984 DynInstPtr dep_inst = dependGraph.pop(dest_reg);
985
986 while (dep_inst) {
987 DPRINTF(IQ, "Waking up a dependent instruction, [sn:%lli] "
988 "PC %s.\n", dep_inst->seqNum, dep_inst->pcState());
989
990 // Might want to give more information to the instruction
991 // so that it knows which of its source registers is
992 // ready. However that would mean that the dependency
993 // graph entries would need to hold the src_reg_idx.
994 dep_inst->markSrcRegReady();
995
996 addIfReady(dep_inst);
997
998 dep_inst = dependGraph.pop(dest_reg);
999
1000 ++dependents;
1001 }
1002
1003 // Reset the head node now that all of its dependents have
1004 // been woken up.
1005 assert(dependGraph.empty(dest_reg));
1006 dependGraph.clearInst(dest_reg);
1007
1008 // Mark the scoreboard as having that register ready.
1009 regScoreboard[dest_reg] = true;
1010 }
1011 return dependents;
1012}
1013
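// Put a memory instruction that may now issue onto the ready list for its
// op class, re-sorting listOrder if that class was absent or now has an
// older oldest-ready entry.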
1014template <class Impl>
1015void
1016InstructionQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
1017{
1018 OpClass op_class = ready_inst->opClass();
1019
1020 readyInsts[op_class].push(ready_inst);
1021
1022 // Will need to reorder the list if either a queue is not on the list,
1023 // or it has an older instruction than last time.
1024 if (!queueOnList[op_class]) {
1025 addToOrderList(op_class);
1026 } else if (readyInsts[op_class].top()->seqNum <
1027 (*readyIt[op_class]).oldestInst) {
1028 listOrder.erase(readyIt[op_class]);
1029 addToOrderList(op_class);
1030 }
1031
1032 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
1033 "the ready list, PC %s opclass:%i [sn:%lli].\n",
1034 ready_inst->pcState(), op_class, ready_inst->seqNum);
1035}
1036
1037template <class Impl>
1038void
1039InstructionQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
1040{
1041 DPRINTF(IQ, "Rescheduling mem inst [sn:%lli]\n", resched_inst->seqNum);
1042
1043 // Reset DTB translation state
1044 resched_inst->translationStarted(false);
1045 resched_inst->translationCompleted(false);
1046
1047 resched_inst->clearCanIssue();
1048 memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
1049}
1050
1051template <class Impl>
1052void
1053InstructionQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
1054{
1055 memDepUnit[replay_inst->threadNumber].replay();
1056}
1057
1058template <class Impl>
1059void
1060InstructionQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
1061{
1062 ThreadID tid = completed_inst->threadNumber;
1063
1064 DPRINTF(IQ, "Completing mem instruction PC: %s [sn:%lli]\n",
1065 completed_inst->pcState(), completed_inst->seqNum);
1066
1067 ++freeEntries;
1068
1069 completed_inst->memOpDone(true);
1070
1071 memDepUnit[tid].completed(completed_inst);
1072 count[tid]--;
1073}
1074
1075template <class Impl>
1076void
1077InstructionQueue<Impl>::deferMemInst(DynInstPtr &deferred_inst)
1078{
1079 deferredMemInsts.push_back(deferred_inst);
1080}
1081
1082template <class Impl>
1083void
1084InstructionQueue<Impl>::blockMemInst(DynInstPtr &blocked_inst)
1085{
1086 blocked_inst->translationStarted(false);
1087 blocked_inst->translationCompleted(false);
1088
1089 blocked_inst->clearIssued();
1090 blocked_inst->clearCanIssue();
1091 blockedMemInsts.push_back(blocked_inst);
1092}
1093
1094template <class Impl>
1095void
1096InstructionQueue<Impl>::cacheUnblocked()
1097{
1098 retryMemInsts.splice(retryMemInsts.end(), blockedMemInsts);
1099 // Get the CPU ticking again
1100 cpu->wakeCPU();
1101}
1102
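// Deferred memory instructions wait for their address translation to
// complete (or for a squash) and are re-checked by scheduleReadyInsts()
// each cycle; blocked memory instructions ran into a blocked cache and
// stay in blockedMemInsts until cacheUnblocked() moves them onto
// retryMemInsts for another attempt.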
1103template <class Impl>
1104typename Impl::DynInstPtr
1105InstructionQueue<Impl>::getDeferredMemInstToExecute()
1106{
1107 for (ListIt it = deferredMemInsts.begin(); it != deferredMemInsts.end();
1108 ++it) {
1109 if ((*it)->translationCompleted() || (*it)->isSquashed()) {
1110 DynInstPtr mem_inst = *it;
1111 deferredMemInsts.erase(it);
1112 return mem_inst;
1113 }
1114 }
1115 return nullptr;
1116}
1117
1118template <class Impl>
1119typename Impl::DynInstPtr
1120InstructionQueue<Impl>::getBlockedMemInstToExecute()
1121{
1122 if (retryMemInsts.empty()) {
1123 return nullptr;
1124 } else {
1125 DynInstPtr mem_inst = retryMemInsts.front();
1126 retryMemInsts.pop_front();
1127 return mem_inst;
1128 }
1129}
1130
1131template <class Impl>
1132void
1133InstructionQueue<Impl>::violation(DynInstPtr &store,
1134 DynInstPtr &faulting_load)
1135{
1136 intInstQueueWrites++;
1137 memDepUnit[store->threadNumber].violation(store, faulting_load);
1138}
1139
1140template <class Impl>
1141void
1142InstructionQueue<Impl>::squash(ThreadID tid)
1143{
1144 DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
1145 "the IQ.\n", tid);
1146
1147 // Read instruction sequence number of last instruction out of the
1148 // time buffer.
1149 squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;
1150
1151 // Call doSquash if there are insts in the IQ
1152 if (count[tid] > 0) {
1153 doSquash(tid);
1154 }
1155
1156 // Also tell the memory dependence unit to squash.
1157 memDepUnit[tid].squash(squashedSeqNum[tid], tid);
1158}
1159
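// Walk the thread's instruction list from the tail, marking everything
// younger than squashedSeqNum as squashed in the IQ, unhooking any
// still-outstanding source operands from the dependency graph (or
// dropping the entry from nonSpecInsts for non-speculative instructions),
// and reclaiming the IQ entries of instructions that had not yet issued.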
1160template <class Impl>
1161void
1162InstructionQueue<Impl>::doSquash(ThreadID tid)
1163{
1164 // Start at the tail.
1165 ListIt squash_it = instList[tid].end();
1166 --squash_it;
1167
1168 DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
1169 tid, squashedSeqNum[tid]);
1170
1171 // Squash any instructions younger than the squashed sequence number
1172 // given.
1173 while (squash_it != instList[tid].end() &&
1174 (*squash_it)->seqNum > squashedSeqNum[tid]) {
1175
1176 DynInstPtr squashed_inst = (*squash_it);
1177 squashed_inst->isFloating() ? fpInstQueueWrites++ : intInstQueueWrites++;
1178
1179 // Only handle the instruction if it actually is in the IQ and
1180 // hasn't already been squashed in the IQ.
1181 if (squashed_inst->threadNumber != tid ||
1182 squashed_inst->isSquashedInIQ()) {
1183 --squash_it;
1184 continue;
1185 }
1186
1187 if (!squashed_inst->isIssued() ||
1188 (squashed_inst->isMemRef() &&
1189 !squashed_inst->memOpDone())) {
1190
1191 DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %s squashed.\n",
1192 tid, squashed_inst->seqNum, squashed_inst->pcState());
1193
1194 bool is_acq_rel = squashed_inst->isMemBarrier() &&
1195 (squashed_inst->isLoad() ||
1196 (squashed_inst->isStore() &&
1197 !squashed_inst->isStoreConditional()));
1198
1199 // Remove the instruction from the dependency list.
1200 if (is_acq_rel ||
1201 (!squashed_inst->isNonSpeculative() &&
1202 !squashed_inst->isStoreConditional() &&
1203 !squashed_inst->isMemBarrier() &&
1204 !squashed_inst->isWriteBarrier())) {
1205
1206 for (int src_reg_idx = 0;
1207 src_reg_idx < squashed_inst->numSrcRegs();
1208 src_reg_idx++)
1209 {
1210 PhysRegIndex src_reg =
1211 squashed_inst->renamedSrcRegIdx(src_reg_idx);
1212
1213 // Only remove it from the dependency graph if it
1214 // was placed there in the first place.
1215
1216 // Instead of doing a linked list traversal, we
1217 // can just remove these squashed instructions
1218 // either at issue time, or when the register is
1219 // overwritten. The only downside to this is it
1220 // leaves more room for error.
1221
1222 if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
1223 src_reg < numPhysRegs) {
1224 dependGraph.remove(src_reg, squashed_inst);
1225 }
1226
1227
1228 ++iqSquashedOperandsExamined;
1229 }
1230 } else if (!squashed_inst->isStoreConditional() ||
1231 !squashed_inst->isCompleted()) {
1232 NonSpecMapIt ns_inst_it =
1233 nonSpecInsts.find(squashed_inst->seqNum);
1234
1235 // we remove non-speculative instructions from
1236 // nonSpecInsts already when they are ready, and so we
1237 // cannot always expect to find them
1238 if (ns_inst_it == nonSpecInsts.end()) {
1239 // loads that became ready but stalled on a
1240                // blocked cache are already removed from
1241 // nonSpecInsts, and have not faulted
1242 assert(squashed_inst->getFault() != NoFault ||
1243 squashed_inst->isMemRef());
1244 } else {
1245
1246 (*ns_inst_it).second = NULL;
1247
1248 nonSpecInsts.erase(ns_inst_it);
1249
1250 ++iqSquashedNonSpecRemoved;
1251 }
1252 }
1253
1254 // Might want to also clear out the head of the dependency graph.
1255
1256 // Mark it as squashed within the IQ.
1257 squashed_inst->setSquashedInIQ();
1258
1259 // @todo: Remove this hack where several statuses are set so the
1260 // inst will flow through the rest of the pipeline.
1261 squashed_inst->setIssued();
1262 squashed_inst->setCanCommit();
1263 squashed_inst->clearInIQ();
1264
1265 //Update Thread IQ Count
1266 count[squashed_inst->threadNumber]--;
1267
1268 ++freeEntries;
1269 }
1270
1271 instList[tid].erase(squash_it--);
1272 ++iqSquashedInstsExamined;
1273 }
1274}
1275
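// For every source register that is not already marked ready, consult the
// IQ scoreboard: if the value is still outstanding, chain this instruction
// onto that register's dependency list; otherwise mark the source ready
// immediately.  Returns true if anything was added to the graph.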
1276template <class Impl>
1277bool
1278InstructionQueue<Impl>::addToDependents(DynInstPtr &new_inst)
1279{
1280 // Loop through the instruction's source registers, adding
1281 // them to the dependency list if they are not ready.
1282 int8_t total_src_regs = new_inst->numSrcRegs();
1283 bool return_val = false;
1284
1285 for (int src_reg_idx = 0;
1286 src_reg_idx < total_src_regs;
1287 src_reg_idx++)
1288 {
1289 // Only add it to the dependency graph if it's not ready.
1290 if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
1291 PhysRegIndex src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);
1292
1293 // Check the IQ's scoreboard to make sure the register
1294 // hasn't become ready while the instruction was in flight
1295 // between stages. Only if it really isn't ready should
1296 // it be added to the dependency graph.
1297 if (src_reg >= numPhysRegs) {
1298 continue;
1299 } else if (!regScoreboard[src_reg]) {
1300 DPRINTF(IQ, "Instruction PC %s has src reg %i that "
1301 "is being added to the dependency chain.\n",
1302 new_inst->pcState(), src_reg);
1303
1304 dependGraph.insert(src_reg, new_inst);
1305
1306 // Change the return value to indicate that something
1307 // was added to the dependency graph.
1308 return_val = true;
1309 } else {
1310 DPRINTF(IQ, "Instruction PC %s has src reg %i that "
1311 "became ready before it reached the IQ.\n",
1312 new_inst->pcState(), src_reg);
1313 // Mark a register ready within the instruction.
1314 new_inst->markSrcRegReady(src_reg_idx);
1315 }
1316 }
1317 }
1318
1319 return return_val;
1320}
1321
1322template <class Impl>
1323void
1324InstructionQueue<Impl>::addToProducers(DynInstPtr &new_inst)
1325{
1326 // Nothing really needs to be marked when an instruction becomes
1327 // the producer of a register's value, but for convenience a ptr
1328 // to the producing instruction will be placed in the head node of
1329 // the dependency links.
1330 int8_t total_dest_regs = new_inst->numDestRegs();
1331
1332 for (int dest_reg_idx = 0;
1333 dest_reg_idx < total_dest_regs;
1334 dest_reg_idx++)
1335 {
1336 PhysRegIndex dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);
1337
1338 // Instructions that use the misc regs will have a reg number
1339 // higher than the normal physical registers. In this case these
1340 // registers are not renamed, and there is no need to track
1341 // dependencies as these instructions must be executed at commit.
1342 if (dest_reg >= numPhysRegs) {
1343 continue;
1344 }
1345
1346 if (!dependGraph.empty(dest_reg)) {
1347 dependGraph.dump();
1348 panic("Dependency graph %i not empty!", dest_reg);
1349 }
1350
1351 dependGraph.setInst(dest_reg, new_inst);
1352
1353 // Mark the scoreboard to say it's not yet ready.
1354 regScoreboard[dest_reg] = false;
1355 }
1356}
1357
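// Once an instruction has all of its source registers, memory references
// are handed to the memory dependence unit (which decides when they may
// issue), while everything else goes straight onto the ready list for its
// op class.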
1358template <class Impl>
1359void
1360InstructionQueue<Impl>::addIfReady(DynInstPtr &inst)
1361{
1362 // If the instruction now has all of its source registers
1363 // available, then add it to the list of ready instructions.
1364 if (inst->readyToIssue()) {
1365
1366 //Add the instruction to the proper ready list.
1367 if (inst->isMemRef()) {
1368
1369 DPRINTF(IQ, "Checking if memory instruction can issue.\n");
1370
1371 // Message to the mem dependence unit that this instruction has
1372 // its registers ready.
1373 memDepUnit[inst->threadNumber].regsReady(inst);
1374
1375 return;
1376 }
1377
1378 OpClass op_class = inst->opClass();
1379
1380 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
1381 "the ready list, PC %s opclass:%i [sn:%lli].\n",
1382 inst->pcState(), op_class, inst->seqNum);
1383
1384 readyInsts[op_class].push(inst);
1385
1386 // Will need to reorder the list if either a queue is not on the list,
1387 // or it has an older instruction than last time.
1388 if (!queueOnList[op_class]) {
1389 addToOrderList(op_class);
1390 } else if (readyInsts[op_class].top()->seqNum <
1391 (*readyIt[op_class]).oldestInst) {
1392 listOrder.erase(readyIt[op_class]);
1393 addToOrderList(op_class);
1394 }
1395 }
1396}
1397
1398template <class Impl>
1399int
1400InstructionQueue<Impl>::countInsts()
1401{
1402#if 0
1403    //ksewell: This works, but it could definitely use a cleaner rewrite
1404    //with a more intuitive way of counting. Right now it's
1405    //just brute force ...
1406 // Change the #if if you want to use this method.
1407 int total_insts = 0;
1408
1409 for (ThreadID tid = 0; tid < numThreads; ++tid) {
1410 ListIt count_it = instList[tid].begin();
1411
1412 while (count_it != instList[tid].end()) {
1413 if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
1414 if (!(*count_it)->isIssued()) {
1415 ++total_insts;
1416 } else if ((*count_it)->isMemRef() &&
1417                           !(*count_it)->memOpDone()) {
1418 // Loads that have not been marked as executed still count
1419 // towards the total instructions.
1420 ++total_insts;
1421 }
1422 }
1423
1424 ++count_it;
1425 }
1426 }
1427
1428 return total_insts;
1429#else
1430 return numEntries - freeEntries;
1431#endif
1432}
1433
1434template <class Impl>
1435void
1436InstructionQueue<Impl>::dumpLists()
1437{
1438 for (int i = 0; i < Num_OpClasses; ++i) {
1439 cprintf("Ready list %i size: %i\n", i, readyInsts[i].size());
1440
1441 cprintf("\n");
1442 }
1443
1444 cprintf("Non speculative list size: %i\n", nonSpecInsts.size());
1445
1446 NonSpecMapIt non_spec_it = nonSpecInsts.begin();
1447 NonSpecMapIt non_spec_end_it = nonSpecInsts.end();
1448
1449 cprintf("Non speculative list: ");
1450
1451 while (non_spec_it != non_spec_end_it) {
1452        cprintf("%s [sn:%lli] ", (*non_spec_it).second->pcState(),
1453 (*non_spec_it).second->seqNum);
1454 ++non_spec_it;
1455 }
1456
1457 cprintf("\n");
1458
1459 ListOrderIt list_order_it = listOrder.begin();
1460 ListOrderIt list_order_end_it = listOrder.end();
1461 int i = 1;
1462
1463 cprintf("List order: ");
1464
1465 while (list_order_it != list_order_end_it) {
1466 cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
1467 (*list_order_it).oldestInst);
1468
1469 ++list_order_it;
1470 ++i;
1471 }
1472
1473 cprintf("\n");
1474}
1475
1476
1477template <class Impl>
1478void
1479InstructionQueue<Impl>::dumpInsts()
1480{
1481 for (ThreadID tid = 0; tid < numThreads; ++tid) {
1482 int num = 0;
1483 int valid_num = 0;
1484 ListIt inst_list_it = instList[tid].begin();
1485
1486 while (inst_list_it != instList[tid].end()) {
1487 cprintf("Instruction:%i\n", num);
1488 if (!(*inst_list_it)->isSquashed()) {
1489 if (!(*inst_list_it)->isIssued()) {
1490 ++valid_num;
1491 cprintf("Count:%i\n", valid_num);
1492 } else if ((*inst_list_it)->isMemRef() &&
1493 !(*inst_list_it)->memOpDone()) {
1494 // Loads that have not been marked as executed
1495 // still count towards the total instructions.
1496 ++valid_num;
1497 cprintf("Count:%i\n", valid_num);
1498 }
1499 }
1500
1501 cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n"
1502 "Issued:%i\nSquashed:%i\n",
1503 (*inst_list_it)->pcState(),
1504 (*inst_list_it)->seqNum,
1505 (*inst_list_it)->threadNumber,
1506 (*inst_list_it)->isIssued(),
1507 (*inst_list_it)->isSquashed());
1508
1509 if ((*inst_list_it)->isMemRef()) {
1510 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone());
1511 }
1512
1513 cprintf("\n");
1514
1515 inst_list_it++;
1516 ++num;
1517 }
1518 }
1519
1520 cprintf("Insts to Execute list:\n");
1521
1522 int num = 0;
1523 int valid_num = 0;
1524 ListIt inst_list_it = instsToExecute.begin();
1525
1526 while (inst_list_it != instsToExecute.end())
1527 {
1528 cprintf("Instruction:%i\n",
1529 num);
1530 if (!(*inst_list_it)->isSquashed()) {
1531 if (!(*inst_list_it)->isIssued()) {
1532 ++valid_num;
1533 cprintf("Count:%i\n", valid_num);
1534 } else if ((*inst_list_it)->isMemRef() &&
1535 !(*inst_list_it)->memOpDone()) {
1536 // Loads that have not been marked as executed
1537 // still count towards the total instructions.
1538 ++valid_num;
1539 cprintf("Count:%i\n", valid_num);
1540 }
1541 }
1542
1543 cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n"
1544 "Issued:%i\nSquashed:%i\n",
1545 (*inst_list_it)->pcState(),
1546 (*inst_list_it)->seqNum,
1547 (*inst_list_it)->threadNumber,
1548 (*inst_list_it)->isIssued(),
1549 (*inst_list_it)->isSquashed());
1550
1551 if ((*inst_list_it)->isMemRef()) {
1552 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone());
1553 }
1554
1555 cprintf("\n");
1556
1557 inst_list_it++;
1558 ++num;
1559 }
1560}
1561
1562#endif//__CPU_O3_INST_QUEUE_IMPL_HH__
455void
456InstructionQueue<Impl>::drainSanityCheck() const
457{
458 assert(dependGraph.empty());
459 assert(instsToExecute.empty());
460 for (ThreadID tid = 0; tid < numThreads; ++tid)
461 memDepUnit[tid].drainSanityCheck();
462}
463
464template <class Impl>
465void
466InstructionQueue<Impl>::takeOverFrom()
467{
468 resetState();
469}
470
471template <class Impl>
472int
473InstructionQueue<Impl>::entryAmount(ThreadID num_threads)
474{
475 if (iqPolicy == Partitioned) {
476 return numEntries / num_threads;
477 } else {
478 return 0;
479 }
480}
481
482
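// Example (hypothetical numbers): with numEntries == 64 and two active
// threads, the Partitioned policy below gives each thread
// maxEntries[tid] == 32; the Threshold policy only widens a thread's limit
// to all 64 entries once it is the sole active thread; and the Dynamic
// policy leaves the per-thread limits untouched, so threads compete freely
// for the whole queue.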
483template <class Impl>
484void
485InstructionQueue<Impl>::resetEntries()
486{
487 if (iqPolicy != Dynamic || numThreads > 1) {
488 int active_threads = activeThreads->size();
489
490 list<ThreadID>::iterator threads = activeThreads->begin();
491 list<ThreadID>::iterator end = activeThreads->end();
492
493 while (threads != end) {
494 ThreadID tid = *threads++;
495
496 if (iqPolicy == Partitioned) {
497 maxEntries[tid] = numEntries / active_threads;
498            } else if (iqPolicy == Threshold && active_threads == 1) {
499 maxEntries[tid] = numEntries;
500 }
501 }
502 }
503}
504
505template <class Impl>
506unsigned
507InstructionQueue<Impl>::numFreeEntries()
508{
509 return freeEntries;
510}
511
512template <class Impl>
513unsigned
514InstructionQueue<Impl>::numFreeEntries(ThreadID tid)
515{
516 return maxEntries[tid] - count[tid];
517}
518
519// Might want to do something more complex if the number of instructions
520// to be issued this cycle were known.
521template <class Impl>
522bool
523InstructionQueue<Impl>::isFull()
524{
525    return freeEntries == 0;
530}
531
532template <class Impl>
533bool
534InstructionQueue<Impl>::isFull(ThreadID tid)
535{
536    return numFreeEntries(tid) == 0;
541}
542
543template <class Impl>
544bool
545InstructionQueue<Impl>::hasReadyInsts()
546{
547 if (!listOrder.empty()) {
548 return true;
549 }
550
551 for (int i = 0; i < Num_OpClasses; ++i) {
552 if (!readyInsts[i].empty()) {
553 return true;
554 }
555 }
556
557 return false;
558}
559
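// Dispatch-side entry points.  A rough sketch of how the dispatch logic in
// the IEW stage is expected to steer an incoming instruction into the IQ
// (illustration only; the real steering code lives in IEW, not here):
//
//     if (inst->isMemBarrier() || inst->isWriteBarrier())
//         instQueue.insertBarrier(inst);
//     else if (inst->isNonSpeculative())
//         instQueue.insertNonSpec(inst);
//     else
//         instQueue.insert(inst);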
560template <class Impl>
561void
562InstructionQueue<Impl>::insert(DynInstPtr &new_inst)
563{
564 new_inst->isFloating() ? fpInstQueueWrites++ : intInstQueueWrites++;
565 // Make sure the instruction is valid
566 assert(new_inst);
567
568 DPRINTF(IQ, "Adding instruction [sn:%lli] PC %s to the IQ.\n",
569 new_inst->seqNum, new_inst->pcState());
570
571 assert(freeEntries != 0);
572
573 instList[new_inst->threadNumber].push_back(new_inst);
574
575 --freeEntries;
576
577 new_inst->setInIQ();
578
579 // Look through its source registers (physical regs), and mark any
580 // dependencies.
581 addToDependents(new_inst);
582
583 // Have this instruction set itself as the producer of its destination
584 // register(s).
585 addToProducers(new_inst);
586
587 if (new_inst->isMemRef()) {
588 memDepUnit[new_inst->threadNumber].insert(new_inst);
589 } else {
590 addIfReady(new_inst);
591 }
592
593 ++iqInstsAdded;
594
595 count[new_inst->threadNumber]++;
596
597 assert(freeEntries == (numEntries - countInsts()));
598}
599
600template <class Impl>
601void
602InstructionQueue<Impl>::insertNonSpec(DynInstPtr &new_inst)
603{
604 // @todo: Clean up this code; can do it by setting inst as unable
605 // to issue, then calling normal insert on the inst.
606 new_inst->isFloating() ? fpInstQueueWrites++ : intInstQueueWrites++;
607
608 assert(new_inst);
609
610 nonSpecInsts[new_inst->seqNum] = new_inst;
611
612 DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %s "
613 "to the IQ.\n",
614 new_inst->seqNum, new_inst->pcState());
615
616 assert(freeEntries != 0);
617
618 instList[new_inst->threadNumber].push_back(new_inst);
619
620 --freeEntries;
621
622 new_inst->setInIQ();
623
624 // Have this instruction set itself as the producer of its destination
625 // register(s).
626 addToProducers(new_inst);
627
628 // If it's a memory instruction, add it to the memory dependency
629 // unit.
630 if (new_inst->isMemRef()) {
631 memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
632 }
633
634 ++iqNonSpecInstsAdded;
635
636 count[new_inst->threadNumber]++;
637
638 assert(freeEntries == (numEntries - countInsts()));
639}
640
641template <class Impl>
642void
643InstructionQueue<Impl>::insertBarrier(DynInstPtr &barr_inst)
644{
645 memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);
646
647 insertNonSpec(barr_inst);
648}
649
650template <class Impl>
651typename Impl::DynInstPtr
652InstructionQueue<Impl>::getInstToExecute()
653{
654 assert(!instsToExecute.empty());
655 DynInstPtr inst = instsToExecute.front();
656 instsToExecute.pop_front();
657 if (inst->isFloating()){
658 fpInstQueueReads++;
659 } else {
660 intInstQueueReads++;
661 }
662 return inst;
663}
664
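// listOrder is kept sorted by each op class's oldest ready sequence
// number, so scheduleReadyInsts() always considers the op class holding
// the globally oldest ready instruction first.  For example, if the
// IntAlu queue's oldest ready instruction is [sn:100] and the MemRead
// queue's is [sn:97], the MemRead entry is inserted ahead of the IntAlu
// entry.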
665template <class Impl>
666void
667InstructionQueue<Impl>::addToOrderList(OpClass op_class)
668{
669 assert(!readyInsts[op_class].empty());
670
671 ListOrderEntry queue_entry;
672
673 queue_entry.queueType = op_class;
674
675 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
676
677 ListOrderIt list_it = listOrder.begin();
678 ListOrderIt list_end_it = listOrder.end();
679
680 while (list_it != list_end_it) {
681 if ((*list_it).oldestInst > queue_entry.oldestInst) {
682 break;
683 }
684
685 list_it++;
686 }
687
688 readyIt[op_class] = listOrder.insert(list_it, queue_entry);
689 queueOnList[op_class] = true;
690}
691
692template <class Impl>
693void
694InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
695{
696    // Starting just past the entry being refreshed, scan forward for the
697    // first later entry that is younger than this op class's new oldest
698    // ready instruction (or the end of the list), and insert the
699    // refreshed entry in front of it.  Erasing the stale entry is left
700    // to the caller.
701 ListOrderEntry queue_entry;
702 OpClass op_class = (*list_order_it).queueType;
703 ListOrderIt next_it = list_order_it;
704
705 ++next_it;
706
707 queue_entry.queueType = op_class;
708 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
709
710 while (next_it != listOrder.end() &&
711 (*next_it).oldestInst < queue_entry.oldestInst) {
712 ++next_it;
713 }
714
715 readyIt[op_class] = listOrder.insert(next_it, queue_entry);
716}
717
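// Event callback run when a multi-cycle functional unit finishes an
// instruction: the unit is returned to the pool (unless fu_idx is -1,
// i.e. it was already freed at issue time because it is pipelined), and
// the instruction is forwarded to execute via the issue-to-execute queue.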
718template <class Impl>
719void
720InstructionQueue<Impl>::processFUCompletion(DynInstPtr &inst, int fu_idx)
721{
722 DPRINTF(IQ, "Processing FU completion [sn:%lli]\n", inst->seqNum);
723 assert(!cpu->switchedOut());
724 // The CPU could have been sleeping until this op completed (*extremely*
725 // long latency op). Wake it if it was. This may be overkill.
726 iewStage->wakeCPU();
727
728 if (fu_idx > -1)
729 fuPool->freeUnitNextCycle(fu_idx);
730
731 // @todo: Ensure that these FU Completions happen at the beginning
732 // of a cycle, otherwise they could add too many instructions to
733 // the queue.
734 issueToExecuteQueue->access(-1)->size++;
735 instsToExecute.push_back(inst);
736}
737
738// @todo: Figure out a better way to remove the squashed items from the
739// lists. Checking the top item of each list to see if it's squashed
740// wastes time and forces jumps.
741template <class Impl>
742void
743InstructionQueue<Impl>::scheduleReadyInsts()
744{
745 DPRINTF(IQ, "Attempting to schedule ready instructions from "
746 "the IQ.\n");
747
748 IssueStruct *i2e_info = issueToExecuteQueue->access(0);
749
750 DynInstPtr mem_inst;
751    while ((mem_inst = getDeferredMemInstToExecute())) {
752 addReadyMemInst(mem_inst);
753 }
754
755 // See if any cache blocked instructions are able to be executed
756    while ((mem_inst = getBlockedMemInstToExecute())) {
757 addReadyMemInst(mem_inst);
758 }
759
760 // Have iterator to head of the list
761 // While I haven't exceeded bandwidth or reached the end of the list,
762 // Try to get a FU that can do what this op needs.
763 // If successful, change the oldestInst to the new top of the list, put
764 // the queue in the proper place in the list.
765 // Increment the iterator.
766 // This will avoid trying to schedule a certain op class if there are no
767 // FUs that handle it.
768 int total_issued = 0;
769 ListOrderIt order_it = listOrder.begin();
770 ListOrderIt order_end_it = listOrder.end();
771
772 while (total_issued < totalWidth && order_it != order_end_it) {
773 OpClass op_class = (*order_it).queueType;
774
775 assert(!readyInsts[op_class].empty());
776
777 DynInstPtr issuing_inst = readyInsts[op_class].top();
778
779 issuing_inst->isFloating() ? fpInstQueueReads++ : intInstQueueReads++;
780
781 assert(issuing_inst->seqNum == (*order_it).oldestInst);
782
783 if (issuing_inst->isSquashed()) {
784 readyInsts[op_class].pop();
785
786 if (!readyInsts[op_class].empty()) {
787 moveToYoungerInst(order_it);
788 } else {
789 readyIt[op_class] = listOrder.end();
790 queueOnList[op_class] = false;
791 }
792
793 listOrder.erase(order_it++);
794
795 ++iqSquashedInstsIssued;
796
797 continue;
798 }
799
800 int idx = -2;
801 Cycles op_latency = Cycles(1);
802 ThreadID tid = issuing_inst->threadNumber;
803
804 if (op_class != No_OpClass) {
805 idx = fuPool->getUnit(op_class);
806 issuing_inst->isFloating() ? fpAluAccesses++ : intAluAccesses++;
807 if (idx > -1) {
808 op_latency = fuPool->getOpLatency(op_class);
809 }
810 }
811
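        // At this point idx encodes the FU lookup: -2 means the op needs
        // no functional unit (No_OpClass), -1 means no unit of this class
        // could be allocated this cycle, and >= 0 is the index of the unit
        // that was just acquired.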
812 // If we have an instruction that doesn't require a FU, or a
813 // valid FU, then schedule for execution.
814 if (idx == -2 || idx != -1) {
815 if (op_latency == Cycles(1)) {
816 i2e_info->size++;
817 instsToExecute.push_back(issuing_inst);
818
819 // Add the FU onto the list of FU's to be freed next
820 // cycle if we used one.
821 if (idx >= 0)
822 fuPool->freeUnitNextCycle(idx);
823 } else {
824 Cycles issue_latency = fuPool->getIssueLatency(op_class);
825 // Generate completion event for the FU
826 FUCompletion *execution = new FUCompletion(issuing_inst,
827 idx, this);
828
829 cpu->schedule(execution,
830 cpu->clockEdge(Cycles(op_latency - 1)));
831
832 // @todo: Enforce that issue_latency == 1 or op_latency
833 if (issue_latency > Cycles(1)) {
834 // If FU isn't pipelined, then it must be freed
835 // upon the execution completing.
836 execution->setFreeFU();
837 } else {
838 // Add the FU onto the list of FU's to be freed next cycle.
839 fuPool->freeUnitNextCycle(idx);
840 }
841 }
842
843 DPRINTF(IQ, "Thread %i: Issuing instruction PC %s "
844 "[sn:%lli]\n",
845 tid, issuing_inst->pcState(),
846 issuing_inst->seqNum);
847
848 readyInsts[op_class].pop();
849
850 if (!readyInsts[op_class].empty()) {
851 moveToYoungerInst(order_it);
852 } else {
853 readyIt[op_class] = listOrder.end();
854 queueOnList[op_class] = false;
855 }
856
857 issuing_inst->setIssued();
858 ++total_issued;
859
860#if TRACING_ON
861 issuing_inst->issueTick = curTick() - issuing_inst->fetchTick;
862#endif
863
864 if (!issuing_inst->isMemRef()) {
865 // Memory instructions can not be freed from the IQ until they
866 // complete.
867 ++freeEntries;
868 count[tid]--;
869 issuing_inst->clearInIQ();
870 } else {
871 memDepUnit[tid].issue(issuing_inst);
872 }
873
874 listOrder.erase(order_it++);
875 statIssuedInstType[tid][op_class]++;
876 } else {
877 statFuBusy[op_class]++;
878 fuBusy[tid]++;
879 ++order_it;
880 }
881 }
882
883 numIssuedDist.sample(total_issued);
884    iqInstsIssued += total_issued;
885
886 // If we issued any instructions, tell the CPU we had activity.
887    // @todo If the way deferred memory instructions are handled due to
888    // translation ever changes, then the deferredMemInsts condition should
889    // be removed from the code below.
890 if (total_issued || !retryMemInsts.empty() || !deferredMemInsts.empty()) {
891 cpu->activityThisCycle();
892 } else {
893 DPRINTF(IQ, "Not able to schedule any instructions.\n");
894 }
895}
896
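// Invoked once commit signals that a non-speculative instruction may
// execute (typically when it reaches the head of the ROB): the instruction
// is marked able to issue and either placed on a ready list or, if it is a
// memory reference, handed to the memory dependence unit.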
897template <class Impl>
898void
899InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
900{
901 DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
902 "to execute.\n", inst);
903
904 NonSpecMapIt inst_it = nonSpecInsts.find(inst);
905
906 assert(inst_it != nonSpecInsts.end());
907
908 ThreadID tid = (*inst_it).second->threadNumber;
909
910 (*inst_it).second->setAtCommit();
911
912 (*inst_it).second->setCanIssue();
913
914 if (!(*inst_it).second->isMemRef()) {
915 addIfReady((*inst_it).second);
916 } else {
917 memDepUnit[tid].nonSpecInstReady((*inst_it).second);
918 }
919
920 (*inst_it).second = NULL;
921
922 nonSpecInsts.erase(inst_it);
923}
924
925template <class Impl>
926void
927InstructionQueue<Impl>::commit(const InstSeqNum &inst, ThreadID tid)
928{
929 DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n",
930 tid,inst);
931
932 ListIt iq_it = instList[tid].begin();
933
934 while (iq_it != instList[tid].end() &&
935 (*iq_it)->seqNum <= inst) {
936 ++iq_it;
937 instList[tid].pop_front();
938 }
939
940 assert(freeEntries == (numEntries - countInsts()));
941}
942
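// Called during writeback for an instruction that completed without
// faulting.  Memory references are also marked complete in the memory
// dependence unit, and every instruction waiting on one of the completed
// instruction's destination registers has that source marked ready (and is
// moved to a ready list if that was its last outstanding source).  The
// number of dependents woken is returned to the caller.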
943template <class Impl>
944int
945InstructionQueue<Impl>::wakeDependents(DynInstPtr &completed_inst)
946{
947 int dependents = 0;
948
949 // The instruction queue here takes care of both floating and int ops
950 if (completed_inst->isFloating()) {
951 fpInstQueueWakeupQccesses++;
952 } else {
953 intInstQueueWakeupAccesses++;
954 }
955
956 DPRINTF(IQ, "Waking dependents of completed instruction.\n");
957
958 assert(!completed_inst->isSquashed());
959
960 // Tell the memory dependence unit to wake any dependents on this
961 // instruction if it is a memory instruction. Also complete the memory
962 // instruction at this point since we know it executed without issues.
963 // @todo: Might want to rename "completeMemInst" to something that
964 // indicates that it won't need to be replayed, and call this
965 // earlier. Might not be a big deal.
966 if (completed_inst->isMemRef()) {
967 memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
968 completeMemInst(completed_inst);
969 } else if (completed_inst->isMemBarrier() ||
970 completed_inst->isWriteBarrier()) {
971 memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
972 }
973
974 for (int dest_reg_idx = 0;
975 dest_reg_idx < completed_inst->numDestRegs();
976 dest_reg_idx++)
977 {
978 PhysRegIndex dest_reg =
979 completed_inst->renamedDestRegIdx(dest_reg_idx);
980
981 // Special case of uniq or control registers. They are not
982 // handled by the IQ and thus have no dependency graph entry.
983 // @todo Figure out a cleaner way to handle this.
984 if (dest_reg >= numPhysRegs) {
985 DPRINTF(IQ, "dest_reg :%d, numPhysRegs: %d\n", dest_reg,
986 numPhysRegs);
987 continue;
988 }
989
990 DPRINTF(IQ, "Waking any dependents on register %i.\n",
991 (int) dest_reg);
992
993 //Go through the dependency chain, marking the registers as
994 //ready within the waiting instructions.
995 DynInstPtr dep_inst = dependGraph.pop(dest_reg);
996
997 while (dep_inst) {
998 DPRINTF(IQ, "Waking up a dependent instruction, [sn:%lli] "
999 "PC %s.\n", dep_inst->seqNum, dep_inst->pcState());
1000
1001 // Might want to give more information to the instruction
1002 // so that it knows which of its source registers is
1003 // ready. However that would mean that the dependency
1004 // graph entries would need to hold the src_reg_idx.
1005 dep_inst->markSrcRegReady();
1006
1007 addIfReady(dep_inst);
1008
1009 dep_inst = dependGraph.pop(dest_reg);
1010
1011 ++dependents;
1012 }
1013
1014 // Reset the head node now that all of its dependents have
1015 // been woken up.
1016 assert(dependGraph.empty(dest_reg));
1017 dependGraph.clearInst(dest_reg);
1018
1019 // Mark the scoreboard as having that register ready.
1020 regScoreboard[dest_reg] = true;
1021 }
1022 return dependents;
1023}
1024
1025template <class Impl>
1026void
1027InstructionQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
1028{
1029 OpClass op_class = ready_inst->opClass();
1030
1031 readyInsts[op_class].push(ready_inst);
1032
1033 // Will need to reorder the list if either a queue is not on the list,
1034 // or it has an older instruction than last time.
1035 if (!queueOnList[op_class]) {
1036 addToOrderList(op_class);
1037 } else if (readyInsts[op_class].top()->seqNum <
1038 (*readyIt[op_class]).oldestInst) {
1039 listOrder.erase(readyIt[op_class]);
1040 addToOrderList(op_class);
1041 }
1042
1043 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
1044 "the ready list, PC %s opclass:%i [sn:%lli].\n",
1045 ready_inst->pcState(), op_class, ready_inst->seqNum);
1046}
1047
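// The next group of methods handles memory instructions that cannot
// complete on their first attempt.  rescheduleMemInst() clears the
// instruction's translation and can-issue state and hands it back to the
// memory dependence unit; replayMemInst() then asks that unit to replay
// the accesses it is holding.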
1048template <class Impl>
1049void
1050InstructionQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
1051{
1052 DPRINTF(IQ, "Rescheduling mem inst [sn:%lli]\n", resched_inst->seqNum);
1053
1054 // Reset DTB translation state
1055 resched_inst->translationStarted(false);
1056 resched_inst->translationCompleted(false);
1057
1058 resched_inst->clearCanIssue();
1059 memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
1060}
1061
1062template <class Impl>
1063void
1064InstructionQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
1065{
1066 memDepUnit[replay_inst->threadNumber].replay();
1067}
1068
1069template <class Impl>
1070void
1071InstructionQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
1072{
1073 ThreadID tid = completed_inst->threadNumber;
1074
1075 DPRINTF(IQ, "Completing mem instruction PC: %s [sn:%lli]\n",
1076 completed_inst->pcState(), completed_inst->seqNum);
1077
1078 ++freeEntries;
1079
1080 completed_inst->memOpDone(true);
1081
1082 memDepUnit[tid].completed(completed_inst);
1083 count[tid]--;
1084}
1085
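// deferredMemInsts holds accesses waiting on a delayed DTB translation,
// while blockedMemInsts holds accesses rejected because the cache was
// blocked.  cacheUnblocked() moves the blocked set onto retryMemInsts, and
// scheduleReadyInsts() drains both retry paths at the start of each cycle
// through getDeferredMemInstToExecute() and getBlockedMemInstToExecute().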
1086template <class Impl>
1087void
1088InstructionQueue<Impl>::deferMemInst(DynInstPtr &deferred_inst)
1089{
1090 deferredMemInsts.push_back(deferred_inst);
1091}
1092
1093template <class Impl>
1094void
1095InstructionQueue<Impl>::blockMemInst(DynInstPtr &blocked_inst)
1096{
1097 blocked_inst->translationStarted(false);
1098 blocked_inst->translationCompleted(false);
1099
1100 blocked_inst->clearIssued();
1101 blocked_inst->clearCanIssue();
1102 blockedMemInsts.push_back(blocked_inst);
1103}
1104
1105template <class Impl>
1106void
1107InstructionQueue<Impl>::cacheUnblocked()
1108{
1109 retryMemInsts.splice(retryMemInsts.end(), blockedMemInsts);
1110 // Get the CPU ticking again
1111 cpu->wakeCPU();
1112}
1113
1114template <class Impl>
1115typename Impl::DynInstPtr
1116InstructionQueue<Impl>::getDeferredMemInstToExecute()
1117{
1118 for (ListIt it = deferredMemInsts.begin(); it != deferredMemInsts.end();
1119 ++it) {
1120 if ((*it)->translationCompleted() || (*it)->isSquashed()) {
1121 DynInstPtr mem_inst = *it;
1122 deferredMemInsts.erase(it);
1123 return mem_inst;
1124 }
1125 }
1126 return nullptr;
1127}
1128
1129template <class Impl>
1130typename Impl::DynInstPtr
1131InstructionQueue<Impl>::getBlockedMemInstToExecute()
1132{
1133 if (retryMemInsts.empty()) {
1134 return nullptr;
1135 } else {
1136 DynInstPtr mem_inst = retryMemInsts.front();
1137 retryMemInsts.pop_front();
1138 return mem_inst;
1139 }
1140}
1141
1142template <class Impl>
1143void
1144InstructionQueue<Impl>::violation(DynInstPtr &store,
1145 DynInstPtr &faulting_load)
1146{
1147 intInstQueueWrites++;
1148 memDepUnit[store->threadNumber].violation(store, faulting_load);
1149}
1150
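// squash() latches the done sequence number supplied by commit for this
// thread and, if the thread has any instructions in the IQ, removes every
// instruction younger than that point; the memory dependence unit is
// squashed to the same sequence number.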
1151template <class Impl>
1152void
1153InstructionQueue<Impl>::squash(ThreadID tid)
1154{
1155 DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
1156 "the IQ.\n", tid);
1157
1158 // Read instruction sequence number of last instruction out of the
1159 // time buffer.
1160 squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;
1161
1162 // Call doSquash if there are insts in the IQ
1163 if (count[tid] > 0) {
1164 doSquash(tid);
1165 }
1166
1167 // Also tell the memory dependence unit to squash.
1168 memDepUnit[tid].squash(squashedSeqNum[tid], tid);
1169}
1170
1171template <class Impl>
1172void
1173InstructionQueue<Impl>::doSquash(ThreadID tid)
1174{
1175 // Start at the tail.
1176 ListIt squash_it = instList[tid].end();
1177 --squash_it;
1178
1179 DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
1180 tid, squashedSeqNum[tid]);
1181
1182 // Squash any instructions younger than the squashed sequence number
1183 // given.
1184 while (squash_it != instList[tid].end() &&
1185 (*squash_it)->seqNum > squashedSeqNum[tid]) {
1186
1187 DynInstPtr squashed_inst = (*squash_it);
1188 squashed_inst->isFloating() ? fpInstQueueWrites++ : intInstQueueWrites++;
1189
1190 // Only handle the instruction if it actually is in the IQ and
1191 // hasn't already been squashed in the IQ.
1192 if (squashed_inst->threadNumber != tid ||
1193 squashed_inst->isSquashedInIQ()) {
1194 --squash_it;
1195 continue;
1196 }
1197
1198 if (!squashed_inst->isIssued() ||
1199 (squashed_inst->isMemRef() &&
1200 !squashed_inst->memOpDone())) {
1201
1202 DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %s squashed.\n",
1203 tid, squashed_inst->seqNum, squashed_inst->pcState());
1204
1205 bool is_acq_rel = squashed_inst->isMemBarrier() &&
1206 (squashed_inst->isLoad() ||
1207 (squashed_inst->isStore() &&
1208 !squashed_inst->isStoreConditional()));
1209
1210 // Remove the instruction from the dependency list.
1211 if (is_acq_rel ||
1212 (!squashed_inst->isNonSpeculative() &&
1213 !squashed_inst->isStoreConditional() &&
1214 !squashed_inst->isMemBarrier() &&
1215 !squashed_inst->isWriteBarrier())) {
1216
1217 for (int src_reg_idx = 0;
1218 src_reg_idx < squashed_inst->numSrcRegs();
1219 src_reg_idx++)
1220 {
1221 PhysRegIndex src_reg =
1222 squashed_inst->renamedSrcRegIdx(src_reg_idx);
1223
1224 // Only remove it from the dependency graph if it
1225 // was placed there in the first place.
1226
1227 // Instead of doing a linked list traversal, we
1228 // can just remove these squashed instructions
1229 // either at issue time, or when the register is
1230 // overwritten. The only downside to this is it
1231 // leaves more room for error.
1232
1233 if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
1234 src_reg < numPhysRegs) {
1235 dependGraph.remove(src_reg, squashed_inst);
1236 }
1237
1238
1239 ++iqSquashedOperandsExamined;
1240 }
1241 } else if (!squashed_inst->isStoreConditional() ||
1242 !squashed_inst->isCompleted()) {
1243 NonSpecMapIt ns_inst_it =
1244 nonSpecInsts.find(squashed_inst->seqNum);
1245
1246 // we remove non-speculative instructions from
1247 // nonSpecInsts already when they are ready, and so we
1248 // cannot always expect to find them
1249 if (ns_inst_it == nonSpecInsts.end()) {
1250 // loads that became ready but stalled on a
1251                    // blocked cache are already removed from
1252 // nonSpecInsts, and have not faulted
1253 assert(squashed_inst->getFault() != NoFault ||
1254 squashed_inst->isMemRef());
1255 } else {
1256
1257 (*ns_inst_it).second = NULL;
1258
1259 nonSpecInsts.erase(ns_inst_it);
1260
1261 ++iqSquashedNonSpecRemoved;
1262 }
1263 }
1264
1265 // Might want to also clear out the head of the dependency graph.
1266
1267 // Mark it as squashed within the IQ.
1268 squashed_inst->setSquashedInIQ();
1269
1270 // @todo: Remove this hack where several statuses are set so the
1271 // inst will flow through the rest of the pipeline.
1272 squashed_inst->setIssued();
1273 squashed_inst->setCanCommit();
1274 squashed_inst->clearInIQ();
1275
1276 //Update Thread IQ Count
1277 count[squashed_inst->threadNumber]--;
1278
1279 ++freeEntries;
1280 }
1281
1282 instList[tid].erase(squash_it--);
1283 ++iqSquashedInstsExamined;
1284 }
1285}
1286
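// Returns true if at least one source register was still outstanding and
// the instruction was therefore linked into the dependency graph; false
// means every source was already ready by the time the instruction
// reached the IQ.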
1287template <class Impl>
1288bool
1289InstructionQueue<Impl>::addToDependents(DynInstPtr &new_inst)
1290{
1291 // Loop through the instruction's source registers, adding
1292 // them to the dependency list if they are not ready.
1293 int8_t total_src_regs = new_inst->numSrcRegs();
1294 bool return_val = false;
1295
1296 for (int src_reg_idx = 0;
1297 src_reg_idx < total_src_regs;
1298 src_reg_idx++)
1299 {
1300 // Only add it to the dependency graph if it's not ready.
1301 if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
1302 PhysRegIndex src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);
1303
1304 // Check the IQ's scoreboard to make sure the register
1305 // hasn't become ready while the instruction was in flight
1306 // between stages. Only if it really isn't ready should
1307 // it be added to the dependency graph.
1308 if (src_reg >= numPhysRegs) {
1309 continue;
1310 } else if (!regScoreboard[src_reg]) {
1311 DPRINTF(IQ, "Instruction PC %s has src reg %i that "
1312 "is being added to the dependency chain.\n",
1313 new_inst->pcState(), src_reg);
1314
1315 dependGraph.insert(src_reg, new_inst);
1316
1317 // Change the return value to indicate that something
1318 // was added to the dependency graph.
1319 return_val = true;
1320 } else {
1321 DPRINTF(IQ, "Instruction PC %s has src reg %i that "
1322 "became ready before it reached the IQ.\n",
1323 new_inst->pcState(), src_reg);
1324 // Mark a register ready within the instruction.
1325 new_inst->markSrcRegReady(src_reg_idx);
1326 }
1327 }
1328 }
1329
1330 return return_val;
1331}
1332
1333template <class Impl>
1334void
1335InstructionQueue<Impl>::addToProducers(DynInstPtr &new_inst)
1336{
1337 // Nothing really needs to be marked when an instruction becomes
1338 // the producer of a register's value, but for convenience a ptr
1339 // to the producing instruction will be placed in the head node of
1340 // the dependency links.
1341 int8_t total_dest_regs = new_inst->numDestRegs();
1342
1343 for (int dest_reg_idx = 0;
1344 dest_reg_idx < total_dest_regs;
1345 dest_reg_idx++)
1346 {
1347 PhysRegIndex dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);
1348
1349 // Instructions that use the misc regs will have a reg number
1350 // higher than the normal physical registers. In this case these
1351 // registers are not renamed, and there is no need to track
1352 // dependencies as these instructions must be executed at commit.
1353 if (dest_reg >= numPhysRegs) {
1354 continue;
1355 }
1356
1357 if (!dependGraph.empty(dest_reg)) {
1358 dependGraph.dump();
1359 panic("Dependency graph %i not empty!", dest_reg);
1360 }
1361
1362 dependGraph.setInst(dest_reg, new_inst);
1363
1364 // Mark the scoreboard to say it's not yet ready.
1365 regScoreboard[dest_reg] = false;
1366 }
1367}
1368
1369template <class Impl>
1370void
1371InstructionQueue<Impl>::addIfReady(DynInstPtr &inst)
1372{
1373 // If the instruction now has all of its source registers
1374 // available, then add it to the list of ready instructions.
1375 if (inst->readyToIssue()) {
1376
1377 //Add the instruction to the proper ready list.
1378 if (inst->isMemRef()) {
1379
1380 DPRINTF(IQ, "Checking if memory instruction can issue.\n");
1381
1382 // Message to the mem dependence unit that this instruction has
1383 // its registers ready.
1384 memDepUnit[inst->threadNumber].regsReady(inst);
1385
1386 return;
1387 }
1388
1389 OpClass op_class = inst->opClass();
1390
1391 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
1392 "the ready list, PC %s opclass:%i [sn:%lli].\n",
1393 inst->pcState(), op_class, inst->seqNum);
1394
1395 readyInsts[op_class].push(inst);
1396
1397 // Will need to reorder the list if either a queue is not on the list,
1398 // or it has an older instruction than last time.
1399 if (!queueOnList[op_class]) {
1400 addToOrderList(op_class);
1401 } else if (readyInsts[op_class].top()->seqNum <
1402 (*readyIt[op_class]).oldestInst) {
1403 listOrder.erase(readyIt[op_class]);
1404 addToOrderList(op_class);
1405 }
1406 }
1407}
1408
1409template <class Impl>
1410int
1411InstructionQueue<Impl>::countInsts()
1412{
1413#if 0
1414 //ksewell:This works but definitely could use a cleaner write
1415 //with a more intuitive way of counting. Right now it's
1416 //just brute force ....
1417 // Change the #if if you want to use this method.
1418 int total_insts = 0;
1419
1420 for (ThreadID tid = 0; tid < numThreads; ++tid) {
1421 ListIt count_it = instList[tid].begin();
1422
1423 while (count_it != instList[tid].end()) {
1424 if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
1425 if (!(*count_it)->isIssued()) {
1426 ++total_insts;
1427 } else if ((*count_it)->isMemRef() &&
1428                       !(*count_it)->memOpDone()) {
1429 // Loads that have not been marked as executed still count
1430 // towards the total instructions.
1431 ++total_insts;
1432 }
1433 }
1434
1435 ++count_it;
1436 }
1437 }
1438
1439 return total_insts;
1440#else
1441 return numEntries - freeEntries;
1442#endif
1443}
1444
1445template <class Impl>
1446void
1447InstructionQueue<Impl>::dumpLists()
1448{
1449 for (int i = 0; i < Num_OpClasses; ++i) {
1450 cprintf("Ready list %i size: %i\n", i, readyInsts[i].size());
1451
1452 cprintf("\n");
1453 }
1454
1455 cprintf("Non speculative list size: %i\n", nonSpecInsts.size());
1456
1457 NonSpecMapIt non_spec_it = nonSpecInsts.begin();
1458 NonSpecMapIt non_spec_end_it = nonSpecInsts.end();
1459
1460 cprintf("Non speculative list: ");
1461
1462 while (non_spec_it != non_spec_end_it) {
1463 cprintf("%s [sn:%lli]", (*non_spec_it).second->pcState(),
1464 (*non_spec_it).second->seqNum);
1465 ++non_spec_it;
1466 }
1467
1468 cprintf("\n");
1469
1470 ListOrderIt list_order_it = listOrder.begin();
1471 ListOrderIt list_order_end_it = listOrder.end();
1472 int i = 1;
1473
1474 cprintf("List order: ");
1475
1476 while (list_order_it != list_order_end_it) {
1477 cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
1478 (*list_order_it).oldestInst);
1479
1480 ++list_order_it;
1481 ++i;
1482 }
1483
1484 cprintf("\n");
1485}
1486
1487
1488template <class Impl>
1489void
1490InstructionQueue<Impl>::dumpInsts()
1491{
1492 for (ThreadID tid = 0; tid < numThreads; ++tid) {
1493 int num = 0;
1494 int valid_num = 0;
1495 ListIt inst_list_it = instList[tid].begin();
1496
1497 while (inst_list_it != instList[tid].end()) {
1498 cprintf("Instruction:%i\n", num);
1499 if (!(*inst_list_it)->isSquashed()) {
1500 if (!(*inst_list_it)->isIssued()) {
1501 ++valid_num;
1502 cprintf("Count:%i\n", valid_num);
1503 } else if ((*inst_list_it)->isMemRef() &&
1504 !(*inst_list_it)->memOpDone()) {
1505 // Loads that have not been marked as executed
1506 // still count towards the total instructions.
1507 ++valid_num;
1508 cprintf("Count:%i\n", valid_num);
1509 }
1510 }
1511
1512 cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n"
1513 "Issued:%i\nSquashed:%i\n",
1514 (*inst_list_it)->pcState(),
1515 (*inst_list_it)->seqNum,
1516 (*inst_list_it)->threadNumber,
1517 (*inst_list_it)->isIssued(),
1518 (*inst_list_it)->isSquashed());
1519
1520 if ((*inst_list_it)->isMemRef()) {
1521 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone());
1522 }
1523
1524 cprintf("\n");
1525
1526 inst_list_it++;
1527 ++num;
1528 }
1529 }
1530
1531 cprintf("Insts to Execute list:\n");
1532
1533 int num = 0;
1534 int valid_num = 0;
1535 ListIt inst_list_it = instsToExecute.begin();
1536
1537 while (inst_list_it != instsToExecute.end())
1538 {
1539 cprintf("Instruction:%i\n",
1540 num);
1541 if (!(*inst_list_it)->isSquashed()) {
1542 if (!(*inst_list_it)->isIssued()) {
1543 ++valid_num;
1544 cprintf("Count:%i\n", valid_num);
1545 } else if ((*inst_list_it)->isMemRef() &&
1546 !(*inst_list_it)->memOpDone()) {
1547 // Loads that have not been marked as executed
1548 // still count towards the total instructions.
1549 ++valid_num;
1550 cprintf("Count:%i\n", valid_num);
1551 }
1552 }
1553
1554 cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n"
1555 "Issued:%i\nSquashed:%i\n",
1556 (*inst_list_it)->pcState(),
1557 (*inst_list_it)->seqNum,
1558 (*inst_list_it)->threadNumber,
1559 (*inst_list_it)->isIssued(),
1560 (*inst_list_it)->isSquashed());
1561
1562 if ((*inst_list_it)->isMemRef()) {
1563 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone());
1564 }
1565
1566 cprintf("\n");
1567
1568 inst_list_it++;
1569 ++num;
1570 }
1571}
1572
1573#endif // __CPU_O3_INST_QUEUE_IMPL_HH__