inst_queue_impl.hh (2727:91e17c7ee622)
/*
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 */

#include <limits>
#include <vector>

#include "sim/root.hh"

#include "cpu/o3/fu_pool.hh"
#include "cpu/o3/inst_queue.hh"

using namespace std;

template <class Impl>
InstructionQueue<Impl>::FUCompletion::FUCompletion(DynInstPtr &_inst,
                                                   int fu_idx,
                                                   InstructionQueue<Impl> *iq_ptr)
    : Event(&mainEventQueue, Stat_Event_Pri),
      inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false)
{
    this->setFlags(Event::AutoDelete);
}

template <class Impl>
void
InstructionQueue<Impl>::FUCompletion::process()
{
    iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1);
    inst = NULL;
}


template <class Impl>
const char *
InstructionQueue<Impl>::FUCompletion::description()
{
    return "Functional unit completion event";
}

template <class Impl>
InstructionQueue<Impl>::InstructionQueue(Params *params)
    : fuPool(params->fuPool),
      numEntries(params->numIQEntries),
      totalWidth(params->issueWidth),
      numPhysIntRegs(params->numPhysIntRegs),
      numPhysFloatRegs(params->numPhysFloatRegs),
      commitToIEWDelay(params->commitToIEWDelay)
{
    assert(fuPool);

    switchedOut = false;

    numThreads = params->numberOfThreads;

    // Set the number of physical registers as the number of int + float
    numPhysRegs = numPhysIntRegs + numPhysFloatRegs;

    DPRINTF(IQ, "There are %i physical registers.\n", numPhysRegs);

    //Create an entry for each physical register within the
    //dependency graph.
    dependGraph.resize(numPhysRegs);

    // Resize the register scoreboard.
    regScoreboard.resize(numPhysRegs);

    //Initialize Mem Dependence Units
    for (int i = 0; i < numThreads; i++) {
        memDepUnit[i].init(params,i);
        memDepUnit[i].setIQ(this);
    }

    resetState();

    string policy = params->smtIQPolicy;

    //Convert string to lowercase
    std::transform(policy.begin(), policy.end(), policy.begin(),
                   (int(*)(int)) tolower);

    //Figure out resource sharing policy
    if (policy == "dynamic") {
        iqPolicy = Dynamic;

        //Set max entries to the total IQ capacity
        for (int i = 0; i < numThreads; i++) {
            maxEntries[i] = numEntries;
        }

    } else if (policy == "partitioned") {
        iqPolicy = Partitioned;

        //@todo: make this work when numEntries doesn't divide evenly
        //among threads.
        int part_amt = numEntries / numThreads;

        //Divide the IQ up evenly among threads
        for (int i = 0; i < numThreads; i++) {
            maxEntries[i] = part_amt;
        }

        DPRINTF(Fetch, "IQ sharing policy set to Partitioned: "
                "%i entries per thread.\n", part_amt);

    } else if (policy == "threshold") {
        iqPolicy = Threshold;

        double threshold = (double)params->smtIQThreshold / 100;

        int thresholdIQ = (int)((double)threshold * numEntries);

        //Divide up by threshold amount
        for (int i = 0; i < numThreads; i++) {
            maxEntries[i] = thresholdIQ;
        }

        DPRINTF(Fetch, "IQ sharing policy set to Threshold: "
                "%i entries per thread.\n", thresholdIQ);
    } else {
        assert(0 && "Invalid IQ Sharing Policy. Options are: {Dynamic, "
               "Partitioned, Threshold}");
    }
}

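// Illustrative example of the sharing policies above (hypothetical
// numbers, not from any particular configuration): with 64 IQ entries
// and 2 threads, "dynamic" leaves every thread's maxEntries at 64 so the
// threads compete for the whole IQ, "partitioned" gives each thread
// 64 / 2 = 32 entries, and "threshold" with smtIQThreshold = 50 caps
// each thread at (50 / 100) * 64 = 32 entries while the freeEntries
// pool itself remains shared.
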
template <class Impl>
InstructionQueue<Impl>::~InstructionQueue()
{
    dependGraph.reset();
#ifdef DEBUG
    cprintf("Nodes traversed: %i, removed: %i\n",
            dependGraph.nodesTraversed, dependGraph.nodesRemoved);
#endif
}

template <class Impl>
std::string
InstructionQueue<Impl>::name() const
{
    return cpu->name() + ".iq";
}

template <class Impl>
void
InstructionQueue<Impl>::regStats()
{
    using namespace Stats;
    iqInstsAdded
        .name(name() + ".iqInstsAdded")
        .desc("Number of instructions added to the IQ (excludes non-spec)")
        .prereq(iqInstsAdded);

    iqNonSpecInstsAdded
        .name(name() + ".iqNonSpecInstsAdded")
        .desc("Number of non-speculative instructions added to the IQ")
        .prereq(iqNonSpecInstsAdded);

    iqInstsIssued
        .name(name() + ".iqInstsIssued")
        .desc("Number of instructions issued")
        .prereq(iqInstsIssued);

    iqIntInstsIssued
        .name(name() + ".iqIntInstsIssued")
        .desc("Number of integer instructions issued")
        .prereq(iqIntInstsIssued);

    iqFloatInstsIssued
        .name(name() + ".iqFloatInstsIssued")
        .desc("Number of float instructions issued")
        .prereq(iqFloatInstsIssued);

    iqBranchInstsIssued
        .name(name() + ".iqBranchInstsIssued")
        .desc("Number of branch instructions issued")
        .prereq(iqBranchInstsIssued);

    iqMemInstsIssued
        .name(name() + ".iqMemInstsIssued")
        .desc("Number of memory instructions issued")
        .prereq(iqMemInstsIssued);

    iqMiscInstsIssued
        .name(name() + ".iqMiscInstsIssued")
        .desc("Number of miscellaneous instructions issued")
        .prereq(iqMiscInstsIssued);

    iqSquashedInstsIssued
        .name(name() + ".iqSquashedInstsIssued")
        .desc("Number of squashed instructions issued")
        .prereq(iqSquashedInstsIssued);

    iqSquashedInstsExamined
        .name(name() + ".iqSquashedInstsExamined")
        .desc("Number of squashed instructions iterated over during squash;"
              " mainly for profiling")
        .prereq(iqSquashedInstsExamined);

    iqSquashedOperandsExamined
        .name(name() + ".iqSquashedOperandsExamined")
        .desc("Number of squashed operands that are examined and possibly "
              "removed from graph")
        .prereq(iqSquashedOperandsExamined);

    iqSquashedNonSpecRemoved
        .name(name() + ".iqSquashedNonSpecRemoved")
        .desc("Number of squashed non-spec instructions that were removed")
        .prereq(iqSquashedNonSpecRemoved);

    queueResDist
        .init(Num_OpClasses, 0, 99, 2)
        .name(name() + ".IQ:residence:")
        .desc("cycles from dispatch to issue")
        .flags(total | pdf | cdf )
        ;
    for (int i = 0; i < Num_OpClasses; ++i) {
        queueResDist.subname(i, opClassStrings[i]);
    }
    numIssuedDist
        .init(0,totalWidth,1)
        .name(name() + ".ISSUE:issued_per_cycle")
        .desc("Number of insts issued each cycle")
        .flags(pdf)
        ;
/*
    dist_unissued
        .init(Num_OpClasses+2)
        .name(name() + ".ISSUE:unissued_cause")
        .desc("Reason ready instruction not issued")
        .flags(pdf | dist)
        ;
    for (int i=0; i < (Num_OpClasses + 2); ++i) {
        dist_unissued.subname(i, unissued_names[i]);
    }
*/
    statIssuedInstType
        .init(numThreads,Num_OpClasses)
        .name(name() + ".ISSUE:FU_type")
        .desc("Type of FU issued")
        .flags(total | pdf | dist)
        ;
    statIssuedInstType.ysubnames(opClassStrings);

    //
    // How long did instructions for a particular FU type wait prior to issue
    //

    issueDelayDist
        .init(Num_OpClasses,0,99,2)
        .name(name() + ".ISSUE:")
        .desc("cycles from operands ready to issue")
        .flags(pdf | cdf)
        ;

    for (int i=0; i<Num_OpClasses; ++i) {
        stringstream subname;
        subname << opClassStrings[i] << "_delay";
        issueDelayDist.subname(i, subname.str());
    }

    issueRate
        .name(name() + ".ISSUE:rate")
        .desc("Inst issue rate")
        .flags(total)
        ;
    issueRate = iqInstsIssued / cpu->numCycles;
/*
    issue_stores
        .name(name() + ".ISSUE:stores")
        .desc("Number of stores issued")
        .flags(total)
        ;
    issue_stores = exe_refs - exe_loads;
*/
/*
    issue_op_rate
        .name(name() + ".ISSUE:op_rate")
        .desc("Operation issue rate")
        .flags(total)
        ;
    issue_op_rate = issued_ops / numCycles;
*/
    statFuBusy
        .init(Num_OpClasses)
        .name(name() + ".ISSUE:fu_full")
        .desc("attempts to use FU when none available")
        .flags(pdf | dist)
        ;
    for (int i=0; i < Num_OpClasses; ++i) {
        statFuBusy.subname(i, opClassStrings[i]);
    }

    fuBusy
        .init(numThreads)
        .name(name() + ".ISSUE:fu_busy_cnt")
        .desc("FU busy when requested")
        .flags(total)
        ;

    fuBusyRate
        .name(name() + ".ISSUE:fu_busy_rate")
        .desc("FU busy rate (busy events/executed inst)")
        .flags(total)
        ;
    fuBusyRate = fuBusy / iqInstsIssued;

    for (int i = 0; i < numThreads; i++) {
        // Tell mem dependence unit to reg stats as well.
        memDepUnit[i].regStats();
    }
}

template <class Impl>
void
InstructionQueue<Impl>::resetState()
{
    //Initialize thread IQ counts
    for (int i = 0; i < numThreads; i++) {
        count[i] = 0;
        instList[i].clear();
    }

    // Initialize the number of free IQ entries.
    freeEntries = numEntries;

    // Note that in actuality, the registers corresponding to the logical
    // registers start off as ready. However this doesn't matter for the
    // IQ as the instruction should have been correctly told if those
    // registers are ready in rename. Thus it can all be initialized as
    // unready.
    for (int i = 0; i < numPhysRegs; ++i) {
        regScoreboard[i] = false;
    }

    for (int i = 0; i < numThreads; ++i) {
        squashedSeqNum[i] = 0;
    }

    for (int i = 0; i < Num_OpClasses; ++i) {
        while (!readyInsts[i].empty())
            readyInsts[i].pop();
        queueOnList[i] = false;
        readyIt[i] = listOrder.end();
    }
    nonSpecInsts.clear();
    listOrder.clear();
}

template <class Impl>
void
InstructionQueue<Impl>::setActiveThreads(list<unsigned> *at_ptr)
{
    DPRINTF(IQ, "Setting active threads list pointer.\n");
    activeThreads = at_ptr;
}

template <class Impl>
void
InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
{
    DPRINTF(IQ, "Set the issue to execute queue.\n");
    issueToExecuteQueue = i2e_ptr;
}

template <class Impl>
void
InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
{
    DPRINTF(IQ, "Set the time buffer.\n");
    timeBuffer = tb_ptr;

    fromCommit = timeBuffer->getWire(-commitToIEWDelay);
}

template <class Impl>
void
InstructionQueue<Impl>::switchOut()
{
    resetState();
    dependGraph.reset();
    switchedOut = true;
    for (int i = 0; i < numThreads; ++i) {
        memDepUnit[i].switchOut();
    }
}

template <class Impl>
void
InstructionQueue<Impl>::takeOverFrom()
{
    switchedOut = false;
}

template <class Impl>
int
InstructionQueue<Impl>::entryAmount(int num_threads)
{
    if (iqPolicy == Partitioned) {
        return numEntries / num_threads;
    } else {
        return 0;
    }
}


template <class Impl>
void
InstructionQueue<Impl>::resetEntries()
{
    if (iqPolicy != Dynamic || numThreads > 1) {
        int active_threads = (*activeThreads).size();

        list<unsigned>::iterator threads = (*activeThreads).begin();
        list<unsigned>::iterator list_end = (*activeThreads).end();

        while (threads != list_end) {
            if (iqPolicy == Partitioned) {
                maxEntries[*threads++] = numEntries / active_threads;
            } else if (iqPolicy == Threshold && active_threads == 1) {
                maxEntries[*threads++] = numEntries;
            }
        }
    }
}

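// Example of the re-partitioning above (hypothetical numbers): under the
// Partitioned policy with 64 entries and 2 active threads, each active
// thread's maxEntries becomes 64 / 2 = 32; if only one thread remains
// active, the next call recomputes 64 / 1 = 64. Under the Threshold
// policy the per-thread cap is only relaxed to the full IQ when a
// single thread is active.
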
template <class Impl>
unsigned
InstructionQueue<Impl>::numFreeEntries()
{
    return freeEntries;
}

template <class Impl>
unsigned
InstructionQueue<Impl>::numFreeEntries(unsigned tid)
{
    return maxEntries[tid] - count[tid];
}

// Might want to do something more complex if it knows how many instructions
// will be issued this cycle.
template <class Impl>
bool
InstructionQueue<Impl>::isFull()
{
    if (freeEntries == 0) {
        return(true);
    } else {
        return(false);
    }
}

template <class Impl>
bool
InstructionQueue<Impl>::isFull(unsigned tid)
{
    if (numFreeEntries(tid) == 0) {
        return(true);
    } else {
        return(false);
    }
}

template <class Impl>
bool
InstructionQueue<Impl>::hasReadyInsts()
{
    if (!listOrder.empty()) {
        return true;
    }

    for (int i = 0; i < Num_OpClasses; ++i) {
        if (!readyInsts[i].empty()) {
            return true;
        }
    }

    return false;
}

template <class Impl>
void
InstructionQueue<Impl>::insert(DynInstPtr &new_inst)
{
    // Make sure the instruction is valid
    assert(new_inst);

    DPRINTF(IQ, "Adding instruction [sn:%lli] PC %#x to the IQ.\n",
            new_inst->seqNum, new_inst->readPC());

    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Look through its source registers (physical regs), and mark any
    // dependencies.
    addToDependents(new_inst);

    // Have this instruction set itself as the producer of its destination
    // register(s).
    addToProducers(new_inst);

    if (new_inst->isMemRef()) {
        memDepUnit[new_inst->threadNumber].insert(new_inst);
    } else {
        addIfReady(new_inst);
    }

    ++iqInstsAdded;

    count[new_inst->threadNumber]++;

    assert(freeEntries == (numEntries - countInsts()));
}

template <class Impl>
void
InstructionQueue<Impl>::insertNonSpec(DynInstPtr &new_inst)
{
    // @todo: Clean up this code; can do it by setting inst as unable
    // to issue, then calling normal insert on the inst.

    assert(new_inst);

    nonSpecInsts[new_inst->seqNum] = new_inst;

    DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %#x "
            "to the IQ.\n",
            new_inst->seqNum, new_inst->readPC());

    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Have this instruction set itself as the producer of its destination
    // register(s).
    addToProducers(new_inst);

    // If it's a memory instruction, add it to the memory dependency
    // unit.
    if (new_inst->isMemRef()) {
        memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
    }

    ++iqNonSpecInstsAdded;

    count[new_inst->threadNumber]++;

    assert(freeEntries == (numEntries - countInsts()));
}

template <class Impl>
void
InstructionQueue<Impl>::insertBarrier(DynInstPtr &barr_inst)
{
    memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);

    insertNonSpec(barr_inst);
}

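// Barriers go through the non-speculative path above: the memory
// dependence unit records the barrier, and the IQ only marks it able to
// issue once scheduleNonSpec() is called for it (driven from outside
// this file, typically when commit reaches the instruction), so it
// cannot execute speculatively.
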
template <class Impl>
typename Impl::DynInstPtr
InstructionQueue<Impl>::getInstToExecute()
{
    assert(!instsToExecute.empty());
    DynInstPtr inst = instsToExecute.front();
    instsToExecute.pop_front();
    return inst;
}

template <class Impl>
void
InstructionQueue<Impl>::addToOrderList(OpClass op_class)
{
    assert(!readyInsts[op_class].empty());

    ListOrderEntry queue_entry;

    queue_entry.queueType = op_class;

    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    ListOrderIt list_it = listOrder.begin();
    ListOrderIt list_end_it = listOrder.end();

    while (list_it != list_end_it) {
        if ((*list_it).oldestInst > queue_entry.oldestInst) {
            break;
        }

        list_it++;
    }

    readyIt[op_class] = listOrder.insert(list_it, queue_entry);
    queueOnList[op_class] = true;
}

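// listOrder holds at most one entry per op class, kept sorted by
// ascending oldestInst (the sequence number of that class's oldest
// ready instruction), so scheduleReadyInsts() always considers the op
// class with the globally oldest ready instruction first. readyIt[]
// caches each entry's position so it can be erased and re-inserted
// cheaply when a queue's head changes.
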
template <class Impl>
void
InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
{
    // Get iterator of next item on the list
    // Delete the original iterator
    // Determine if the next item is either the end of the list or younger
    // than the new instruction. If so, then add in a new iterator right here.
    // If not, then move along.
    ListOrderEntry queue_entry;
    OpClass op_class = (*list_order_it).queueType;
    ListOrderIt next_it = list_order_it;

    ++next_it;

    queue_entry.queueType = op_class;
    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    while (next_it != listOrder.end() &&
           (*next_it).oldestInst < queue_entry.oldestInst) {
        ++next_it;
    }

    readyIt[op_class] = listOrder.insert(next_it, queue_entry);
}

template <class Impl>
void
InstructionQueue<Impl>::processFUCompletion(DynInstPtr &inst, int fu_idx)
{
    // The CPU could have been sleeping until this op completed (*extremely*
    // long latency op). Wake it if it was. This may be overkill.
    if (isSwitchedOut()) {
        return;
    }

    iewStage->wakeCPU();

    if (fu_idx > -1)
        fuPool->freeUnitNextCycle(fu_idx);

    // @todo: Ensure that these FU Completions happen at the beginning
    // of a cycle, otherwise they could add too many instructions to
    // the queue.
    issueToExecuteQueue->access(0)->size++;
    instsToExecute.push_back(inst);
}

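// FU index conventions as used in this file: idx stays at its default of
// -2 when op_class is No_OpClass (no functional unit required), a return
// of -1 from fuPool->getUnit() is treated as "no unit available this
// cycle", and a value >= 0 names the unit that was acquired. An acquired
// unit is handed back either on the next cycle (pipelined FUs) or, via
// FUCompletion::setFreeFU(), only when the operation finishes
// (unpipelined FUs).
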
// @todo: Figure out a better way to remove the squashed items from the
// lists. Checking the top item of each list to see if it's squashed
// wastes time and forces jumps.
template <class Impl>
void
InstructionQueue<Impl>::scheduleReadyInsts()
{
    DPRINTF(IQ, "Attempting to schedule ready instructions from "
            "the IQ.\n");

    IssueStruct *i2e_info = issueToExecuteQueue->access(0);

    // Start with an iterator to the head of the list. While issue
    // bandwidth remains and the end of the list hasn't been reached,
    // try to get an FU that can execute this op class. If successful,
    // update the entry's oldestInst to the new top of that ready queue,
    // put the entry back in the proper place in the list, and advance
    // the iterator. This avoids repeatedly trying to schedule an op
    // class for which no FU is available.
    ListOrderIt order_it = listOrder.begin();
    ListOrderIt order_end_it = listOrder.end();
    int total_issued = 0;

    while (total_issued < totalWidth &&
           order_it != order_end_it) {
        OpClass op_class = (*order_it).queueType;

        assert(!readyInsts[op_class].empty());

        DynInstPtr issuing_inst = readyInsts[op_class].top();

        assert(issuing_inst->seqNum == (*order_it).oldestInst);

        if (issuing_inst->isSquashed()) {
            readyInsts[op_class].pop();

            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            listOrder.erase(order_it++);

            ++iqSquashedInstsIssued;

            continue;
        }

        int idx = -2;
        int op_latency = 1;
        int tid = issuing_inst->threadNumber;

        if (op_class != No_OpClass) {
            idx = fuPool->getUnit(op_class);

            if (idx > -1) {
                op_latency = fuPool->getOpLatency(op_class);
            }
        }

        // If the instruction doesn't require an FU, or a valid FU was
        // obtained, then schedule it for execution.
        if (idx == -2 || idx != -1) {
            if (op_latency == 1) {
                i2e_info->size++;
                instsToExecute.push_back(issuing_inst);

                // Add the FU onto the list of FUs to be freed next
                // cycle if we used one.
                if (idx >= 0)
                    fuPool->freeUnitNextCycle(idx);
            } else {
                int issue_latency = fuPool->getIssueLatency(op_class);
                // Generate completion event for the FU
                FUCompletion *execution = new FUCompletion(issuing_inst,
                                                           idx, this);

                execution->schedule(curTick + cpu->cycles(issue_latency - 1));

                // @todo: Enforce that issue_latency == 1 or op_latency
                if (issue_latency > 1) {
                    // If the FU isn't pipelined, then it must be freed
                    // upon the execution completing.
                    execution->setFreeFU();
                } else {
                    // Add the FU onto the list of FUs to be freed next cycle.
                    fuPool->freeUnitNextCycle(idx);
                }
            }

            DPRINTF(IQ, "Thread %i: Issuing instruction PC %#x "
                    "[sn:%lli]\n",
                    tid, issuing_inst->readPC(),
                    issuing_inst->seqNum);

            readyInsts[op_class].pop();

            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            issuing_inst->setIssued();
            ++total_issued;

            if (!issuing_inst->isMemRef()) {
                // Memory instructions cannot be freed from the IQ until
                // they complete.
                ++freeEntries;
                count[tid]--;
                issuing_inst->removeInIQ();
            } else {
                memDepUnit[tid].issue(issuing_inst);
            }

            listOrder.erase(order_it++);
            statIssuedInstType[tid][op_class]++;
        } else {
            statFuBusy[op_class]++;
            fuBusy[tid]++;
            ++order_it;
        }
    }

    numIssuedDist.sample(total_issued);
    iqInstsIssued += total_issued;

    // If we issued any instructions, tell the CPU we had activity.
    if (total_issued) {
        cpu->activityThisCycle();
    } else {
        DPRINTF(IQ, "Not able to schedule any instructions.\n");
    }
}

template <class Impl>
void
InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
{
    DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
            "to execute.\n", inst);

    NonSpecMapIt inst_it = nonSpecInsts.find(inst);

    assert(inst_it != nonSpecInsts.end());

    unsigned tid = (*inst_it).second->threadNumber;

    (*inst_it).second->setCanIssue();

    if (!(*inst_it).second->isMemRef()) {
        addIfReady((*inst_it).second);
    } else {
        memDepUnit[tid].nonSpecInstReady((*inst_it).second);
    }

    (*inst_it).second = NULL;

    nonSpecInsts.erase(inst_it);
}

template <class Impl>
void
InstructionQueue<Impl>::commit(const InstSeqNum &inst, unsigned tid)
{
    DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n",
            tid, inst);

    ListIt iq_it = instList[tid].begin();

    while (iq_it != instList[tid].end() &&
           (*iq_it)->seqNum <= inst) {
        ++iq_it;
        instList[tid].pop_front();
    }

    assert(freeEntries == (numEntries - countInsts()));
}

template <class Impl>
int
InstructionQueue<Impl>::wakeDependents(DynInstPtr &completed_inst)
{
    int dependents = 0;

    DPRINTF(IQ, "Waking dependents of completed instruction.\n");

    assert(!completed_inst->isSquashed());

    // Tell the memory dependence unit to wake any dependents on this
    // instruction if it is a memory instruction. Also complete the memory
    // instruction at this point since we know it executed without issues.
    // @todo: Might want to rename "completeMemInst" to something that
    // indicates that it won't need to be replayed, and call this
    // earlier. Might not be a big deal.
    if (completed_inst->isMemRef()) {
        memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
        completeMemInst(completed_inst);
    } else if (completed_inst->isMemBarrier() ||
               completed_inst->isWriteBarrier()) {
        memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
    }

    for (int dest_reg_idx = 0;
         dest_reg_idx < completed_inst->numDestRegs();
         dest_reg_idx++)
    {
        PhysRegIndex dest_reg =
            completed_inst->renamedDestRegIdx(dest_reg_idx);

        // Special case of uniq or control registers. They are not
        // handled by the IQ and thus have no dependency graph entry.
        // @todo Figure out a cleaner way to handle this.
        if (dest_reg >= numPhysRegs) {
            continue;
        }

        DPRINTF(IQ, "Waking any dependents on register %i.\n",
                (int) dest_reg);

        //Go through the dependency chain, marking the registers as
        //ready within the waiting instructions.
        DynInstPtr dep_inst = dependGraph.pop(dest_reg);

        while (dep_inst) {
            DPRINTF(IQ, "Waking up a dependent instruction, PC%#x.\n",
                    dep_inst->readPC());

            // Might want to give more information to the instruction
            // so that it knows which of its source registers is
            // ready. However that would mean that the dependency
            // graph entries would need to hold the src_reg_idx.
            dep_inst->markSrcRegReady();

            addIfReady(dep_inst);

            dep_inst = dependGraph.pop(dest_reg);

            ++dependents;
        }

        // Reset the head node now that all of its dependents have
        // been woken up.
        assert(dependGraph.empty(dest_reg));
        dependGraph.clearInst(dest_reg);

        // Mark the scoreboard as having that register ready.
        regScoreboard[dest_reg] = true;
    }
    return dependents;
}

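// The loop above is the core wakeup path: each physical register has a
// head node in dependGraph holding its producer plus a chain of waiting
// consumers. When the producer completes, every consumer popped off the
// chain has one source marked ready and is handed to addIfReady(); the
// scoreboard entry is then set so instructions arriving later see the
// register as already ready.
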
template <class Impl>
void
InstructionQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
{
    OpClass op_class = ready_inst->opClass();

    readyInsts[op_class].push(ready_inst);

    // Will need to reorder the list if either a queue is not on the list,
    // or it has an older instruction than last time.
    if (!queueOnList[op_class]) {
        addToOrderList(op_class);
    } else if (readyInsts[op_class].top()->seqNum <
               (*readyIt[op_class]).oldestInst) {
        listOrder.erase(readyIt[op_class]);
        addToOrderList(op_class);
    }

    DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
            "the ready list, PC %#x opclass:%i [sn:%lli].\n",
            ready_inst->readPC(), op_class, ready_inst->seqNum);
}

template <class Impl>
void
InstructionQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
{
    memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
}

template <class Impl>
void
InstructionQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
{
    memDepUnit[replay_inst->threadNumber].replay(replay_inst);
}

template <class Impl>
void
InstructionQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
{
    int tid = completed_inst->threadNumber;

    DPRINTF(IQ, "Completing mem instruction PC:%#x [sn:%lli]\n",
            completed_inst->readPC(), completed_inst->seqNum);

    ++freeEntries;

    completed_inst->memOpDone = true;

    memDepUnit[tid].completed(completed_inst);

    count[tid]--;
}

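// Note the asymmetry with scheduleReadyInsts(): non-memory instructions
// return their IQ entry at issue, while loads and stores keep their
// entry (and their per-thread count) until completeMemInst() runs, so
// they can still be replayed through the memory dependence unit if
// needed.
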
template <class Impl>
void
InstructionQueue<Impl>::violation(DynInstPtr &store,
                                  DynInstPtr &faulting_load)
{
    memDepUnit[store->threadNumber].violation(store, faulting_load);
}

template <class Impl>
void
InstructionQueue<Impl>::squash(unsigned tid)
{
    DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
            "the IQ.\n", tid);

    // Read instruction sequence number of last instruction out of the
    // time buffer.
    squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;

    // Call doSquash if there are insts in the IQ
    if (count[tid] > 0) {
        doSquash(tid);
    }

    // Also tell the memory dependence unit to squash.
    memDepUnit[tid].squash(squashedSeqNum[tid], tid);
}

template <class Impl>
void
InstructionQueue<Impl>::doSquash(unsigned tid)
{
    // Start at the tail.
    ListIt squash_it = instList[tid].end();
    --squash_it;

    DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
            tid, squashedSeqNum[tid]);

    // Squash any instructions younger than the squashed sequence number
    // given.
    while (squash_it != instList[tid].end() &&
           (*squash_it)->seqNum > squashedSeqNum[tid]) {

        DynInstPtr squashed_inst = (*squash_it);

        // Only handle the instruction if it actually is in the IQ and
        // hasn't already been squashed in the IQ.
        if (squashed_inst->threadNumber != tid ||
            squashed_inst->isSquashedInIQ()) {
            --squash_it;
            continue;
        }

        if (!squashed_inst->isIssued() ||
            (squashed_inst->isMemRef() &&
             !squashed_inst->memOpDone)) {

            // Remove the instruction from the dependency list.
            if (!squashed_inst->isNonSpeculative() &&
                !squashed_inst->isStoreConditional() &&
                !squashed_inst->isMemBarrier() &&
                !squashed_inst->isWriteBarrier()) {

                for (int src_reg_idx = 0;
                     src_reg_idx < squashed_inst->numSrcRegs();
                     src_reg_idx++)
                {
                    PhysRegIndex src_reg =
                        squashed_inst->renamedSrcRegIdx(src_reg_idx);

                    // Only remove it from the dependency graph if it
                    // was placed there in the first place.

                    // Instead of doing a linked list traversal, we
                    // can just remove these squashed instructions
                    // either at issue time, or when the register is
                    // overwritten. The only downside to this is it
                    // leaves more room for error.

                    if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
                        src_reg < numPhysRegs) {
                        dependGraph.remove(src_reg, squashed_inst);
                    }


                    ++iqSquashedOperandsExamined;
                }
            } else {
                NonSpecMapIt ns_inst_it =
                    nonSpecInsts.find(squashed_inst->seqNum);
                assert(ns_inst_it != nonSpecInsts.end());

                (*ns_inst_it).second = NULL;

                nonSpecInsts.erase(ns_inst_it);

                ++iqSquashedNonSpecRemoved;
            }

            // Might want to also clear out the head of the dependency graph.

            // Mark it as squashed within the IQ.
            squashed_inst->setSquashedInIQ();

            // @todo: Remove this hack where several statuses are set so the
            // inst will flow through the rest of the pipeline.
            squashed_inst->setIssued();
            squashed_inst->setCanCommit();
            squashed_inst->removeInIQ();

            //Update Thread IQ Count
            count[squashed_inst->threadNumber]--;

            ++freeEntries;

            DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %#x "
                    "squashed.\n",
                    tid, squashed_inst->seqNum, squashed_inst->readPC());
        }

        instList[tid].erase(squash_it--);
        ++iqSquashedInstsExamined;
    }
}

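// In the walk above, a squashed instruction that already issued (and,
// for memory ops, finished its access) is simply unlinked from instList,
// since its IQ entry was freed at issue or completion time. Anything
// else is marked squashed-in-IQ, given issued/can-commit status so it
// drains through the rest of the pipeline, and has its entry returned
// to freeEntries.
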
template <class Impl>
bool
InstructionQueue<Impl>::addToDependents(DynInstPtr &new_inst)
{
    // Loop through the instruction's source registers, adding
    // them to the dependency list if they are not ready.
    int8_t total_src_regs = new_inst->numSrcRegs();
    bool return_val = false;

    for (int src_reg_idx = 0;
         src_reg_idx < total_src_regs;
         src_reg_idx++)
    {
        // Only add it to the dependency graph if it's not ready.
        if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
            PhysRegIndex src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);

            // Check the IQ's scoreboard to make sure the register
            // hasn't become ready while the instruction was in flight
            // between stages. Only if it really isn't ready should
            // it be added to the dependency graph.
            if (src_reg >= numPhysRegs) {
                continue;
            } else if (regScoreboard[src_reg] == false) {
                DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
                        "is being added to the dependency chain.\n",
                        new_inst->readPC(), src_reg);

                dependGraph.insert(src_reg, new_inst);

                // Change the return value to indicate that something
                // was added to the dependency graph.
                return_val = true;
            } else {
                DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
                        "became ready before it reached the IQ.\n",
                        new_inst->readPC(), src_reg);
                // Mark a register ready within the instruction.
                new_inst->markSrcRegReady(src_reg_idx);
            }
        }
    }

    return return_val;
}

template <class Impl>
void
InstructionQueue<Impl>::addToProducers(DynInstPtr &new_inst)
{
    // Nothing really needs to be marked when an instruction becomes
    // the producer of a register's value, but for convenience a ptr
    // to the producing instruction will be placed in the head node of
    // the dependency links.
    int8_t total_dest_regs = new_inst->numDestRegs();

    for (int dest_reg_idx = 0;
         dest_reg_idx < total_dest_regs;
         dest_reg_idx++)
    {
        PhysRegIndex dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);

        // Instructions that use the misc regs will have a reg number
        // higher than the normal physical registers. In this case these
        // registers are not renamed, and there is no need to track
        // dependencies as these instructions must be executed at commit.
        if (dest_reg >= numPhysRegs) {
            continue;
        }

        if (!dependGraph.empty(dest_reg)) {
            dependGraph.dump();
            panic("Dependency graph %i not empty!", dest_reg);
        }

        dependGraph.setInst(dest_reg, new_inst);

        // Mark the scoreboard to say it's not yet ready.
        regScoreboard[dest_reg] = false;
    }
}

template <class Impl>
void
InstructionQueue<Impl>::addIfReady(DynInstPtr &inst)
{
    // If the instruction now has all of its source registers
    // available, then add it to the list of ready instructions.
    if (inst->readyToIssue()) {

        //Add the instruction to the proper ready list.
        if (inst->isMemRef()) {

            DPRINTF(IQ, "Checking if memory instruction can issue.\n");

            // Message to the mem dependence unit that this instruction has
            // its registers ready.
            memDepUnit[inst->threadNumber].regsReady(inst);

            return;
        }

        OpClass op_class = inst->opClass();

        DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
                "the ready list, PC %#x opclass:%i [sn:%lli].\n",
                inst->readPC(), op_class, inst->seqNum);

        readyInsts[op_class].push(inst);

        // Will need to reorder the list if either a queue is not on the list,
        // or it has an older instruction than last time.
        if (!queueOnList[op_class]) {
            addToOrderList(op_class);
        } else if (readyInsts[op_class].top()->seqNum <
                   (*readyIt[op_class]).oldestInst) {
            listOrder.erase(readyIt[op_class]);
            addToOrderList(op_class);
        }
    }
}

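// addIfReady() above and addReadyMemInst() perform the same listOrder
// maintenance: push the instruction onto its op class's ready queue,
// then re-insert that op class's listOrder entry if the queue was not
// listed yet or its new head is older than the recorded oldestInst.
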
template <class Impl>
int
InstructionQueue<Impl>::countInsts()
{
#if 0
    //ksewell: This works but definitely could use a cleaner write
    //with a more intuitive way of counting. Right now it's
    //just brute force ....
    // Change the #if if you want to use this method.
    int total_insts = 0;

    for (int i = 0; i < numThreads; ++i) {
        ListIt count_it = instList[i].begin();

        while (count_it != instList[i].end()) {
            if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
                if (!(*count_it)->isIssued()) {
                    ++total_insts;
                } else if ((*count_it)->isMemRef() &&
                           !(*count_it)->memOpDone) {
                    // Loads that have not been marked as executed still count
                    // towards the total instructions.
                    ++total_insts;
                }
            }

            ++count_it;
        }
    }

    return total_insts;
#else
    return numEntries - freeEntries;
#endif
}

template <class Impl>
void
InstructionQueue<Impl>::dumpLists()
{
    for (int i = 0; i < Num_OpClasses; ++i) {
        cprintf("Ready list %i size: %i\n", i, readyInsts[i].size());

        cprintf("\n");
    }

    cprintf("Non speculative list size: %i\n", nonSpecInsts.size());

    NonSpecMapIt non_spec_it = nonSpecInsts.begin();
    NonSpecMapIt non_spec_end_it = nonSpecInsts.end();

    cprintf("Non speculative list: ");

    while (non_spec_it != non_spec_end_it) {
        cprintf("%#x [sn:%lli]", (*non_spec_it).second->readPC(),
                (*non_spec_it).second->seqNum);
        ++non_spec_it;
    }

    cprintf("\n");

    ListOrderIt list_order_it = listOrder.begin();
    ListOrderIt list_order_end_it = listOrder.end();
    int i = 1;

    cprintf("List order: ");

    while (list_order_it != list_order_end_it) {
        cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
                (*list_order_it).oldestInst);

        ++list_order_it;
        ++i;
    }

    cprintf("\n");
}


template <class Impl>
void
InstructionQueue<Impl>::dumpInsts()
{
    for (int i = 0; i < numThreads; ++i) {
        int num = 0;
        int valid_num = 0;
        ListIt inst_list_it = instList[i].begin();

        while (inst_list_it != instList[i].end())
        {
            cprintf("Instruction:%i\n",
                    num);
            if (!(*inst_list_it)->isSquashed()) {
                if (!(*inst_list_it)->isIssued()) {
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                } else if ((*inst_list_it)->isMemRef() &&
                           !(*inst_list_it)->memOpDone) {
                    // Loads that have not been marked as executed
                    // still count towards the total instructions.
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                }
            }

            cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
                    "Issued:%i\nSquashed:%i\n",
                    (*inst_list_it)->readPC(),
                    (*inst_list_it)->seqNum,
                    (*inst_list_it)->threadNumber,
                    (*inst_list_it)->isIssued(),
                    (*inst_list_it)->isSquashed());

            if ((*inst_list_it)->isMemRef()) {
                cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
            }

            cprintf("\n");

            inst_list_it++;
            ++num;
        }
    }

    cprintf("Insts to Execute list:\n");

    int num = 0;
    int valid_num = 0;
    ListIt inst_list_it = instsToExecute.begin();

    while (inst_list_it != instsToExecute.end())
    {
        cprintf("Instruction:%i\n",
                num);
        if (!(*inst_list_it)->isSquashed()) {
            if (!(*inst_list_it)->isIssued()) {
                ++valid_num;
                cprintf("Count:%i\n", valid_num);
            } else if ((*inst_list_it)->isMemRef() &&
                       !(*inst_list_it)->memOpDone) {
                // Loads that have not been marked as executed
                // still count towards the total instructions.
                ++valid_num;
                cprintf("Count:%i\n", valid_num);
            }
        }

        cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
                "Issued:%i\nSquashed:%i\n",
                (*inst_list_it)->readPC(),
                (*inst_list_it)->seqNum,
                (*inst_list_it)->threadNumber,
                (*inst_list_it)->isIssued(),
                (*inst_list_it)->isSquashed());

        if ((*inst_list_it)->isMemRef()) {
            cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
        }

        cprintf("\n");

        inst_list_it++;
        ++num;
    }
}
293 statFuBusy
294 .init(Num_OpClasses)
295 .name(name() + ".ISSUE:fu_full")
296 .desc("attempts to use FU when none available")
297 .flags(pdf | dist)
298 ;
299 for (int i=0; i < Num_OpClasses; ++i) {
300 statFuBusy.subname(i, opClassStrings[i]);
301 }
302
303 fuBusy
304 .init(numThreads)
305 .name(name() + ".ISSUE:fu_busy_cnt")
306 .desc("FU busy when requested")
307 .flags(total)
308 ;
309
310 fuBusyRate
311 .name(name() + ".ISSUE:fu_busy_rate")
312 .desc("FU busy rate (busy events/executed inst)")
313 .flags(total)
314 ;
315 fuBusyRate = fuBusy / iqInstsIssued;
316
317 for ( int i=0; i < numThreads; i++) {
318 // Tell mem dependence unit to reg stats as well.
319 memDepUnit[i].regStats();
320 }
321}
322
323template <class Impl>
324void
325InstructionQueue<Impl>::resetState()
326{
327 //Initialize thread IQ counts
328 for (int i = 0; i <numThreads; i++) {
329 count[i] = 0;
330 instList[i].clear();
331 }
332
333 // Initialize the number of free IQ entries.
334 freeEntries = numEntries;
335
336 // Note that in actuality, the registers corresponding to the logical
337 // registers start off as ready. However this doesn't matter for the
338 // IQ as the instruction should have been correctly told if those
339 // registers are ready in rename. Thus it can all be initialized as
340 // unready.
341 for (int i = 0; i < numPhysRegs; ++i) {
342 regScoreboard[i] = false;
343 }
344
345 for (int i = 0; i < numThreads; ++i) {
346 squashedSeqNum[i] = 0;
347 }
348
349 for (int i = 0; i < Num_OpClasses; ++i) {
350 while (!readyInsts[i].empty())
351 readyInsts[i].pop();
352 queueOnList[i] = false;
353 readyIt[i] = listOrder.end();
354 }
355 nonSpecInsts.clear();
356 listOrder.clear();
357}
358
359template <class Impl>
360void
361InstructionQueue<Impl>::setActiveThreads(list<unsigned> *at_ptr)
362{
363 DPRINTF(IQ, "Setting active threads list pointer.\n");
364 activeThreads = at_ptr;
365}
366
367template <class Impl>
368void
369InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
370{
371 DPRINTF(IQ, "Set the issue to execute queue.\n");
372 issueToExecuteQueue = i2e_ptr;
373}
374
375template <class Impl>
376void
377InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
378{
379 DPRINTF(IQ, "Set the time buffer.\n");
380 timeBuffer = tb_ptr;
381
382 fromCommit = timeBuffer->getWire(-commitToIEWDelay);
383}
384
385template <class Impl>
386void
387InstructionQueue<Impl>::switchOut()
388{
389 resetState();
390 dependGraph.reset();
391 switchedOut = true;
392 for (int i = 0; i < numThreads; ++i) {
393 memDepUnit[i].switchOut();
394 }
395}
396
397template <class Impl>
398void
399InstructionQueue<Impl>::takeOverFrom()
400{
401 switchedOut = false;
402}
403
404template <class Impl>
405int
406InstructionQueue<Impl>::entryAmount(int num_threads)
407{
408 if (iqPolicy == Partitioned) {
409 return numEntries / num_threads;
410 } else {
411 return 0;
412 }
413}
414
415
416template <class Impl>
417void
418InstructionQueue<Impl>::resetEntries()
419{
420 if (iqPolicy != Dynamic || numThreads > 1) {
421 int active_threads = (*activeThreads).size();
422
423 list<unsigned>::iterator threads = (*activeThreads).begin();
424 list<unsigned>::iterator list_end = (*activeThreads).end();
425
426 while (threads != list_end) {
427 if (iqPolicy == Partitioned) {
428 maxEntries[*threads++] = numEntries / active_threads;
429 } else if(iqPolicy == Threshold && active_threads == 1) {
430 maxEntries[*threads++] = numEntries;
431 }
432 }
433 }
434}
435
436template <class Impl>
437unsigned
438InstructionQueue<Impl>::numFreeEntries()
439{
440 return freeEntries;
441}
442
443template <class Impl>
444unsigned
445InstructionQueue<Impl>::numFreeEntries(unsigned tid)
446{
447 return maxEntries[tid] - count[tid];
448}
449
450// Might want to do something more complex if it knows how many instructions
451// will be issued this cycle.
452template <class Impl>
453bool
454InstructionQueue<Impl>::isFull()
455{
456 if (freeEntries == 0) {
457 return(true);
458 } else {
459 return(false);
460 }
461}
462
463template <class Impl>
464bool
465InstructionQueue<Impl>::isFull(unsigned tid)
466{
467 if (numFreeEntries(tid) == 0) {
468 return(true);
469 } else {
470 return(false);
471 }
472}
473
474template <class Impl>
475bool
476InstructionQueue<Impl>::hasReadyInsts()
477{
478 if (!listOrder.empty()) {
479 return true;
480 }
481
482 for (int i = 0; i < Num_OpClasses; ++i) {
483 if (!readyInsts[i].empty()) {
484 return true;
485 }
486 }
487
488 return false;
489}
490
491template <class Impl>
492void
493InstructionQueue<Impl>::insert(DynInstPtr &new_inst)
494{
495 // Make sure the instruction is valid
496 assert(new_inst);
497
498 DPRINTF(IQ, "Adding instruction [sn:%lli] PC %#x to the IQ.\n",
499 new_inst->seqNum, new_inst->readPC());
500
501 assert(freeEntries != 0);
502
503 instList[new_inst->threadNumber].push_back(new_inst);
504
505 --freeEntries;
506
507 new_inst->setInIQ();
508
509 // Look through its source registers (physical regs), and mark any
510 // dependencies.
511 addToDependents(new_inst);
512
513 // Have this instruction set itself as the producer of its destination
514 // register(s).
515 addToProducers(new_inst);
516
517 if (new_inst->isMemRef()) {
518 memDepUnit[new_inst->threadNumber].insert(new_inst);
519 } else {
520 addIfReady(new_inst);
521 }
522
523 ++iqInstsAdded;
524
525 count[new_inst->threadNumber]++;
526
527 assert(freeEntries == (numEntries - countInsts()));
528}
529
530template <class Impl>
531void
532InstructionQueue<Impl>::insertNonSpec(DynInstPtr &new_inst)
533{
534 // @todo: Clean up this code; can do it by setting inst as unable
535 // to issue, then calling normal insert on the inst.
536
537 assert(new_inst);
538
539 nonSpecInsts[new_inst->seqNum] = new_inst;
540
541 DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %#x "
542 "to the IQ.\n",
543 new_inst->seqNum, new_inst->readPC());
544
545 assert(freeEntries != 0);
546
547 instList[new_inst->threadNumber].push_back(new_inst);
548
549 --freeEntries;
550
551 new_inst->setInIQ();
552
553 // Have this instruction set itself as the producer of its destination
554 // register(s).
555 addToProducers(new_inst);
556
557 // If it's a memory instruction, add it to the memory dependency
558 // unit.
559 if (new_inst->isMemRef()) {
560 memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
561 }
562
563 ++iqNonSpecInstsAdded;
564
565 count[new_inst->threadNumber]++;
566
567 assert(freeEntries == (numEntries - countInsts()));
568}
569
570template <class Impl>
571void
572InstructionQueue<Impl>::insertBarrier(DynInstPtr &barr_inst)
573{
574 memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);
575
576 insertNonSpec(barr_inst);
577}
578
579template <class Impl>
580typename Impl::DynInstPtr
581InstructionQueue<Impl>::getInstToExecute()
582{
583 assert(!instsToExecute.empty());
584 DynInstPtr inst = instsToExecute.front();
585 instsToExecute.pop_front();
586 return inst;
587}
588
589template <class Impl>
590void
591InstructionQueue<Impl>::addToOrderList(OpClass op_class)
592{
593 assert(!readyInsts[op_class].empty());
594
595 ListOrderEntry queue_entry;
596
597 queue_entry.queueType = op_class;
598
599 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
600
601 ListOrderIt list_it = listOrder.begin();
602 ListOrderIt list_end_it = listOrder.end();
603
604 while (list_it != list_end_it) {
605 if ((*list_it).oldestInst > queue_entry.oldestInst) {
606 break;
607 }
608
609 list_it++;
610 }
611
612 readyIt[op_class] = listOrder.insert(list_it, queue_entry);
613 queueOnList[op_class] = true;
614}
615
616template <class Impl>
617void
618InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
619{
620 // Get iterator of next item on the list
621 // Delete the original iterator
622 // Determine if the next item is either the end of the list or younger
623 // than the new instruction. If so, then add in a new iterator right here.
624 // If not, then move along.
625 ListOrderEntry queue_entry;
626 OpClass op_class = (*list_order_it).queueType;
627 ListOrderIt next_it = list_order_it;
628
629 ++next_it;
630
631 queue_entry.queueType = op_class;
632 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
633
634 while (next_it != listOrder.end() &&
635 (*next_it).oldestInst < queue_entry.oldestInst) {
636 ++next_it;
637 }
638
639 readyIt[op_class] = listOrder.insert(next_it, queue_entry);
640}
641
642template <class Impl>
643void
644InstructionQueue<Impl>::processFUCompletion(DynInstPtr &inst, int fu_idx)
645{
646 // The CPU could have been sleeping until this op completed (*extremely*
647 // long latency op). Wake it if it was. This may be overkill.
648 if (isSwitchedOut()) {
649 return;
650 }
651
652 iewStage->wakeCPU();
653
654 if (fu_idx > -1)
655 fuPool->freeUnitNextCycle(fu_idx);
656
657 // @todo: Ensure that these FU Completions happen at the beginning
658 // of a cycle, otherwise they could add too many instructions to
659 // the queue.
660 issueToExecuteQueue->access(0)->size++;
661 instsToExecute.push_back(inst);
662}
663
664// @todo: Figure out a better way to remove the squashed items from the
665// lists. Checking the top item of each list to see if it's squashed
666// wastes time and forces jumps.
667template <class Impl>
668void
669InstructionQueue<Impl>::scheduleReadyInsts()
670{
671 DPRINTF(IQ, "Attempting to schedule ready instructions from "
672 "the IQ.\n");
673
674 IssueStruct *i2e_info = issueToExecuteQueue->access(0);
675
676 // Start with an iterator at the head of the issue-order list.
677 // While issue bandwidth remains and the end of the list has not been
678 // reached, try to get an FU that can execute the oldest ready
679 // instruction of that op class. If successful, update oldestInst to
680 // the new top of that ready queue and reinsert the queue entry at the
681 // proper place in the list; otherwise just advance the iterator.
682 // This avoids repeatedly trying to schedule an op class for which
683 // no FU is currently available.
684 ListOrderIt order_it = listOrder.begin();
685 ListOrderIt order_end_it = listOrder.end();
686 int total_issued = 0;
687
688 while (total_issued < totalWidth &&
689 order_it != order_end_it) {
690 OpClass op_class = (*order_it).queueType;
691
692 assert(!readyInsts[op_class].empty());
693
694 DynInstPtr issuing_inst = readyInsts[op_class].top();
695
696 assert(issuing_inst->seqNum == (*order_it).oldestInst);
697
698 if (issuing_inst->isSquashed()) {
699 readyInsts[op_class].pop();
700
701 if (!readyInsts[op_class].empty()) {
702 moveToYoungerInst(order_it);
703 } else {
704 readyIt[op_class] = listOrder.end();
705 queueOnList[op_class] = false;
706 }
707
708 listOrder.erase(order_it++);
709
710 ++iqSquashedInstsIssued;
711
712 continue;
713 }
714
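// idx of -2 (the default) marks an instruction that will issue without
// claiming an FU; a getUnit() result of -1 is treated below as "no FU
// available this cycle", while a non-negative result is the index of
// the FU that was acquired.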
715 int idx = -2;
716 int op_latency = 1;
717 int tid = issuing_inst->threadNumber;
718
719 if (op_class != No_OpClass) {
720 idx = fuPool->getUnit(op_class);
721
722 if (idx > -1) {
723 op_latency = fuPool->getOpLatency(op_class);
724 }
725 }
726
727 // If the instruction doesn't require an FU, or a valid FU was
728 // obtained, then schedule it for execution.
729 if (idx == -2 || idx != -1) {
730 if (op_latency == 1) {
731 i2e_info->size++;
732 instsToExecute.push_back(issuing_inst);
733
734 // Add the FU onto the list of FUs to be freed next
735 // cycle if we used one.
736 if (idx >= 0)
737 fuPool->freeUnitNextCycle(idx);
738 } else {
739 int issue_latency = fuPool->getIssueLatency(op_class);
740 // Generate completion event for the FU
741 FUCompletion *execution = new FUCompletion(issuing_inst,
742 idx, this);
743
744 execution->schedule(curTick + cpu->cycles(issue_latency - 1));
745
746 // @todo: Enforce that issue_latency == 1 or op_latency
747 if (issue_latency > 1) {
748 // If FU isn't pipelined, then it must be freed
749 // upon the execution completing.
750 execution->setFreeFU();
751 } else {
752 // Add the FU onto the list of FUs to be freed next cycle.
753 fuPool->freeUnitNextCycle(idx);
754 }
755 }
756
757 DPRINTF(IQ, "Thread %i: Issuing instruction PC %#x "
758 "[sn:%lli]\n",
759 tid, issuing_inst->readPC(),
760 issuing_inst->seqNum);
761
762 readyInsts[op_class].pop();
763
764 if (!readyInsts[op_class].empty()) {
765 moveToYoungerInst(order_it);
766 } else {
767 readyIt[op_class] = listOrder.end();
768 queueOnList[op_class] = false;
769 }
770
771 issuing_inst->setIssued();
772 ++total_issued;
773
774 if (!issuing_inst->isMemRef()) {
775 // Memory instructions cannot be freed from the IQ until they
776 // complete.
777 ++freeEntries;
778 count[tid]--;
779 issuing_inst->removeInIQ();
780 } else {
781 memDepUnit[tid].issue(issuing_inst);
782 }
783
784 listOrder.erase(order_it++);
785 statIssuedInstType[tid][op_class]++;
786 } else {
787 statFuBusy[op_class]++;
788 fuBusy[tid]++;
789 ++order_it;
790 }
791 }
792
793 numIssuedDist.sample(total_issued);
794 iqInstsIssued += total_issued;
795
796 // If we issued any instructions, tell the CPU we had activity.
797 if (total_issued) {
798 cpu->activityThisCycle();
799 } else {
800 DPRINTF(IQ, "Not able to schedule any instructions.\n");
801 }
802}
803
804template <class Impl>
805void
806InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
807{
808 DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
809 "to execute.\n", inst);
810
811 NonSpecMapIt inst_it = nonSpecInsts.find(inst);
812
813 assert(inst_it != nonSpecInsts.end());
814
815 unsigned tid = (*inst_it).second->threadNumber;
816
817 (*inst_it).second->setCanIssue();
818
819 if (!(*inst_it).second->isMemRef()) {
820 addIfReady((*inst_it).second);
821 } else {
822 memDepUnit[tid].nonSpecInstReady((*inst_it).second);
823 }
824
825 (*inst_it).second = NULL;
826
827 nonSpecInsts.erase(inst_it);
828}
829
830template <class Impl>
831void
832InstructionQueue<Impl>::commit(const InstSeqNum &inst, unsigned tid)
833{
834 DPRINTF(IQ, "[tid:%i]: Committing instructions up to and including "
835 "[sn:%lli]\n", tid, inst);
836
837 ListIt iq_it = instList[tid].begin();
838
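// Advance the iterator before popping so it never refers to the
// element being removed from the front of the list.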
839 while (iq_it != instList[tid].end() &&
840 (*iq_it)->seqNum <= inst) {
841 ++iq_it;
842 instList[tid].pop_front();
843 }
844
845 assert(freeEntries == (numEntries - countInsts()));
846}
847
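// Wakes up all instructions waiting on the destination registers of the
// completed instruction: each dependency chain is drained, the consumers
// have the corresponding source register marked ready (and are added to
// the ready lists if fully ready), and the register scoreboard entry is
// set. Returns the number of dependents woken.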
848template <class Impl>
849int
850InstructionQueue<Impl>::wakeDependents(DynInstPtr &completed_inst)
851{
852 int dependents = 0;
853
854 DPRINTF(IQ, "Waking dependents of completed instruction.\n");
855
856 assert(!completed_inst->isSquashed());
857
858 // Tell the memory dependence unit to wake any dependents on this
859 // instruction if it is a memory instruction. Also complete the memory
860 // instruction at this point since we know it executed without issues.
861 // @todo: Might want to rename "completeMemInst" to something that
862 // indicates that it won't need to be replayed, and call this
863 // earlier. Might not be a big deal.
864 if (completed_inst->isMemRef()) {
865 memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
866 completeMemInst(completed_inst);
867 } else if (completed_inst->isMemBarrier() ||
868 completed_inst->isWriteBarrier()) {
869 memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
870 }
871
872 for (int dest_reg_idx = 0;
873 dest_reg_idx < completed_inst->numDestRegs();
874 dest_reg_idx++)
875 {
876 PhysRegIndex dest_reg =
877 completed_inst->renamedDestRegIdx(dest_reg_idx);
878
879 // Special case of misc or control registers. They are not
880 // handled by the IQ and thus have no dependency graph entry.
881 // @todo Figure out a cleaner way to handle this.
882 if (dest_reg >= numPhysRegs) {
883 continue;
884 }
885
886 DPRINTF(IQ, "Waking any dependents on register %i.\n",
887 (int) dest_reg);
888
889 //Go through the dependency chain, marking the registers as
890 //ready within the waiting instructions.
891 DynInstPtr dep_inst = dependGraph.pop(dest_reg);
892
893 while (dep_inst) {
894 DPRINTF(IQ, "Waking up a dependent instruction, PC %#x.\n",
895 dep_inst->readPC());
896
897 // Might want to give more information to the instruction
898 // so that it knows which of its source registers is
899 // ready. However that would mean that the dependency
900 // graph entries would need to hold the src_reg_idx.
901 dep_inst->markSrcRegReady();
902
903 addIfReady(dep_inst);
904
905 dep_inst = dependGraph.pop(dest_reg);
906
907 ++dependents;
908 }
909
910 // Reset the head node now that all of its dependents have
911 // been woken up.
912 assert(dependGraph.empty(dest_reg));
913 dependGraph.clearInst(dest_reg);
914
915 // Mark the scoreboard as having that register ready.
916 regScoreboard[dest_reg] = true;
917 }
918 return dependents;
919}
920
921template <class Impl>
922void
923InstructionQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
924{
925 OpClass op_class = ready_inst->opClass();
926
927 readyInsts[op_class].push(ready_inst);
928
929 // The issue-order list needs updating if this op class's queue is not
930 // yet on it, or if its new oldest instruction is older than last time.
931 if (!queueOnList[op_class]) {
932 addToOrderList(op_class);
933 } else if (readyInsts[op_class].top()->seqNum <
934 (*readyIt[op_class]).oldestInst) {
935 listOrder.erase(readyIt[op_class]);
936 addToOrderList(op_class);
937 }
938
939 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
940 "the ready list, PC %#x opclass:%i [sn:%lli].\n",
941 ready_inst->readPC(), op_class, ready_inst->seqNum);
942}
943
944template <class Impl>
945void
946InstructionQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
947{
948 memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
949}
950
951template <class Impl>
952void
953InstructionQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
954{
955 memDepUnit[replay_inst->threadNumber].replay(replay_inst);
956}
957
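// Memory instructions keep their IQ entry until they complete (see the
// comment in scheduleReadyInsts()); this is where that entry is finally
// freed and the memory dependence unit is told the access is done.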
958template <class Impl>
959void
960InstructionQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
961{
962 int tid = completed_inst->threadNumber;
963
964 DPRINTF(IQ, "Completing mem instruction PC:%#x [sn:%lli]\n",
965 completed_inst->readPC(), completed_inst->seqNum);
966
967 ++freeEntries;
968
969 completed_inst->memOpDone = true;
970
971 memDepUnit[tid].completed(completed_inst);
972
973 count[tid]--;
974}
975
976template <class Impl>
977void
978InstructionQueue<Impl>::violation(DynInstPtr &store,
979 DynInstPtr &faulting_load)
980{
981 memDepUnit[store->threadNumber].violation(store, faulting_load);
982}
983
984template <class Impl>
985void
986InstructionQueue<Impl>::squash(unsigned tid)
987{
988 DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
989 "the IQ.\n", tid);
990
991 // Read instruction sequence number of last instruction out of the
992 // time buffer.
993 squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;
994
995 // Call doSquash if there are insts in the IQ
996 if (count[tid] > 0) {
997 doSquash(tid);
998 }
999
1000 // Also tell the memory dependence unit to squash.
1001 memDepUnit[tid].squash(squashedSeqNum[tid], tid);
1002}
1003
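// Walks the thread's instruction list from the tail toward the head,
// squashing every instruction younger than squashedSeqNum[tid]. For
// instructions still occupying an IQ entry, their dependency-graph or
// non-speculative-map bookkeeping is removed and the entry is freed.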
1004template <class Impl>
1005void
1006InstructionQueue<Impl>::doSquash(unsigned tid)
1007{
1008 // Start at the tail.
1009 ListIt squash_it = instList[tid].end();
1010 --squash_it;
1011
1012 DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
1013 tid, squashedSeqNum[tid]);
1014
1015 // Squash any instructions younger than the squashed sequence number
1016 // given.
1017 while (squash_it != instList[tid].end() &&
1018 (*squash_it)->seqNum > squashedSeqNum[tid]) {
1019
1020 DynInstPtr squashed_inst = (*squash_it);
1021
1022 // Only handle the instruction if it actually is in the IQ and
1023 // hasn't already been squashed in the IQ.
1024 if (squashed_inst->threadNumber != tid ||
1025 squashed_inst->isSquashedInIQ()) {
1026 --squash_it;
1027 continue;
1028 }
1029
1030 if (!squashed_inst->isIssued() ||
1031 (squashed_inst->isMemRef() &&
1032 !squashed_inst->memOpDone)) {
1033
1034 // Remove the instruction from the dependency list.
1035 if (!squashed_inst->isNonSpeculative() &&
1036 !squashed_inst->isStoreConditional() &&
1037 !squashed_inst->isMemBarrier() &&
1038 !squashed_inst->isWriteBarrier()) {
1039
1040 for (int src_reg_idx = 0;
1041 src_reg_idx < squashed_inst->numSrcRegs();
1042 src_reg_idx++)
1043 {
1044 PhysRegIndex src_reg =
1045 squashed_inst->renamedSrcRegIdx(src_reg_idx);
1046
1047 // Only remove it from the dependency graph if it
1048 // was placed there in the first place.
1049
1050 // Instead of doing a linked list traversal, we
1051 // can just remove these squashed instructions
1052 // either at issue time, or when the register is
1053 // overwritten. The only downside to this is it
1054 // leaves more room for error.
1055
1056 if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
1057 src_reg < numPhysRegs) {
1058 dependGraph.remove(src_reg, squashed_inst);
1059 }
1060
1061
1062 ++iqSquashedOperandsExamined;
1063 }
1064 } else {
1065 NonSpecMapIt ns_inst_it =
1066 nonSpecInsts.find(squashed_inst->seqNum);
1067 assert(ns_inst_it != nonSpecInsts.end());
1068
1069 (*ns_inst_it).second = NULL;
1070
1071 nonSpecInsts.erase(ns_inst_it);
1072
1073 ++iqSquashedNonSpecRemoved;
1074 }
1075
1076 // Might want to also clear out the head of the dependency graph.
1077
1078 // Mark it as squashed within the IQ.
1079 squashed_inst->setSquashedInIQ();
1080
1081 // @todo: Remove this hack where several statuses are set so the
1082 // inst will flow through the rest of the pipeline.
1083 squashed_inst->setIssued();
1084 squashed_inst->setCanCommit();
1085 squashed_inst->removeInIQ();
1086
1087 //Update Thread IQ Count
1088 count[squashed_inst->threadNumber]--;
1089
1090 ++freeEntries;
1091
1092 DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %#x "
1093 "squashed.\n",
1094 tid, squashed_inst->seqNum, squashed_inst->readPC());
1095 }
1096
1097 instList[tid].erase(squash_it--);
1098 ++iqSquashedInstsExamined;
1099 }
1100}
1101
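// Links the new instruction into the dependency graph for each source
// register that is still not ready, consulting the register scoreboard
// in case a source became ready while the instruction was in flight
// between stages. Returns true if the instruction now depends on at
// least one outstanding producer.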
1102template <class Impl>
1103bool
1104InstructionQueue<Impl>::addToDependents(DynInstPtr &new_inst)
1105{
1106 // Loop through the instruction's source registers, adding
1107 // them to the dependency list if they are not ready.
1108 int8_t total_src_regs = new_inst->numSrcRegs();
1109 bool return_val = false;
1110
1111 for (int src_reg_idx = 0;
1112 src_reg_idx < total_src_regs;
1113 src_reg_idx++)
1114 {
1115 // Only add it to the dependency graph if it's not ready.
1116 if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
1117 PhysRegIndex src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);
1118
1119 // Check the IQ's scoreboard to make sure the register
1120 // hasn't become ready while the instruction was in flight
1121 // between stages. Only if it really isn't ready should
1122 // it be added to the dependency graph.
1123 if (src_reg >= numPhysRegs) {
1124 continue;
1125 } else if (regScoreboard[src_reg] == false) {
1126 DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
1127 "is being added to the dependency chain.\n",
1128 new_inst->readPC(), src_reg);
1129
1130 dependGraph.insert(src_reg, new_inst);
1131
1132 // Change the return value to indicate that something
1133 // was added to the dependency graph.
1134 return_val = true;
1135 } else {
1136 DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
1137 "became ready before it reached the IQ.\n",
1138 new_inst->readPC(), src_reg);
1139 // Mark a register ready within the instruction.
1140 new_inst->markSrcRegReady(src_reg_idx);
1141 }
1142 }
1143 }
1144
1145 return return_val;
1146}
1147
1148template <class Impl>
1149void
1150InstructionQueue<Impl>::addToProducers(DynInstPtr &new_inst)
1151{
1152 // Nothing really needs to be marked when an instruction becomes
1153 // the producer of a register's value, but for convenience a ptr
1154 // to the producing instruction will be placed in the head node of
1155 // the dependency links.
1156 int8_t total_dest_regs = new_inst->numDestRegs();
1157
1158 for (int dest_reg_idx = 0;
1159 dest_reg_idx < total_dest_regs;
1160 dest_reg_idx++)
1161 {
1162 PhysRegIndex dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);
1163
1164 // Instructions that use the misc regs will have a reg number
1165 // higher than the normal physical registers. In this case these
1166 // registers are not renamed, and there is no need to track
1167 // dependencies as these instructions must be executed at commit.
1168 if (dest_reg >= numPhysRegs) {
1169 continue;
1170 }
1171
1172 if (!dependGraph.empty(dest_reg)) {
1173 dependGraph.dump();
1174 panic("Dependency graph %i not empty!", dest_reg);
1175 }
1176
1177 dependGraph.setInst(dest_reg, new_inst);
1178
1179 // Mark the scoreboard to say it's not yet ready.
1180 regScoreboard[dest_reg] = false;
1181 }
1182}
1183
1184template <class Impl>
1185void
1186InstructionQueue<Impl>::addIfReady(DynInstPtr &inst)
1187{
1188 // If the instruction now has all of its source registers
1189 // available, then add it to the list of ready instructions.
1190 if (inst->readyToIssue()) {
1191
1192 //Add the instruction to the proper ready list.
1193 if (inst->isMemRef()) {
1194
1195 DPRINTF(IQ, "Checking if memory instruction can issue.\n");
1196
1197 // Message to the mem dependence unit that this instruction has
1198 // its registers ready.
1199 memDepUnit[inst->threadNumber].regsReady(inst);
1200
1201 return;
1202 }
1203
1204 OpClass op_class = inst->opClass();
1205
1206 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
1207 "the ready list, PC %#x opclass:%i [sn:%lli].\n",
1208 inst->readPC(), op_class, inst->seqNum);
1209
1210 readyInsts[op_class].push(inst);
1211
1212 // The issue-order list needs updating if this op class's queue is not
1213 // yet on it, or if its new oldest instruction is older than last time.
1214 if (!queueOnList[op_class]) {
1215 addToOrderList(op_class);
1216 } else if (readyInsts[op_class].top()->seqNum <
1217 (*readyIt[op_class]).oldestInst) {
1218 listOrder.erase(readyIt[op_class]);
1219 addToOrderList(op_class);
1220 }
1221 }
1222}
1223
1224template <class Impl>
1225int
1226InstructionQueue<Impl>::countInsts()
1227{
1228#if 0
1229 // ksewell: This works, but it could definitely use a cleaner rewrite
1230 // with a more intuitive way of counting. Right now it's
1231 // just brute force.
1232 // Change the #if if you want to use this method.
1233 int total_insts = 0;
1234
1235 for (int i = 0; i < numThreads; ++i) {
1236 ListIt count_it = instList[i].begin();
1237
1238 while (count_it != instList[i].end()) {
1239 if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
1240 if (!(*count_it)->isIssued()) {
1241 ++total_insts;
1242 } else if ((*count_it)->isMemRef() &&
1243 !(*count_it)->memOpDone) {
1244 // Loads that have not been marked as executed still count
1245 // towards the total instructions.
1246 ++total_insts;
1247 }
1248 }
1249
1250 ++count_it;
1251 }
1252 }
1253
1254 return total_insts;
1255#else
1256 return numEntries - freeEntries;
1257#endif
1258}
1259
1260template <class Impl>
1261void
1262InstructionQueue<Impl>::dumpLists()
1263{
1264 for (int i = 0; i < Num_OpClasses; ++i) {
1265 cprintf("Ready list %i size: %i\n", i, readyInsts[i].size());
1266
1267 cprintf("\n");
1268 }
1269
1270 cprintf("Non speculative list size: %i\n", nonSpecInsts.size());
1271
1272 NonSpecMapIt non_spec_it = nonSpecInsts.begin();
1273 NonSpecMapIt non_spec_end_it = nonSpecInsts.end();
1274
1275 cprintf("Non speculative list: ");
1276
1277 while (non_spec_it != non_spec_end_it) {
1278 cprintf("%#x [sn:%lli] ", (*non_spec_it).second->readPC(),
1279 (*non_spec_it).second->seqNum);
1280 ++non_spec_it;
1281 }
1282
1283 cprintf("\n");
1284
1285 ListOrderIt list_order_it = listOrder.begin();
1286 ListOrderIt list_order_end_it = listOrder.end();
1287 int i = 1;
1288
1289 cprintf("List order: ");
1290
1291 while (list_order_it != list_order_end_it) {
1292 cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
1293 (*list_order_it).oldestInst);
1294
1295 ++list_order_it;
1296 ++i;
1297 }
1298
1299 cprintf("\n");
1300}
1301
1302
1303template <class Impl>
1304void
1305InstructionQueue<Impl>::dumpInsts()
1306{
1307 for (int i = 0; i < numThreads; ++i) {
1308 int num = 0;
1309 int valid_num = 0;
1310 ListIt inst_list_it = instList[i].begin();
1311
1312 while (inst_list_it != instList[i].end())
1313 {
1314 cprintf("Instruction:%i\n",
1315 num);
1316 if (!(*inst_list_it)->isSquashed()) {
1317 if (!(*inst_list_it)->isIssued()) {
1318 ++valid_num;
1319 cprintf("Count:%i\n", valid_num);
1320 } else if ((*inst_list_it)->isMemRef() &&
1321 !(*inst_list_it)->memOpDone) {
1322 // Loads that have not been marked as executed
1323 // still count towards the total instructions.
1324 ++valid_num;
1325 cprintf("Count:%i\n", valid_num);
1326 }
1327 }
1328
1329 cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
1330 "Issued:%i\nSquashed:%i\n",
1331 (*inst_list_it)->readPC(),
1332 (*inst_list_it)->seqNum,
1333 (*inst_list_it)->threadNumber,
1334 (*inst_list_it)->isIssued(),
1335 (*inst_list_it)->isSquashed());
1336
1337 if ((*inst_list_it)->isMemRef()) {
1338 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
1339 }
1340
1341 cprintf("\n");
1342
1343 inst_list_it++;
1344 ++num;
1345 }
1346 }
1347
1348 cprintf("Insts to Execute list:\n");
1349
1350 int num = 0;
1351 int valid_num = 0;
1352 ListIt inst_list_it = instsToExecute.begin();
1353
1354 while (inst_list_it != instsToExecute.end())
1355 {
1356 cprintf("Instruction:%i\n",
1357 num);
1358 if (!(*inst_list_it)->isSquashed()) {
1359 if (!(*inst_list_it)->isIssued()) {
1360 ++valid_num;
1361 cprintf("Count:%i\n", valid_num);
1362 } else if ((*inst_list_it)->isMemRef() &&
1363 !(*inst_list_it)->memOpDone) {
1364 // Loads that have not been marked as executed
1365 // still count towards the total instructions.
1366 ++valid_num;
1367 cprintf("Count:%i\n", valid_num);
1368 }
1369 }
1370
1371 cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
1372 "Issued:%i\nSquashed:%i\n",
1373 (*inst_list_it)->readPC(),
1374 (*inst_list_it)->seqNum,
1375 (*inst_list_it)->threadNumber,
1376 (*inst_list_it)->isIssued(),
1377 (*inst_list_it)->isSquashed());
1378
1379 if ((*inst_list_it)->isMemRef()) {
1380 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
1381 }
1382
1383 cprintf("\n");
1384
1385 inst_list_it++;
1386 ++num;
1387 }
1388}