inst_queue_impl.hh: diff of revisions 2731:822b96578fba and 2820:7fde0b0f8f78
1/*
2 * Copyright (c) 2004-2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Kevin Lim
29 */
30
31#include <limits>
32#include <vector>
33
34#include "sim/root.hh"
35
36#include "cpu/o3/fu_pool.hh"
37#include "cpu/o3/inst_queue.hh"
38
39using namespace std;
40
41template <class Impl>
42InstructionQueue<Impl>::FUCompletion::FUCompletion(DynInstPtr &_inst,
43 int fu_idx,
44 InstructionQueue<Impl> *iq_ptr)
45 : Event(&mainEventQueue, Stat_Event_Pri),
46 inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false)
47{
48 this->setFlags(Event::AutoDelete);
49}
50
51template <class Impl>
52void
53InstructionQueue<Impl>::FUCompletion::process()
54{
55 iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1);
56 inst = NULL;
57}
58
59
60template <class Impl>
61const char *
62InstructionQueue<Impl>::FUCompletion::description()
63{
64 return "Functional unit completion event";
65}
66
67template <class Impl>
68InstructionQueue<Impl>::InstructionQueue(Params *params)
69 : fuPool(params->fuPool),
70 numEntries(params->numIQEntries),
71 totalWidth(params->issueWidth),
72 numPhysIntRegs(params->numPhysIntRegs),
73 numPhysFloatRegs(params->numPhysFloatRegs),
74 commitToIEWDelay(params->commitToIEWDelay)
75{
76 assert(fuPool);
77
78 switchedOut = false;
79
80 numThreads = params->numberOfThreads;
81
82 // Set the number of physical registers as the number of int + float
83 numPhysRegs = numPhysIntRegs + numPhysFloatRegs;
84
85 DPRINTF(IQ, "There are %i physical registers.\n", numPhysRegs);
86
87 //Create an entry for each physical register within the
88 //dependency graph.
89 dependGraph.resize(numPhysRegs);
90
91 // Resize the register scoreboard.
92 regScoreboard.resize(numPhysRegs);
93
94 //Initialize Mem Dependence Units
95 for (int i = 0; i < numThreads; i++) {
96 memDepUnit[i].init(params,i);
97 memDepUnit[i].setIQ(this);
98 }
99
100 resetState();
101
102 string policy = params->smtIQPolicy;
103
104 //Convert string to lowercase
105 std::transform(policy.begin(), policy.end(), policy.begin(),
106 (int(*)(int)) tolower);
107
108 //Figure out resource sharing policy
109 if (policy == "dynamic") {
110 iqPolicy = Dynamic;
111
112 //Set Max Entries to Total ROB Capacity
113 for (int i = 0; i < numThreads; i++) {
114 maxEntries[i] = numEntries;
115 }
116
117 } else if (policy == "partitioned") {
118 iqPolicy = Partitioned;
119
120        //@todo: Make this work if part_amt doesn't divide evenly.
121 int part_amt = numEntries / numThreads;
122
123 //Divide ROB up evenly
124 for (int i = 0; i < numThreads; i++) {
125 maxEntries[i] = part_amt;
126 }
127
128        DPRINTF(Fetch, "IQ sharing policy set to Partitioned: "
129                "%i entries per thread.\n", part_amt);
130
131 } else if (policy == "threshold") {
132 iqPolicy = Threshold;
133
134 double threshold = (double)params->smtIQThreshold / 100;
135
136 int thresholdIQ = (int)((double)threshold * numEntries);
137
138 //Divide up by threshold amount
139 for (int i = 0; i < numThreads; i++) {
140 maxEntries[i] = thresholdIQ;
141 }
142
143        DPRINTF(Fetch, "IQ sharing policy set to Threshold: "
144                "%i entries per thread.\n", thresholdIQ);
145 } else {
146        assert(0 && "Invalid IQ Sharing Policy. Options are: {Dynamic, "
147               "Partitioned, Threshold}");
148 }
149}
150
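// A worked example of the three sharing policies handled above (the
// parameter values are assumed purely for illustration): with
// numIQEntries = 64, numberOfThreads = 2 and smtIQThreshold = 50,
//
//     "dynamic"     -> maxEntries[0] = maxEntries[1] = 64
//                      (both threads compete for the shared pool)
//     "partitioned" -> maxEntries[0] = maxEntries[1] = 64 / 2 = 32
//     "threshold"   -> maxEntries[0] = maxEntries[1] = (50 / 100.0) * 64 = 32
//
// In every case freeEntries stays a single shared count; maxEntries only
// caps how many of those entries each thread may occupy (see isFull(tid)
// and numFreeEntries(tid) below).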
151template <class Impl>
152InstructionQueue<Impl>::~InstructionQueue()
153{
154 dependGraph.reset();
155#ifdef DEBUG
156 cprintf("Nodes traversed: %i, removed: %i\n",
157 dependGraph.nodesTraversed, dependGraph.nodesRemoved);
158#endif
159}
160
161template <class Impl>
162std::string
163InstructionQueue<Impl>::name() const
164{
165 return cpu->name() + ".iq";
166}
167
168template <class Impl>
169void
170InstructionQueue<Impl>::regStats()
171{
172 using namespace Stats;
173 iqInstsAdded
174 .name(name() + ".iqInstsAdded")
175 .desc("Number of instructions added to the IQ (excludes non-spec)")
176 .prereq(iqInstsAdded);
177
178 iqNonSpecInstsAdded
179 .name(name() + ".iqNonSpecInstsAdded")
180 .desc("Number of non-speculative instructions added to the IQ")
181 .prereq(iqNonSpecInstsAdded);
182
183 iqInstsIssued
184 .name(name() + ".iqInstsIssued")
185 .desc("Number of instructions issued")
186 .prereq(iqInstsIssued);
187
188 iqIntInstsIssued
189 .name(name() + ".iqIntInstsIssued")
190 .desc("Number of integer instructions issued")
191 .prereq(iqIntInstsIssued);
192
193 iqFloatInstsIssued
194 .name(name() + ".iqFloatInstsIssued")
195 .desc("Number of float instructions issued")
196 .prereq(iqFloatInstsIssued);
197
198 iqBranchInstsIssued
199 .name(name() + ".iqBranchInstsIssued")
200 .desc("Number of branch instructions issued")
201 .prereq(iqBranchInstsIssued);
202
203 iqMemInstsIssued
204 .name(name() + ".iqMemInstsIssued")
205 .desc("Number of memory instructions issued")
206 .prereq(iqMemInstsIssued);
207
208 iqMiscInstsIssued
209 .name(name() + ".iqMiscInstsIssued")
210 .desc("Number of miscellaneous instructions issued")
211 .prereq(iqMiscInstsIssued);
212
213 iqSquashedInstsIssued
214 .name(name() + ".iqSquashedInstsIssued")
215 .desc("Number of squashed instructions issued")
216 .prereq(iqSquashedInstsIssued);
217
218 iqSquashedInstsExamined
219 .name(name() + ".iqSquashedInstsExamined")
220 .desc("Number of squashed instructions iterated over during squash;"
221 " mainly for profiling")
222 .prereq(iqSquashedInstsExamined);
223
224 iqSquashedOperandsExamined
225 .name(name() + ".iqSquashedOperandsExamined")
226 .desc("Number of squashed operands that are examined and possibly "
227 "removed from graph")
228 .prereq(iqSquashedOperandsExamined);
229
230 iqSquashedNonSpecRemoved
231 .name(name() + ".iqSquashedNonSpecRemoved")
232 .desc("Number of squashed non-spec instructions that were removed")
233 .prereq(iqSquashedNonSpecRemoved);
234
235 queueResDist
236 .init(Num_OpClasses, 0, 99, 2)
237 .name(name() + ".IQ:residence:")
238 .desc("cycles from dispatch to issue")
239 .flags(total | pdf | cdf )
240 ;
241 for (int i = 0; i < Num_OpClasses; ++i) {
242 queueResDist.subname(i, opClassStrings[i]);
243 }
244 numIssuedDist
245 .init(0,totalWidth,1)
246 .name(name() + ".ISSUE:issued_per_cycle")
247 .desc("Number of insts issued each cycle")
248 .flags(pdf)
249 ;
250/*
251 dist_unissued
252 .init(Num_OpClasses+2)
253 .name(name() + ".ISSUE:unissued_cause")
254 .desc("Reason ready instruction not issued")
255 .flags(pdf | dist)
256 ;
257 for (int i=0; i < (Num_OpClasses + 2); ++i) {
258 dist_unissued.subname(i, unissued_names[i]);
259 }
260*/
261 statIssuedInstType
262 .init(numThreads,Num_OpClasses)
263 .name(name() + ".ISSUE:FU_type")
264 .desc("Type of FU issued")
265 .flags(total | pdf | dist)
266 ;
267 statIssuedInstType.ysubnames(opClassStrings);
268
269 //
270 // How long did instructions for a particular FU type wait prior to issue
271 //
272
273 issueDelayDist
274 .init(Num_OpClasses,0,99,2)
275 .name(name() + ".ISSUE:")
276 .desc("cycles from operands ready to issue")
277 .flags(pdf | cdf)
278 ;
279
280 for (int i=0; i<Num_OpClasses; ++i) {
281 stringstream subname;
282 subname << opClassStrings[i] << "_delay";
283 issueDelayDist.subname(i, subname.str());
284 }
285
286 issueRate
287 .name(name() + ".ISSUE:rate")
288 .desc("Inst issue rate")
289 .flags(total)
290 ;
291 issueRate = iqInstsIssued / cpu->numCycles;
292
293 statFuBusy
294 .init(Num_OpClasses)
295 .name(name() + ".ISSUE:fu_full")
296 .desc("attempts to use FU when none available")
297 .flags(pdf | dist)
298 ;
299 for (int i=0; i < Num_OpClasses; ++i) {
300 statFuBusy.subname(i, opClassStrings[i]);
301 }
302
303 fuBusy
304 .init(numThreads)
305 .name(name() + ".ISSUE:fu_busy_cnt")
306 .desc("FU busy when requested")
307 .flags(total)
308 ;
309
310 fuBusyRate
311 .name(name() + ".ISSUE:fu_busy_rate")
312 .desc("FU busy rate (busy events/executed inst)")
313 .flags(total)
314 ;
315 fuBusyRate = fuBusy / iqInstsIssued;
316
317 for ( int i=0; i < numThreads; i++) {
318 // Tell mem dependence unit to reg stats as well.
319 memDepUnit[i].regStats();
320 }
321}
322
323template <class Impl>
324void
325InstructionQueue<Impl>::resetState()
326{
327 //Initialize thread IQ counts
328 for (int i = 0; i <numThreads; i++) {
329 count[i] = 0;
330 instList[i].clear();
331 }
332
333 // Initialize the number of free IQ entries.
334 freeEntries = numEntries;
335
336 // Note that in actuality, the registers corresponding to the logical
337 // registers start off as ready. However this doesn't matter for the
338 // IQ as the instruction should have been correctly told if those
339 // registers are ready in rename. Thus it can all be initialized as
340 // unready.
341 for (int i = 0; i < numPhysRegs; ++i) {
342 regScoreboard[i] = false;
343 }
344
345 for (int i = 0; i < numThreads; ++i) {
346 squashedSeqNum[i] = 0;
347 }
348
349 for (int i = 0; i < Num_OpClasses; ++i) {
350 while (!readyInsts[i].empty())
351 readyInsts[i].pop();
352 queueOnList[i] = false;
353 readyIt[i] = listOrder.end();
354 }
355 nonSpecInsts.clear();
356 listOrder.clear();
357}
358
359template <class Impl>
360void
361InstructionQueue<Impl>::setActiveThreads(list<unsigned> *at_ptr)
362{
363 DPRINTF(IQ, "Setting active threads list pointer.\n");
364 activeThreads = at_ptr;
365}
366
367template <class Impl>
368void
369InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
370{
371 DPRINTF(IQ, "Set the issue to execute queue.\n");
372 issueToExecuteQueue = i2e_ptr;
373}
374
375template <class Impl>
376void
377InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
378{
379 DPRINTF(IQ, "Set the time buffer.\n");
380 timeBuffer = tb_ptr;
381
382 fromCommit = timeBuffer->getWire(-commitToIEWDelay);
383}
384
385template <class Impl>
386void
387InstructionQueue<Impl>::switchOut()
388{
389 resetState();
390 dependGraph.reset();
391 switchedOut = true;
392 for (int i = 0; i < numThreads; ++i) {
393 memDepUnit[i].switchOut();
394 }
395}
396
397template <class Impl>
398void
399InstructionQueue<Impl>::takeOverFrom()
400{
401 switchedOut = false;
402}
403
404template <class Impl>
405int
406InstructionQueue<Impl>::entryAmount(int num_threads)
407{
408 if (iqPolicy == Partitioned) {
409 return numEntries / num_threads;
410 } else {
411 return 0;
412 }
413}
414
415
416template <class Impl>
417void
418InstructionQueue<Impl>::resetEntries()
419{
420 if (iqPolicy != Dynamic || numThreads > 1) {
421 int active_threads = (*activeThreads).size();
422
423 list<unsigned>::iterator threads = (*activeThreads).begin();
424 list<unsigned>::iterator list_end = (*activeThreads).end();
425
426 while (threads != list_end) {
427 if (iqPolicy == Partitioned) {
428 maxEntries[*threads++] = numEntries / active_threads;
429 } else if(iqPolicy == Threshold && active_threads == 1) {
430 maxEntries[*threads++] = numEntries;
431 }
432 }
433 }
434}
435
436template <class Impl>
437unsigned
438InstructionQueue<Impl>::numFreeEntries()
439{
440 return freeEntries;
441}
442
443template <class Impl>
444unsigned
445InstructionQueue<Impl>::numFreeEntries(unsigned tid)
446{
447 return maxEntries[tid] - count[tid];
448}
449
450// Might want to do something more complex if it knows how many instructions
451// will be issued this cycle.
452template <class Impl>
453bool
454InstructionQueue<Impl>::isFull()
455{
456 if (freeEntries == 0) {
457 return(true);
458 } else {
459 return(false);
460 }
461}
462
463template <class Impl>
464bool
465InstructionQueue<Impl>::isFull(unsigned tid)
466{
467 if (numFreeEntries(tid) == 0) {
468 return(true);
469 } else {
470 return(false);
471 }
472}
473
474template <class Impl>
475bool
476InstructionQueue<Impl>::hasReadyInsts()
477{
478 if (!listOrder.empty()) {
479 return true;
480 }
481
482 for (int i = 0; i < Num_OpClasses; ++i) {
483 if (!readyInsts[i].empty()) {
484 return true;
485 }
486 }
487
488 return false;
489}
490
491template <class Impl>
492void
493InstructionQueue<Impl>::insert(DynInstPtr &new_inst)
494{
495 // Make sure the instruction is valid
496 assert(new_inst);
497
498 DPRINTF(IQ, "Adding instruction [sn:%lli] PC %#x to the IQ.\n",
499 new_inst->seqNum, new_inst->readPC());
500
501 assert(freeEntries != 0);
502
503 instList[new_inst->threadNumber].push_back(new_inst);
504
505 --freeEntries;
506
507 new_inst->setInIQ();
508
509 // Look through its source registers (physical regs), and mark any
510 // dependencies.
511 addToDependents(new_inst);
512
513 // Have this instruction set itself as the producer of its destination
514 // register(s).
515 addToProducers(new_inst);
516
517 if (new_inst->isMemRef()) {
518 memDepUnit[new_inst->threadNumber].insert(new_inst);
519 } else {
520 addIfReady(new_inst);
521 }
522
523 ++iqInstsAdded;
524
525 count[new_inst->threadNumber]++;
526
527 assert(freeEntries == (numEntries - countInsts()));
528}
529
530template <class Impl>
531void
532InstructionQueue<Impl>::insertNonSpec(DynInstPtr &new_inst)
533{
534 // @todo: Clean up this code; can do it by setting inst as unable
535 // to issue, then calling normal insert on the inst.
536
537 assert(new_inst);
538
539 nonSpecInsts[new_inst->seqNum] = new_inst;
540
541 DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %#x "
542 "to the IQ.\n",
543 new_inst->seqNum, new_inst->readPC());
544
545 assert(freeEntries != 0);
546
547 instList[new_inst->threadNumber].push_back(new_inst);
548
549 --freeEntries;
550
551 new_inst->setInIQ();
552
553 // Have this instruction set itself as the producer of its destination
554 // register(s).
555 addToProducers(new_inst);
556
557 // If it's a memory instruction, add it to the memory dependency
558 // unit.
559 if (new_inst->isMemRef()) {
560 memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
561 }
562
563 ++iqNonSpecInstsAdded;
564
565 count[new_inst->threadNumber]++;
566
567 assert(freeEntries == (numEntries - countInsts()));
568}
569
570template <class Impl>
571void
572InstructionQueue<Impl>::insertBarrier(DynInstPtr &barr_inst)
573{
574 memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);
575
576 insertNonSpec(barr_inst);
577}
578
579template <class Impl>
580typename Impl::DynInstPtr
581InstructionQueue<Impl>::getInstToExecute()
582{
583 assert(!instsToExecute.empty());
584 DynInstPtr inst = instsToExecute.front();
585 instsToExecute.pop_front();
586 return inst;
587}
588
589template <class Impl>
590void
591InstructionQueue<Impl>::addToOrderList(OpClass op_class)
592{
593 assert(!readyInsts[op_class].empty());
594
595 ListOrderEntry queue_entry;
596
597 queue_entry.queueType = op_class;
598
599 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
600
601 ListOrderIt list_it = listOrder.begin();
602 ListOrderIt list_end_it = listOrder.end();
603
604 while (list_it != list_end_it) {
605 if ((*list_it).oldestInst > queue_entry.oldestInst) {
606 break;
607 }
608
609 list_it++;
610 }
611
612 readyIt[op_class] = listOrder.insert(list_it, queue_entry);
613 queueOnList[op_class] = true;
614}
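// Illustrative example of the ordering kept above (op classes and sequence
// numbers are assumed): listOrder holds at most one entry per op class,
// sorted by that class's oldest ready instruction.  If the integer-ALU
// queue's oldest ready instruction is [sn:12] while the memory-read queue's
// is [sn:7], the memory-read entry is inserted ahead of the integer-ALU
// entry, so scheduleReadyInsts() always examines the globally oldest ready
// instruction of each class first.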
615
616template <class Impl>
617void
618InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
619{
620 // Get iterator of next item on the list
621 // Delete the original iterator
622 // Determine if the next item is either the end of the list or younger
623 // than the new instruction. If so, then add in a new iterator right here.
624 // If not, then move along.
625 ListOrderEntry queue_entry;
626 OpClass op_class = (*list_order_it).queueType;
627 ListOrderIt next_it = list_order_it;
628
629 ++next_it;
630
631 queue_entry.queueType = op_class;
632 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
633
634 while (next_it != listOrder.end() &&
635 (*next_it).oldestInst < queue_entry.oldestInst) {
636 ++next_it;
637 }
638
639 readyIt[op_class] = listOrder.insert(next_it, queue_entry);
640}
641
642template <class Impl>
643void
644InstructionQueue<Impl>::processFUCompletion(DynInstPtr &inst, int fu_idx)
645{
646 // The CPU could have been sleeping until this op completed (*extremely*
647 // long latency op). Wake it if it was. This may be overkill.
648 if (isSwitchedOut()) {
649 return;
650 }
651
652 iewStage->wakeCPU();
653
654 if (fu_idx > -1)
655 fuPool->freeUnitNextCycle(fu_idx);
656
657 // @todo: Ensure that these FU Completions happen at the beginning
658 // of a cycle, otherwise they could add too many instructions to
659 // the queue.
660 issueToExecuteQueue->access(0)->size++;
661 instsToExecute.push_back(inst);
662}
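// Note on the fu_idx seen above: FUCompletion::process() passes
// freeFU ? fuIdx : -1, so -1 means the unit was already freed when the
// instruction issued (the pipelined case), while an index >= 0 belongs to an
// unpipelined unit that must be freed here.  A minimal sketch of the event's
// use, assuming an unpipelined unit with an 8-cycle issue latency:
//
//     FUCompletion *ev = new FUCompletion(inst, fu_idx, this);
//     ev->setFreeFU();                      // free the unit on completion
//     ev->schedule(curTick + cpu->cycles(8 - 1));
//
// When the event fires it calls processFUCompletion(inst, fu_idx), which
// releases the unit and queues the instruction for execution.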
663
664// @todo: Figure out a better way to remove the squashed items from the
665// lists. Checking the top item of each list to see if it's squashed
666// wastes time and forces jumps.
667template <class Impl>
668void
669InstructionQueue<Impl>::scheduleReadyInsts()
670{
671 DPRINTF(IQ, "Attempting to schedule ready instructions from "
672 "the IQ.\n");
673
674 IssueStruct *i2e_info = issueToExecuteQueue->access(0);
675
676 // Have iterator to head of the list
677 // While I haven't exceeded bandwidth or reached the end of the list,
678 // Try to get a FU that can do what this op needs.
679 // If successful, change the oldestInst to the new top of the list, put
680 // the queue in the proper place in the list.
681 // Increment the iterator.
682 // This will avoid trying to schedule a certain op class if there are no
683 // FUs that handle it.
684 ListOrderIt order_it = listOrder.begin();
685 ListOrderIt order_end_it = listOrder.end();
686 int total_issued = 0;
687
688 while (total_issued < totalWidth &&
689 iewStage->canIssue() &&
690 order_it != order_end_it) {
691 OpClass op_class = (*order_it).queueType;
692
693 assert(!readyInsts[op_class].empty());
694
695 DynInstPtr issuing_inst = readyInsts[op_class].top();
696
697 assert(issuing_inst->seqNum == (*order_it).oldestInst);
698
699 if (issuing_inst->isSquashed()) {
700 readyInsts[op_class].pop();
701
702 if (!readyInsts[op_class].empty()) {
703 moveToYoungerInst(order_it);
704 } else {
705 readyIt[op_class] = listOrder.end();
706 queueOnList[op_class] = false;
707 }
708
709 listOrder.erase(order_it++);
710
711 ++iqSquashedInstsIssued;
712
713 continue;
714 }
715
716 int idx = -2;
717 int op_latency = 1;
718 int tid = issuing_inst->threadNumber;
719
720 if (op_class != No_OpClass) {
721 idx = fuPool->getUnit(op_class);
722
723 if (idx > -1) {
724 op_latency = fuPool->getOpLatency(op_class);
725 }
726 }
727
728 // If we have an instruction that doesn't require a FU, or a
729 // valid FU, then schedule for execution.
730 if (idx == -2 || idx != -1) {
731 if (op_latency == 1) {
732 i2e_info->size++;
733 instsToExecute.push_back(issuing_inst);
734
735 // Add the FU onto the list of FU's to be freed next
736 // cycle if we used one.
737 if (idx >= 0)
738 fuPool->freeUnitNextCycle(idx);
739 } else {
740 int issue_latency = fuPool->getIssueLatency(op_class);
741 // Generate completion event for the FU
742 FUCompletion *execution = new FUCompletion(issuing_inst,
743 idx, this);
744
745 execution->schedule(curTick + cpu->cycles(issue_latency - 1));
746
747 // @todo: Enforce that issue_latency == 1 or op_latency
748 if (issue_latency > 1) {
749 // If FU isn't pipelined, then it must be freed
750 // upon the execution completing.
751 execution->setFreeFU();
752 } else {
753 // Add the FU onto the list of FU's to be freed next cycle.
754 fuPool->freeUnitNextCycle(idx);
755 }
756 }
757
758 DPRINTF(IQ, "Thread %i: Issuing instruction PC %#x "
759 "[sn:%lli]\n",
760 tid, issuing_inst->readPC(),
761 issuing_inst->seqNum);
762
763 readyInsts[op_class].pop();
764
765 if (!readyInsts[op_class].empty()) {
766 moveToYoungerInst(order_it);
767 } else {
768 readyIt[op_class] = listOrder.end();
769 queueOnList[op_class] = false;
770 }
771
772 issuing_inst->setIssued();
773 ++total_issued;
774
775 if (!issuing_inst->isMemRef()) {
776 // Memory instructions can not be freed from the IQ until they
777 // complete.
778 ++freeEntries;
779 count[tid]--;
780 issuing_inst->clearInIQ();
781 } else {
782 memDepUnit[tid].issue(issuing_inst);
783 }
784
785 listOrder.erase(order_it++);
786 statIssuedInstType[tid][op_class]++;
787 iewStage->incrWb(issuing_inst->seqNum);
786 } else {
787 statFuBusy[op_class]++;
788 fuBusy[tid]++;
789 ++order_it;
790 }
791 }
792
793 numIssuedDist.sample(total_issued);
794 iqInstsIssued+= total_issued;
795
796 // If we issued any instructions, tell the CPU we had activity.
797 if (total_issued) {
798 cpu->activityThisCycle();
799 } else {
800 DPRINTF(IQ, "Not able to schedule any instructions.\n");
801 }
802}
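// Summary of the FU-index convention used in the issue loop above (the
// example latency is an assumption, not taken from any FU pool config):
//
//     idx == -2 : op_class is No_OpClass, so no functional unit is needed.
//     idx == -1 : every unit of this class is busy; statFuBusy and fuBusy
//                 are incremented and the next op class is tried.
//     idx >=  0 : a unit was granted.  With op_latency == 1 the instruction
//                 goes straight onto instsToExecute and the unit is freed
//                 next cycle; otherwise an FUCompletion event is scheduled
//                 cpu->cycles(issue_latency - 1) ticks out, e.g. an assumed
//                 unpipelined 12-cycle divider holds its unit until that
//                 event runs processFUCompletion().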
803
804template <class Impl>
805void
806InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
807{
808 DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
809 "to execute.\n", inst);
810
811 NonSpecMapIt inst_it = nonSpecInsts.find(inst);
812
813 assert(inst_it != nonSpecInsts.end());
814
815 unsigned tid = (*inst_it).second->threadNumber;
816
817 (*inst_it).second->setCanIssue();
818
819 if (!(*inst_it).second->isMemRef()) {
820 addIfReady((*inst_it).second);
821 } else {
822 memDepUnit[tid].nonSpecInstReady((*inst_it).second);
823 }
824
825 (*inst_it).second = NULL;
826
827 nonSpecInsts.erase(inst_it);
828}
829
830template <class Impl>
831void
832InstructionQueue<Impl>::commit(const InstSeqNum &inst, unsigned tid)
833{
834 DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n",
835 tid,inst);
836
837 ListIt iq_it = instList[tid].begin();
838
839 while (iq_it != instList[tid].end() &&
840 (*iq_it)->seqNum <= inst) {
841 ++iq_it;
842 instList[tid].pop_front();
843 }
844
845 assert(freeEntries == (numEntries - countInsts()));
846}
847
848template <class Impl>
849int
850InstructionQueue<Impl>::wakeDependents(DynInstPtr &completed_inst)
851{
852 int dependents = 0;
853
854 DPRINTF(IQ, "Waking dependents of completed instruction.\n");
855
856 assert(!completed_inst->isSquashed());
857
858 // Tell the memory dependence unit to wake any dependents on this
859 // instruction if it is a memory instruction. Also complete the memory
860 // instruction at this point since we know it executed without issues.
861 // @todo: Might want to rename "completeMemInst" to something that
862 // indicates that it won't need to be replayed, and call this
863 // earlier. Might not be a big deal.
864 if (completed_inst->isMemRef()) {
865 memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
866 completeMemInst(completed_inst);
867 } else if (completed_inst->isMemBarrier() ||
868 completed_inst->isWriteBarrier()) {
869 memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
870 }
871
872 for (int dest_reg_idx = 0;
873 dest_reg_idx < completed_inst->numDestRegs();
874 dest_reg_idx++)
875 {
876 PhysRegIndex dest_reg =
877 completed_inst->renamedDestRegIdx(dest_reg_idx);
878
879 // Special case of uniq or control registers. They are not
880 // handled by the IQ and thus have no dependency graph entry.
881 // @todo Figure out a cleaner way to handle this.
882 if (dest_reg >= numPhysRegs) {
883 continue;
884 }
885
886 DPRINTF(IQ, "Waking any dependents on register %i.\n",
887 (int) dest_reg);
888
889 //Go through the dependency chain, marking the registers as
890 //ready within the waiting instructions.
891 DynInstPtr dep_inst = dependGraph.pop(dest_reg);
892
893 while (dep_inst) {
894 DPRINTF(IQ, "Waking up a dependent instruction, PC%#x.\n",
895 dep_inst->readPC());
896
897 // Might want to give more information to the instruction
898 // so that it knows which of its source registers is
899 // ready. However that would mean that the dependency
900 // graph entries would need to hold the src_reg_idx.
901 dep_inst->markSrcRegReady();
902
903 addIfReady(dep_inst);
904
905 dep_inst = dependGraph.pop(dest_reg);
906
907 ++dependents;
908 }
909
910 // Reset the head node now that all of its dependents have
911 // been woken up.
912 assert(dependGraph.empty(dest_reg));
913 dependGraph.clearInst(dest_reg);
914
915 // Mark the scoreboard as having that register ready.
916 regScoreboard[dest_reg] = true;
917 }
918 return dependents;
919}
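// Illustrative walk through the wake-up loop above (register and sequence
// numbers are assumed): suppose a completing add wrote physical register 12
// and two consumers were linked behind it.  The first dependGraph.pop(12)
// returns, say, [sn:21]; markSrcRegReady() retires one outstanding source
// and addIfReady() moves it to the ready lists once all of its sources are
// ready.  The second pop(12) wakes [sn:23] the same way, the next pop
// returns NULL and ends the loop, and regScoreboard[12] is then set so any
// later consumer dispatched into the IQ sees that register as ready.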
920
921template <class Impl>
922void
923InstructionQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
924{
925 OpClass op_class = ready_inst->opClass();
926
927 readyInsts[op_class].push(ready_inst);
928
929 // Will need to reorder the list if either a queue is not on the list,
930 // or it has an older instruction than last time.
931 if (!queueOnList[op_class]) {
932 addToOrderList(op_class);
933 } else if (readyInsts[op_class].top()->seqNum <
934 (*readyIt[op_class]).oldestInst) {
935 listOrder.erase(readyIt[op_class]);
936 addToOrderList(op_class);
937 }
938
939 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
940 "the ready list, PC %#x opclass:%i [sn:%lli].\n",
941 ready_inst->readPC(), op_class, ready_inst->seqNum);
942}
943
944template <class Impl>
945void
946InstructionQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
947{
948 memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
949}
950
951template <class Impl>
952void
953InstructionQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
954{
955 memDepUnit[replay_inst->threadNumber].replay(replay_inst);
956}
957
958template <class Impl>
959void
960InstructionQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
961{
962 int tid = completed_inst->threadNumber;
963
964 DPRINTF(IQ, "Completing mem instruction PC:%#x [sn:%lli]\n",
965 completed_inst->readPC(), completed_inst->seqNum);
966
967 ++freeEntries;
968
969 completed_inst->memOpDone = true;
970
971 memDepUnit[tid].completed(completed_inst);
972
973 count[tid]--;
974}
975
976template <class Impl>
977void
978InstructionQueue<Impl>::violation(DynInstPtr &store,
979 DynInstPtr &faulting_load)
980{
981 memDepUnit[store->threadNumber].violation(store, faulting_load);
982}
983
984template <class Impl>
985void
986InstructionQueue<Impl>::squash(unsigned tid)
987{
988 DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
989 "the IQ.\n", tid);
990
991 // Read instruction sequence number of last instruction out of the
992 // time buffer.
993 squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;
994
995 // Call doSquash if there are insts in the IQ
996 if (count[tid] > 0) {
997 doSquash(tid);
998 }
999
1000 // Also tell the memory dependence unit to squash.
1001 memDepUnit[tid].squash(squashedSeqNum[tid], tid);
1002}
1003
1004template <class Impl>
1005void
1006InstructionQueue<Impl>::doSquash(unsigned tid)
1007{
1008 // Start at the tail.
1009 ListIt squash_it = instList[tid].end();
1010 --squash_it;
1011
1012 DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
1013 tid, squashedSeqNum[tid]);
1014
1015 // Squash any instructions younger than the squashed sequence number
1016 // given.
1017 while (squash_it != instList[tid].end() &&
1018 (*squash_it)->seqNum > squashedSeqNum[tid]) {
1019
1020 DynInstPtr squashed_inst = (*squash_it);
1021
1022 // Only handle the instruction if it actually is in the IQ and
1023 // hasn't already been squashed in the IQ.
1024 if (squashed_inst->threadNumber != tid ||
1025 squashed_inst->isSquashedInIQ()) {
1026 --squash_it;
1027 continue;
1028 }
1029
1030 if (!squashed_inst->isIssued() ||
1031 (squashed_inst->isMemRef() &&
1032 !squashed_inst->memOpDone)) {
1033
1034 // Remove the instruction from the dependency list.
1035 if (!squashed_inst->isNonSpeculative() &&
1036 !squashed_inst->isStoreConditional() &&
1037 !squashed_inst->isMemBarrier() &&
1038 !squashed_inst->isWriteBarrier()) {
1039
1040 for (int src_reg_idx = 0;
1041 src_reg_idx < squashed_inst->numSrcRegs();
1042 src_reg_idx++)
1043 {
1044 PhysRegIndex src_reg =
1045 squashed_inst->renamedSrcRegIdx(src_reg_idx);
1046
1047 // Only remove it from the dependency graph if it
1048 // was placed there in the first place.
1049
1050 // Instead of doing a linked list traversal, we
1051 // can just remove these squashed instructions
1052 // either at issue time, or when the register is
1053 // overwritten. The only downside to this is it
1054 // leaves more room for error.
1055
1056 if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
1057 src_reg < numPhysRegs) {
1058 dependGraph.remove(src_reg, squashed_inst);
1059 }
1060
1061
1062 ++iqSquashedOperandsExamined;
1063 }
1064 } else {
1065 NonSpecMapIt ns_inst_it =
1066 nonSpecInsts.find(squashed_inst->seqNum);
1067 assert(ns_inst_it != nonSpecInsts.end());
1068
1069 (*ns_inst_it).second = NULL;
1070
1071 nonSpecInsts.erase(ns_inst_it);
1072
1073 ++iqSquashedNonSpecRemoved;
1074 }
1075
1076 // Might want to also clear out the head of the dependency graph.
1077
1078 // Mark it as squashed within the IQ.
1079 squashed_inst->setSquashedInIQ();
1080
1081 // @todo: Remove this hack where several statuses are set so the
1082 // inst will flow through the rest of the pipeline.
1083 squashed_inst->setIssued();
1084 squashed_inst->setCanCommit();
1085 squashed_inst->clearInIQ();
1086
1087 //Update Thread IQ Count
1088 count[squashed_inst->threadNumber]--;
1089
1090 ++freeEntries;
1091
1092 DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %#x "
1093 "squashed.\n",
1094 tid, squashed_inst->seqNum, squashed_inst->readPC());
1095 }
1096
1097 instList[tid].erase(squash_it--);
1098 ++iqSquashedInstsExamined;
1099 }
1100}
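// A small example of the squash walk above (sequence numbers are assumed):
// with squashedSeqNum[tid] == 100, the walk starts at the tail of
// instList[tid], removes instructions such as [sn:105] and [sn:103], and
// stops at the first instruction whose sequence number is <= 100.  Ordinary
// instructions have their unready sources unlinked from dependGraph;
// non-speculative, store-conditional and barrier instructions are instead
// dropped from the nonSpecInsts map.  Each instruction that was still
// occupying an entry (not yet issued, or a memory op that has not completed)
// gives that entry back through ++freeEntries.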
1101
1102template <class Impl>
1103bool
1104InstructionQueue<Impl>::addToDependents(DynInstPtr &new_inst)
1105{
1106 // Loop through the instruction's source registers, adding
1107 // them to the dependency list if they are not ready.
1108 int8_t total_src_regs = new_inst->numSrcRegs();
1109 bool return_val = false;
1110
1111 for (int src_reg_idx = 0;
1112 src_reg_idx < total_src_regs;
1113 src_reg_idx++)
1114 {
1115 // Only add it to the dependency graph if it's not ready.
1116 if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
1117 PhysRegIndex src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);
1118
1119 // Check the IQ's scoreboard to make sure the register
1120 // hasn't become ready while the instruction was in flight
1121 // between stages. Only if it really isn't ready should
1122 // it be added to the dependency graph.
1123 if (src_reg >= numPhysRegs) {
1124 continue;
1125 } else if (regScoreboard[src_reg] == false) {
1126 DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
1127 "is being added to the dependency chain.\n",
1128 new_inst->readPC(), src_reg);
1129
1130 dependGraph.insert(src_reg, new_inst);
1131
1132 // Change the return value to indicate that something
1133 // was added to the dependency graph.
1134 return_val = true;
1135 } else {
1136 DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
1137 "became ready before it reached the IQ.\n",
1138 new_inst->readPC(), src_reg);
1139 // Mark a register ready within the instruction.
1140 new_inst->markSrcRegReady(src_reg_idx);
1141 }
1142 }
1143 }
1144
1145 return return_val;
1146}
1147
1148template <class Impl>
1149void
1150InstructionQueue<Impl>::addToProducers(DynInstPtr &new_inst)
1151{
1152 // Nothing really needs to be marked when an instruction becomes
1153 // the producer of a register's value, but for convenience a ptr
1154 // to the producing instruction will be placed in the head node of
1155 // the dependency links.
1156 int8_t total_dest_regs = new_inst->numDestRegs();
1157
1158 for (int dest_reg_idx = 0;
1159 dest_reg_idx < total_dest_regs;
1160 dest_reg_idx++)
1161 {
1162 PhysRegIndex dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);
1163
1164 // Instructions that use the misc regs will have a reg number
1165 // higher than the normal physical registers. In this case these
1166 // registers are not renamed, and there is no need to track
1167 // dependencies as these instructions must be executed at commit.
1168 if (dest_reg >= numPhysRegs) {
1169 continue;
1170 }
1171
1172 if (!dependGraph.empty(dest_reg)) {
1173 dependGraph.dump();
1174 panic("Dependency graph %i not empty!", dest_reg);
1175 }
1176
1177 dependGraph.setInst(dest_reg, new_inst);
1178
1179 // Mark the scoreboard to say it's not yet ready.
1180 regScoreboard[dest_reg] = false;
1181 }
1182}
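// How the two helpers above cooperate, with an assumed pair of instructions:
// say instruction A is renamed to write physical register 5 and a younger
// instruction B reads it.  When A is inserted, addToProducers() records A at
// the head node for register 5 and clears regScoreboard[5].  When B is
// inserted later, addToDependents() sees regScoreboard[5] == false and links
// B behind A with dependGraph.insert(5, B), so B waits in the IQ until
// wakeDependents() on A pops it and marks that source ready.  Had A already
// completed, the scoreboard check would simply mark B's source ready on the
// spot.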
1183
1184template <class Impl>
1185void
1186InstructionQueue<Impl>::addIfReady(DynInstPtr &inst)
1187{
1188 // If the instruction now has all of its source registers
1189 // available, then add it to the list of ready instructions.
1190 if (inst->readyToIssue()) {
1191
1192 //Add the instruction to the proper ready list.
1193 if (inst->isMemRef()) {
1194
1195 DPRINTF(IQ, "Checking if memory instruction can issue.\n");
1196
1197 // Message to the mem dependence unit that this instruction has
1198 // its registers ready.
1199 memDepUnit[inst->threadNumber].regsReady(inst);
1200
1201 return;
1202 }
1203
1204 OpClass op_class = inst->opClass();
1205
1206 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
1207 "the ready list, PC %#x opclass:%i [sn:%lli].\n",
1208 inst->readPC(), op_class, inst->seqNum);
1209
1210 readyInsts[op_class].push(inst);
1211
1212 // Will need to reorder the list if either a queue is not on the list,
1213 // or it has an older instruction than last time.
1214 if (!queueOnList[op_class]) {
1215 addToOrderList(op_class);
1216 } else if (readyInsts[op_class].top()->seqNum <
1217 (*readyIt[op_class]).oldestInst) {
1218 listOrder.erase(readyIt[op_class]);
1219 addToOrderList(op_class);
1220 }
1221 }
1222}
1223
1224template <class Impl>
1225int
1226InstructionQueue<Impl>::countInsts()
1227{
1228#if 0
1229    //ksewell: This works but could definitely use a cleaner rewrite
1230    //with a more intuitive way of counting. Right now it's
1231    //just brute force.
1232 // Change the #if if you want to use this method.
1233 int total_insts = 0;
1234
1235 for (int i = 0; i < numThreads; ++i) {
1236 ListIt count_it = instList[i].begin();
1237
1238 while (count_it != instList[i].end()) {
1239 if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
1240 if (!(*count_it)->isIssued()) {
1241 ++total_insts;
1242 } else if ((*count_it)->isMemRef() &&
1243 !(*count_it)->memOpDone) {
1244 // Loads that have not been marked as executed still count
1245 // towards the total instructions.
1246 ++total_insts;
1247 }
1248 }
1249
1250 ++count_it;
1251 }
1252 }
1253
1254 return total_insts;
1255#else
1256 return numEntries - freeEntries;
1257#endif
1258}
1259
1260template <class Impl>
1261void
1262InstructionQueue<Impl>::dumpLists()
1263{
1264 for (int i = 0; i < Num_OpClasses; ++i) {
1265 cprintf("Ready list %i size: %i\n", i, readyInsts[i].size());
1266
1267 cprintf("\n");
1268 }
1269
1270 cprintf("Non speculative list size: %i\n", nonSpecInsts.size());
1271
1272 NonSpecMapIt non_spec_it = nonSpecInsts.begin();
1273 NonSpecMapIt non_spec_end_it = nonSpecInsts.end();
1274
1275 cprintf("Non speculative list: ");
1276
1277 while (non_spec_it != non_spec_end_it) {
1278 cprintf("%#x [sn:%lli] ", (*non_spec_it).second->readPC(),
1279 (*non_spec_it).second->seqNum);
1280 ++non_spec_it;
1281 }
1282
1283 cprintf("\n");
1284
1285 ListOrderIt list_order_it = listOrder.begin();
1286 ListOrderIt list_order_end_it = listOrder.end();
1287 int i = 1;
1288
1289 cprintf("List order: ");
1290
1291 while (list_order_it != list_order_end_it) {
1292 cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
1293 (*list_order_it).oldestInst);
1294
1295 ++list_order_it;
1296 ++i;
1297 }
1298
1299 cprintf("\n");
1300}
1301
1302
1303template <class Impl>
1304void
1305InstructionQueue<Impl>::dumpInsts()
1306{
1307 for (int i = 0; i < numThreads; ++i) {
1308 int num = 0;
1309 int valid_num = 0;
1310 ListIt inst_list_it = instList[i].begin();
1311
1312 while (inst_list_it != instList[i].end())
1313 {
1314 cprintf("Instruction:%i\n",
1315 num);
1316 if (!(*inst_list_it)->isSquashed()) {
1317 if (!(*inst_list_it)->isIssued()) {
1318 ++valid_num;
1319 cprintf("Count:%i\n", valid_num);
1320 } else if ((*inst_list_it)->isMemRef() &&
1321 !(*inst_list_it)->memOpDone) {
1322 // Loads that have not been marked as executed
1323 // still count towards the total instructions.
1324 ++valid_num;
1325 cprintf("Count:%i\n", valid_num);
1326 }
1327 }
1328
1329 cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
1330 "Issued:%i\nSquashed:%i\n",
1331 (*inst_list_it)->readPC(),
1332 (*inst_list_it)->seqNum,
1333 (*inst_list_it)->threadNumber,
1334 (*inst_list_it)->isIssued(),
1335 (*inst_list_it)->isSquashed());
1336
1337 if ((*inst_list_it)->isMemRef()) {
1338 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
1339 }
1340
1341 cprintf("\n");
1342
1343 ++inst_list_it;
1344 ++num;
1345 }
1346 }
1347
1348 cprintf("Insts to Execute list:\n");
1349
1350 int num = 0;
1351 int valid_num = 0;
1352 ListIt inst_list_it = instsToExecute.begin();
1353
1354 while (inst_list_it != instsToExecute.end())
1355 {
1356 cprintf("Instruction:%i\n",
1357 num);
1358 if (!(*inst_list_it)->isSquashed()) {
1359 if (!(*inst_list_it)->isIssued()) {
1360 ++valid_num;
1361 cprintf("Count:%i\n", valid_num);
1362 } else if ((*inst_list_it)->isMemRef() &&
1363 !(*inst_list_it)->memOpDone) {
1364 // Loads that have not been marked as executed
1365 // still count towards the total instructions.
1366 ++valid_num;
1367 cprintf("Count:%i\n", valid_num);
1368 }
1369 }
1370
1371 cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
1372 "Issued:%i\nSquashed:%i\n",
1373 (*inst_list_it)->readPC(),
1374 (*inst_list_it)->seqNum,
1375 (*inst_list_it)->threadNumber,
1376 (*inst_list_it)->isIssued(),
1377 (*inst_list_it)->isSquashed());
1378
1379 if ((*inst_list_it)->isMemRef()) {
1380 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
1381 }
1382
1383 cprintf("\n");
1384
1385 ++inst_list_it;
1386 ++num;
1387 }
1388}
788 } else {
789 statFuBusy[op_class]++;
790 fuBusy[tid]++;
791 ++order_it;
792 }
793 }
794
795 numIssuedDist.sample(total_issued);
796 iqInstsIssued += total_issued;
797
798 // If we issued any instructions, tell the CPU we had activity.
799 if (total_issued) {
800 cpu->activityThisCycle();
801 } else {
802 DPRINTF(IQ, "Not able to schedule any instructions.\n");
803 }
804}
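// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: a minimal stand-in
// for the per-cycle issue bookkeeping above (numIssuedDist / iqInstsIssued),
// assuming a fixed issue width.  The names below are hypothetical.
#include <cstdint>
#include <iostream>
#include <vector>

namespace sketch {

struct IssueStats {
    std::vector<std::uint64_t> dist;    // dist[n] = cycles that issued n insts
    std::uint64_t totalIssued = 0;

    explicit IssueStats(unsigned issue_width) : dist(issue_width + 1, 0) {}

    // Called once per cycle with the number of instructions issued.
    void sample(unsigned issued) {
        dist.at(issued)++;              // throws if issued > issue width
        totalIssued += issued;
    }
};

} // namespace sketch

int main() {
    sketch::IssueStats stats(4);        // 4-wide issue, as an example
    stats.sample(3);
    stats.sample(0);                    // a cycle with no schedulable insts
    std::cout << "total issued: " << stats.totalIssued << '\n';
    return 0;
}
// ---------------------------------------------------------------------------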
805
806template <class Impl>
807void
808InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
809{
810 DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
811 "to execute.\n", inst);
812
813 NonSpecMapIt inst_it = nonSpecInsts.find(inst);
814
815 assert(inst_it != nonSpecInsts.end());
816
817 unsigned tid = (*inst_it).second->threadNumber;
818
819 (*inst_it).second->setCanIssue();
820
821 if (!(*inst_it).second->isMemRef()) {
822 addIfReady((*inst_it).second);
823 } else {
824 memDepUnit[tid].nonSpecInstReady((*inst_it).second);
825 }
826
827 (*inst_it).second = NULL;
828
829 nonSpecInsts.erase(inst_it);
830}
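// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: the seqNum-keyed
// map lookup that scheduleNonSpec() performs on nonSpecInsts once commit
// says a non-speculative instruction may execute.  NonSpecTracker and Inst
// are hypothetical names.
#include <cassert>
#include <cstdint>
#include <map>
#include <memory>

namespace sketch {

using SeqNum = std::uint64_t;

struct Inst {
    bool canIssue = false;
};

struct NonSpecTracker {
    std::map<SeqNum, std::shared_ptr<Inst>> insts;

    // Look the instruction up by sequence number, mark it issuable, and
    // drop the tracking entry.  In the real IQ a memory reference would be
    // handed to the memory dependence unit instead of the ready lists.
    void schedule(SeqNum sn) {
        auto it = insts.find(sn);
        assert(it != insts.end());
        it->second->canIssue = true;
        insts.erase(it);
    }
};

} // namespace sketch

int main() {
    sketch::NonSpecTracker tracker;
    tracker.insts[7] = std::make_shared<sketch::Inst>();
    tracker.schedule(7);
    return 0;
}
// ---------------------------------------------------------------------------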
831
832template <class Impl>
833void
834InstructionQueue<Impl>::commit(const InstSeqNum &inst, unsigned tid)
835{
836 DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n",
837 tid,inst);
838
839 ListIt iq_it = instList[tid].begin();
840
841 while (iq_it != instList[tid].end() &&
842 (*iq_it)->seqNum <= inst) {
843 ++iq_it;
844 instList[tid].pop_front();
845 }
846
847 assert(freeEntries == (numEntries - countInsts()));
848}
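// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: the head-of-list
// trimming that commit() performs, assuming a per-thread std::list ordered
// by sequence number.  Names are hypothetical.
#include <cstdint>
#include <iostream>
#include <list>

namespace sketch {

using SeqNum = std::uint64_t;

// Drop every queue entry at or below the committed sequence number and
// return how many entries were removed.
inline unsigned commitUpTo(std::list<SeqNum> &inst_list, SeqNum committed) {
    unsigned removed = 0;
    while (!inst_list.empty() && inst_list.front() <= committed) {
        inst_list.pop_front();
        ++removed;
    }
    return removed;
}

} // namespace sketch

int main() {
    std::list<sketch::SeqNum> insts{3, 5, 9, 12};
    std::cout << sketch::commitUpTo(insts, 9) << '\n';   // prints 3
    return 0;
}
// ---------------------------------------------------------------------------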
849
850template <class Impl>
851int
852InstructionQueue<Impl>::wakeDependents(DynInstPtr &completed_inst)
853{
854 int dependents = 0;
855
856 DPRINTF(IQ, "Waking dependents of completed instruction.\n");
857
858 assert(!completed_inst->isSquashed());
859
860 // Tell the memory dependence unit to wake any dependents on this
861 // instruction if it is a memory instruction. Also complete the memory
862 // instruction at this point since we know it executed without issues.
863 // @todo: Might want to rename "completeMemInst" to something that
864 // indicates that it won't need to be replayed, and call this
865 // earlier. Might not be a big deal.
866 if (completed_inst->isMemRef()) {
867 memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
868 completeMemInst(completed_inst);
869 } else if (completed_inst->isMemBarrier() ||
870 completed_inst->isWriteBarrier()) {
871 memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
872 }
873
874 for (int dest_reg_idx = 0;
875 dest_reg_idx < completed_inst->numDestRegs();
876 dest_reg_idx++)
877 {
878 PhysRegIndex dest_reg =
879 completed_inst->renamedDestRegIdx(dest_reg_idx);
880
881 // Special case of uniq or control registers. They are not
882 // handled by the IQ and thus have no dependency graph entry.
883 // @todo Figure out a cleaner way to handle this.
884 if (dest_reg >= numPhysRegs) {
885 continue;
886 }
887
888 DPRINTF(IQ, "Waking any dependents on register %i.\n",
889 (int) dest_reg);
890
891 //Go through the dependency chain, marking the registers as
892 //ready within the waiting instructions.
893 DynInstPtr dep_inst = dependGraph.pop(dest_reg);
894
895 while (dep_inst) {
896 DPRINTF(IQ, "Waking up a dependent instruction, PC%#x.\n",
897 dep_inst->readPC());
898
899 // Might want to give more information to the instruction
900 // so that it knows which of its source registers is
901 // ready. However that would mean that the dependency
902 // graph entries would need to hold the src_reg_idx.
903 dep_inst->markSrcRegReady();
904
905 addIfReady(dep_inst);
906
907 dep_inst = dependGraph.pop(dest_reg);
908
909 ++dependents;
910 }
911
912 // Reset the head node now that all of its dependents have
913 // been woken up.
914 assert(dependGraph.empty(dest_reg));
915 dependGraph.clearInst(dest_reg);
916
917 // Mark the scoreboard as having that register ready.
918 regScoreboard[dest_reg] = true;
919 }
920 return dependents;
921}
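// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: the wake-up walk in
// wakeDependents(), assuming each waiting instruction simply counts its
// outstanding source registers.  MiniWaker and its members are hypothetical.
#include <cstddef>
#include <iostream>
#include <list>
#include <memory>
#include <vector>

namespace sketch {

struct WaitingInst {
    int srcsOutstanding = 0;
    bool readyToIssue() const { return srcsOutstanding == 0; }
};

struct MiniWaker {
    // waiting[reg] = instructions blocked on that physical register
    std::vector<std::list<std::shared_ptr<WaitingInst>>> waiting;
    std::vector<bool> scoreboard;

    explicit MiniWaker(std::size_t num_regs)
        : waiting(num_regs), scoreboard(num_regs, false) {}

    // Pop the whole chain for a completed destination register, mark one
    // source ready per waiter, and report how many dependents were woken.
    int wake(std::size_t dest_reg) {
        int dependents = 0;
        while (!waiting[dest_reg].empty()) {
            auto dep = waiting[dest_reg].front();
            waiting[dest_reg].pop_front();
            --dep->srcsOutstanding;         // markSrcRegReady() stand-in
            ++dependents;                   // addIfReady() would run here
        }
        scoreboard[dest_reg] = true;        // register value is now ready
        return dependents;
    }
};

} // namespace sketch

int main() {
    sketch::MiniWaker waker(4);
    auto inst = std::make_shared<sketch::WaitingInst>();
    inst->srcsOutstanding = 1;
    waker.waiting[2].push_back(inst);
    std::cout << waker.wake(2) << ' ' << inst->readyToIssue() << '\n'; // 1 1
    return 0;
}
// ---------------------------------------------------------------------------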
922
923template <class Impl>
924void
925InstructionQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
926{
927 OpClass op_class = ready_inst->opClass();
928
929 readyInsts[op_class].push(ready_inst);
930
931 // The order list needs updating if this queue is not yet on it, or
932 // if the queue's oldest instruction is now older than it was before.
933 if (!queueOnList[op_class]) {
934 addToOrderList(op_class);
935 } else if (readyInsts[op_class].top()->seqNum <
936 (*readyIt[op_class]).oldestInst) {
937 listOrder.erase(readyIt[op_class]);
938 addToOrderList(op_class);
939 }
940
941 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
942 "the ready list, PC %#x opclass:%i [sn:%lli].\n",
943 ready_inst->readPC(), op_class, ready_inst->seqNum);
944}
945
946template <class Impl>
947void
948InstructionQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
949{
950 memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
951}
952
953template <class Impl>
954void
955InstructionQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
956{
957 memDepUnit[replay_inst->threadNumber].replay(replay_inst);
958}
959
960template <class Impl>
961void
962InstructionQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
963{
964 int tid = completed_inst->threadNumber;
965
966 DPRINTF(IQ, "Completing mem instruction PC:%#x [sn:%lli]\n",
967 completed_inst->readPC(), completed_inst->seqNum);
968
969 ++freeEntries;
970
971 completed_inst->memOpDone = true;
972
973 memDepUnit[tid].completed(completed_inst);
974
975 count[tid]--;
976}
977
978template <class Impl>
979void
980InstructionQueue<Impl>::violation(DynInstPtr &store,
981 DynInstPtr &faulting_load)
982{
983 memDepUnit[store->threadNumber].violation(store, faulting_load);
984}
985
986template <class Impl>
987void
988InstructionQueue<Impl>::squash(unsigned tid)
989{
990 DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
991 "the IQ.\n", tid);
992
993 // Read instruction sequence number of last instruction out of the
994 // time buffer.
995 squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;
996
997 // Call doSquash if there are insts in the IQ
998 if (count[tid] > 0) {
999 doSquash(tid);
1000 }
1001
1002 // Also tell the memory dependence unit to squash.
1003 memDepUnit[tid].squash(squashedSeqNum[tid], tid);
1004}
1005
1006template <class Impl>
1007void
1008InstructionQueue<Impl>::doSquash(unsigned tid)
1009{
1010 // Start at the tail.
1011 ListIt squash_it = instList[tid].end();
1012 --squash_it;
1013
1014 DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
1015 tid, squashedSeqNum[tid]);
1016
1017 // Squash any instructions younger than the squashed sequence number
1018 // given.
1019 while (squash_it != instList[tid].end() &&
1020 (*squash_it)->seqNum > squashedSeqNum[tid]) {
1021
1022 DynInstPtr squashed_inst = (*squash_it);
1023
1024 // Only handle the instruction if it actually is in the IQ and
1025 // hasn't already been squashed in the IQ.
1026 if (squashed_inst->threadNumber != tid ||
1027 squashed_inst->isSquashedInIQ()) {
1028 --squash_it;
1029 continue;
1030 }
1031
1032 if (!squashed_inst->isIssued() ||
1033 (squashed_inst->isMemRef() &&
1034 !squashed_inst->memOpDone)) {
1035
1036 // Remove the instruction from the dependency list.
1037 if (!squashed_inst->isNonSpeculative() &&
1038 !squashed_inst->isStoreConditional() &&
1039 !squashed_inst->isMemBarrier() &&
1040 !squashed_inst->isWriteBarrier()) {
1041
1042 for (int src_reg_idx = 0;
1043 src_reg_idx < squashed_inst->numSrcRegs();
1044 src_reg_idx++)
1045 {
1046 PhysRegIndex src_reg =
1047 squashed_inst->renamedSrcRegIdx(src_reg_idx);
1048
1049 // Only remove it from the dependency graph if it
1050 // was placed there in the first place.
1051
1052 // Instead of doing a linked list traversal, we
1053 // can just remove these squashed instructions
1054 // either at issue time, or when the register is
1055 // overwritten. The only downside to this is it
1056 // leaves more room for error.
1057
1058 if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
1059 src_reg < numPhysRegs) {
1060 dependGraph.remove(src_reg, squashed_inst);
1061 }
1062
1063
1064 ++iqSquashedOperandsExamined;
1065 }
1066 } else {
1067 NonSpecMapIt ns_inst_it =
1068 nonSpecInsts.find(squashed_inst->seqNum);
1069 assert(ns_inst_it != nonSpecInsts.end());
1070
1071 (*ns_inst_it).second = NULL;
1072
1073 nonSpecInsts.erase(ns_inst_it);
1074
1075 ++iqSquashedNonSpecRemoved;
1076 }
1077
1078 // Might want to also clear out the head of the dependency graph.
1079
1080 // Mark it as squashed within the IQ.
1081 squashed_inst->setSquashedInIQ();
1082
1083 // @todo: Remove this hack where several statuses are set so the
1084 // inst will flow through the rest of the pipeline.
1085 squashed_inst->setIssued();
1086 squashed_inst->setCanCommit();
1087 squashed_inst->clearInIQ();
1088
1089 //Update Thread IQ Count
1090 count[squashed_inst->threadNumber]--;
1091
1092 ++freeEntries;
1093
1094 DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %#x "
1095 "squashed.\n",
1096 tid, squashed_inst->seqNum, squashed_inst->readPC());
1097 }
1098
1099 instList[tid].erase(squash_it--);
1100 ++iqSquashedInstsExamined;
1101 }
1102}
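// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: a tail-first erase
// pattern in the spirit of doSquash(), removing every entry younger than the
// squash point from a seqNum-ordered std::list.  Names are hypothetical.
#include <cstdint>
#include <iostream>
#include <list>

namespace sketch {

using SeqNum = std::uint64_t;

// Erase all entries with seqNum > squash_point, walking from the tail so the
// youngest instructions are discarded first, and return the number removed.
inline unsigned squashYoungerThan(std::list<SeqNum> &inst_list,
                                  SeqNum squash_point) {
    unsigned removed = 0;
    auto it = inst_list.end();
    while (it != inst_list.begin()) {
        --it;
        if (*it > squash_point) {
            it = inst_list.erase(it);   // erase returns the next element
            ++removed;
        } else {
            break;                      // everything older is kept
        }
    }
    return removed;
}

} // namespace sketch

int main() {
    std::list<sketch::SeqNum> insts{3, 5, 9, 12, 15};
    std::cout << sketch::squashYoungerThan(insts, 9) << '\n';   // prints 2
    return 0;
}
// ---------------------------------------------------------------------------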
1103
1104template <class Impl>
1105bool
1106InstructionQueue<Impl>::addToDependents(DynInstPtr &new_inst)
1107{
1108 // Loop through the instruction's source registers, adding
1109 // them to the dependency list if they are not ready.
1110 int8_t total_src_regs = new_inst->numSrcRegs();
1111 bool return_val = false;
1112
1113 for (int src_reg_idx = 0;
1114 src_reg_idx < total_src_regs;
1115 src_reg_idx++)
1116 {
1117 // Only add it to the dependency graph if it's not ready.
1118 if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
1119 PhysRegIndex src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);
1120
1121 // Check the IQ's scoreboard to make sure the register
1122 // hasn't become ready while the instruction was in flight
1123 // between stages. Only if it really isn't ready should
1124 // it be added to the dependency graph.
1125 if (src_reg >= numPhysRegs) {
1126 continue;
1127 } else if (regScoreboard[src_reg] == false) {
1128 DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
1129 "is being added to the dependency chain.\n",
1130 new_inst->readPC(), src_reg);
1131
1132 dependGraph.insert(src_reg, new_inst);
1133
1134 // Change the return value to indicate that something
1135 // was added to the dependency graph.
1136 return_val = true;
1137 } else {
1138 DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
1139 "became ready before it reached the IQ.\n",
1140 new_inst->readPC(), src_reg);
1141 // Mark a register ready within the instruction.
1142 new_inst->markSrcRegReady(src_reg_idx);
1143 }
1144 }
1145 }
1146
1147 return return_val;
1148}
1149
1150template <class Impl>
1151void
1152InstructionQueue<Impl>::addToProducers(DynInstPtr &new_inst)
1153{
1154 // Nothing really needs to be marked when an instruction becomes
1155 // the producer of a register's value, but for convenience a ptr
1156 // to the producing instruction will be placed in the head node of
1157 // the dependency links.
1158 int8_t total_dest_regs = new_inst->numDestRegs();
1159
1160 for (int dest_reg_idx = 0;
1161 dest_reg_idx < total_dest_regs;
1162 dest_reg_idx++)
1163 {
1164 PhysRegIndex dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);
1165
1166 // Instructions that use the misc regs will have a reg number
1167 // above the range of the normal physical registers. These
1168 // registers are not renamed, so there is no need to track
1169 // dependencies, as such instructions are executed at commit.
1170 if (dest_reg >= numPhysRegs) {
1171 continue;
1172 }
1173
1174 if (!dependGraph.empty(dest_reg)) {
1175 dependGraph.dump();
1176 panic("Dependency graph %i not empty!", dest_reg);
1177 }
1178
1179 dependGraph.setInst(dest_reg, new_inst);
1180
1181 // Mark the scoreboard to say it's not yet ready.
1182 regScoreboard[dest_reg] = false;
1183 }
1184}
1185
1186template <class Impl>
1187void
1188InstructionQueue<Impl>::addIfReady(DynInstPtr &inst)
1189{
1190 // If the instruction now has all of its source registers
1191 // available, then add it to the list of ready instructions.
1192 if (inst->readyToIssue()) {
1193
1194 //Add the instruction to the proper ready list.
1195 if (inst->isMemRef()) {
1196
1197 DPRINTF(IQ, "Checking if memory instruction can issue.\n");
1198
1199 // Notify the memory dependence unit that this instruction has
1200 // its registers ready.
1201 memDepUnit[inst->threadNumber].regsReady(inst);
1202
1203 return;
1204 }
1205
1206 OpClass op_class = inst->opClass();
1207
1208 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
1209 "the ready list, PC %#x opclass:%i [sn:%lli].\n",
1210 inst->readPC(), op_class, inst->seqNum);
1211
1212 readyInsts[op_class].push(inst);
1213
1214 // The order list needs updating if this queue is not yet on it, or
1215 // if the queue's oldest instruction is now older than it was before.
1216 if (!queueOnList[op_class]) {
1217 addToOrderList(op_class);
1218 } else if (readyInsts[op_class].top()->seqNum <
1219 (*readyIt[op_class]).oldestInst) {
1220 listOrder.erase(readyIt[op_class]);
1221 addToOrderList(op_class);
1222 }
1223 }
1224}
1225
1226template <class Impl>
1227int
1228InstructionQueue<Impl>::countInsts()
1229{
1230#if 0
1231 //ksewell: This works but could definitely use a cleaner rewrite
1232 //with a more intuitive way of counting. Right now it's
1233 //just brute force.
1234 // Change the #if 0 above to enable this method.
1235 int total_insts = 0;
1236
1237 for (int i = 0; i < numThreads; ++i) {
1238 ListIt count_it = instList[i].begin();
1239
1240 while (count_it != instList[i].end()) {
1241 if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
1242 if (!(*count_it)->isIssued()) {
1243 ++total_insts;
1244 } else if ((*count_it)->isMemRef() &&
1245 !(*count_it)->memOpDone) {
1246 // Loads that have not been marked as executed still count
1247 // towards the total instructions.
1248 ++total_insts;
1249 }
1250 }
1251
1252 ++count_it;
1253 }
1254 }
1255
1256 return total_insts;
1257#else
1258 return numEntries - freeEntries;
1259#endif
1260}
1261
1262template <class Impl>
1263void
1264InstructionQueue<Impl>::dumpLists()
1265{
1266 for (int i = 0; i < Num_OpClasses; ++i) {
1267 cprintf("Ready list %i size: %i\n", i, readyInsts[i].size());
1268
1269 cprintf("\n");
1270 }
1271
1272 cprintf("Non speculative list size: %i\n", nonSpecInsts.size());
1273
1274 NonSpecMapIt non_spec_it = nonSpecInsts.begin();
1275 NonSpecMapIt non_spec_end_it = nonSpecInsts.end();
1276
1277 cprintf("Non speculative list: ");
1278
1279 while (non_spec_it != non_spec_end_it) {
1280 cprintf("%#x [sn:%lli] ", (*non_spec_it).second->readPC(),
1281 (*non_spec_it).second->seqNum);
1282 ++non_spec_it;
1283 }
1284
1285 cprintf("\n");
1286
1287 ListOrderIt list_order_it = listOrder.begin();
1288 ListOrderIt list_order_end_it = listOrder.end();
1289 int i = 1;
1290
1291 cprintf("List order: ");
1292
1293 while (list_order_it != list_order_end_it) {
1294 cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
1295 (*list_order_it).oldestInst);
1296
1297 ++list_order_it;
1298 ++i;
1299 }
1300
1301 cprintf("\n");
1302}
1303
1304
1305template <class Impl>
1306void
1307InstructionQueue<Impl>::dumpInsts()
1308{
1309 for (int i = 0; i < numThreads; ++i) {
1310 int num = 0;
1311 int valid_num = 0;
1312 ListIt inst_list_it = instList[i].begin();
1313
1314 while (inst_list_it != instList[i].end())
1315 {
1316 cprintf("Instruction:%i\n",
1317 num);
1318 if (!(*inst_list_it)->isSquashed()) {
1319 if (!(*inst_list_it)->isIssued()) {
1320 ++valid_num;
1321 cprintf("Count:%i\n", valid_num);
1322 } else if ((*inst_list_it)->isMemRef() &&
1323 !(*inst_list_it)->memOpDone) {
1324 // Loads that have not been marked as executed
1325 // still count towards the total instructions.
1326 ++valid_num;
1327 cprintf("Count:%i\n", valid_num);
1328 }
1329 }
1330
1331 cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
1332 "Issued:%i\nSquashed:%i\n",
1333 (*inst_list_it)->readPC(),
1334 (*inst_list_it)->seqNum,
1335 (*inst_list_it)->threadNumber,
1336 (*inst_list_it)->isIssued(),
1337 (*inst_list_it)->isSquashed());
1338
1339 if ((*inst_list_it)->isMemRef()) {
1340 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
1341 }
1342
1343 cprintf("\n");
1344
1345 ++inst_list_it;
1346 ++num;
1347 }
1348 }
1349
1350 cprintf("Insts to Execute list:\n");
1351
1352 int num = 0;
1353 int valid_num = 0;
1354 ListIt inst_list_it = instsToExecute.begin();
1355
1356 while (inst_list_it != instsToExecute.end())
1357 {
1358 cprintf("Instruction:%i\n",
1359 num);
1360 if (!(*inst_list_it)->isSquashed()) {
1361 if (!(*inst_list_it)->isIssued()) {
1362 ++valid_num;
1363 cprintf("Count:%i\n", valid_num);
1364 } else if ((*inst_list_it)->isMemRef() &&
1365 !(*inst_list_it)->memOpDone) {
1366 // Loads that have not been marked as executed
1367 // still count towards the total instructions.
1368 ++valid_num;
1369 cprintf("Count:%i\n", valid_num);
1370 }
1371 }
1372
1373 cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
1374 "Issued:%i\nSquashed:%i\n",
1375 (*inst_list_it)->readPC(),
1376 (*inst_list_it)->seqNum,
1377 (*inst_list_it)->threadNumber,
1378 (*inst_list_it)->isIssued(),
1379 (*inst_list_it)->isSquashed());
1380
1381 if ((*inst_list_it)->isMemRef()) {
1382 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
1383 }
1384
1385 cprintf("\n");
1386
1387 ++inst_list_it;
1388 ++num;
1389 }
1390}