inst_queue_impl.hh (4033:7bb1223f9645) inst_queue_impl.hh (4167:ce5d0f62f13b)
1/*
2 * Copyright (c) 2004-2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Kevin Lim
29 * Korey Sewell
30 */
31
32#include <limits>
33#include <vector>
34
35#include "sim/core.hh"
36
37#include "cpu/o3/fu_pool.hh"
38#include "cpu/o3/inst_queue.hh"
39
40template <class Impl>
41InstructionQueue<Impl>::FUCompletion::FUCompletion(DynInstPtr &_inst,
42 int fu_idx,
43 InstructionQueue<Impl> *iq_ptr)
44 : Event(&mainEventQueue, Stat_Event_Pri),
45 inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false)
46{
47 this->setFlags(Event::AutoDelete);
48}
49
50template <class Impl>
51void
52InstructionQueue<Impl>::FUCompletion::process()
53{
54 iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1);
55 inst = NULL;
56}
57
58
59template <class Impl>
60const char *
61InstructionQueue<Impl>::FUCompletion::description()
62{
63 return "Functional unit completion event";
64}
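// Summary of the completion-event mechanism (drawn from the code above and
// from scheduleReadyInsts() below): an FUCompletion event is only scheduled
// for operations whose latency is greater than one cycle.  When it fires,
// process() forwards the instruction to processFUCompletion(), passing the
// functional unit index only when setFreeFU() was called, i.e. when the unit
// is unpipelined and has to stay allocated until the operation finishes; a
// pipelined unit is instead freed for the next cycle at issue time.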
65
66template <class Impl>
67InstructionQueue<Impl>::InstructionQueue(Params *params)
68 : fuPool(params->fuPool),
69 numEntries(params->numIQEntries),
70 totalWidth(params->issueWidth),
71 numPhysIntRegs(params->numPhysIntRegs),
72 numPhysFloatRegs(params->numPhysFloatRegs),
73 commitToIEWDelay(params->commitToIEWDelay)
74{
75 assert(fuPool);
76
77 switchedOut = false;
78
79 numThreads = params->numberOfThreads;
80
81 // Set the number of physical registers as the number of int + float
82 numPhysRegs = numPhysIntRegs + numPhysFloatRegs;
83
84 DPRINTF(IQ, "There are %i physical registers.\n", numPhysRegs);
85
86 //Create an entry for each physical register within the
87 //dependency graph.
88 dependGraph.resize(numPhysRegs);
89
90 // Resize the register scoreboard.
91 regScoreboard.resize(numPhysRegs);
92
93 //Initialize Mem Dependence Units
94 for (int i = 0; i < numThreads; i++) {
95 memDepUnit[i].init(params,i);
96 memDepUnit[i].setIQ(this);
97 }
98
99 resetState();
100
101 std::string policy = params->smtIQPolicy;
102
103 //Convert string to lowercase
104 std::transform(policy.begin(), policy.end(), policy.begin(),
105 (int(*)(int)) tolower);
106
107 //Figure out resource sharing policy
108 if (policy == "dynamic") {
109 iqPolicy = Dynamic;
110
111        //Set max entries to the total IQ capacity
112 for (int i = 0; i < numThreads; i++) {
113 maxEntries[i] = numEntries;
114 }
115
116 } else if (policy == "partitioned") {
117 iqPolicy = Partitioned;
118
119        //@todo: Make this work when part_amt doesn't divide evenly.
120 int part_amt = numEntries / numThreads;
121
122        //Divide the IQ up evenly
123 for (int i = 0; i < numThreads; i++) {
124 maxEntries[i] = part_amt;
125 }
126
127 DPRINTF(IQ, "IQ sharing policy set to Partitioned:"
128                " %i entries per thread.\n", part_amt);
129
130 } else if (policy == "threshold") {
131 iqPolicy = Threshold;
132
133 double threshold = (double)params->smtIQThreshold / 100;
134
135 int thresholdIQ = (int)((double)threshold * numEntries);
136
137 //Divide up by threshold amount
138 for (int i = 0; i < numThreads; i++) {
139 maxEntries[i] = thresholdIQ;
140 }
141
142 DPRINTF(IQ, "IQ sharing policy set to Threshold:"
143                " %i entries per thread.\n", thresholdIQ);
144 } else {
145        assert(0 && "Invalid IQ Sharing Policy. Options are: {Dynamic, "
146               "Partitioned, Threshold}");
147 }
148}
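// Worked example of the three sharing policies above (numbers chosen purely
// for illustration): with numIQEntries = 64, two threads, and
// smtIQThreshold = 75, the "dynamic" policy caps each thread at all 64
// entries, "partitioned" gives each thread 64 / 2 = 32 entries, and
// "threshold" gives each thread (75 / 100.0) * 64 = 48 entries.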
149
150template <class Impl>
151InstructionQueue<Impl>::~InstructionQueue()
152{
153 dependGraph.reset();
154#ifdef DEBUG
155 cprintf("Nodes traversed: %i, removed: %i\n",
156 dependGraph.nodesTraversed, dependGraph.nodesRemoved);
157#endif
158}
159
160template <class Impl>
161std::string
162InstructionQueue<Impl>::name() const
163{
164 return cpu->name() + ".iq";
165}
166
167template <class Impl>
168void
169InstructionQueue<Impl>::regStats()
170{
171 using namespace Stats;
172 iqInstsAdded
173 .name(name() + ".iqInstsAdded")
174 .desc("Number of instructions added to the IQ (excludes non-spec)")
175 .prereq(iqInstsAdded);
176
177 iqNonSpecInstsAdded
178 .name(name() + ".iqNonSpecInstsAdded")
179 .desc("Number of non-speculative instructions added to the IQ")
180 .prereq(iqNonSpecInstsAdded);
181
182 iqInstsIssued
183 .name(name() + ".iqInstsIssued")
184 .desc("Number of instructions issued")
185 .prereq(iqInstsIssued);
186
187 iqIntInstsIssued
188 .name(name() + ".iqIntInstsIssued")
189 .desc("Number of integer instructions issued")
190 .prereq(iqIntInstsIssued);
191
192 iqFloatInstsIssued
193 .name(name() + ".iqFloatInstsIssued")
194 .desc("Number of float instructions issued")
195 .prereq(iqFloatInstsIssued);
196
197 iqBranchInstsIssued
198 .name(name() + ".iqBranchInstsIssued")
199 .desc("Number of branch instructions issued")
200 .prereq(iqBranchInstsIssued);
201
202 iqMemInstsIssued
203 .name(name() + ".iqMemInstsIssued")
204 .desc("Number of memory instructions issued")
205 .prereq(iqMemInstsIssued);
206
207 iqMiscInstsIssued
208 .name(name() + ".iqMiscInstsIssued")
209 .desc("Number of miscellaneous instructions issued")
210 .prereq(iqMiscInstsIssued);
211
212 iqSquashedInstsIssued
213 .name(name() + ".iqSquashedInstsIssued")
214 .desc("Number of squashed instructions issued")
215 .prereq(iqSquashedInstsIssued);
216
217 iqSquashedInstsExamined
218 .name(name() + ".iqSquashedInstsExamined")
219 .desc("Number of squashed instructions iterated over during squash;"
220 " mainly for profiling")
221 .prereq(iqSquashedInstsExamined);
222
223 iqSquashedOperandsExamined
224 .name(name() + ".iqSquashedOperandsExamined")
225 .desc("Number of squashed operands that are examined and possibly "
226 "removed from graph")
227 .prereq(iqSquashedOperandsExamined);
228
229 iqSquashedNonSpecRemoved
230 .name(name() + ".iqSquashedNonSpecRemoved")
231 .desc("Number of squashed non-spec instructions that were removed")
232 .prereq(iqSquashedNonSpecRemoved);
233/*
234 queueResDist
235 .init(Num_OpClasses, 0, 99, 2)
236 .name(name() + ".IQ:residence:")
237 .desc("cycles from dispatch to issue")
238 .flags(total | pdf | cdf )
239 ;
240 for (int i = 0; i < Num_OpClasses; ++i) {
241 queueResDist.subname(i, opClassStrings[i]);
242 }
243*/
244 numIssuedDist
245 .init(0,totalWidth,1)
246 .name(name() + ".ISSUE:issued_per_cycle")
247 .desc("Number of insts issued each cycle")
248 .flags(pdf)
249 ;
250/*
251 dist_unissued
252 .init(Num_OpClasses+2)
253 .name(name() + ".ISSUE:unissued_cause")
254 .desc("Reason ready instruction not issued")
255 .flags(pdf | dist)
256 ;
257 for (int i=0; i < (Num_OpClasses + 2); ++i) {
258 dist_unissued.subname(i, unissued_names[i]);
259 }
260*/
261 statIssuedInstType
262 .init(numThreads,Num_OpClasses)
263 .name(name() + ".ISSUE:FU_type")
264 .desc("Type of FU issued")
265 .flags(total | pdf | dist)
266 ;
267 statIssuedInstType.ysubnames(opClassStrings);
268
269 //
270 // How long did instructions for a particular FU type wait prior to issue
271 //
272/*
273 issueDelayDist
274 .init(Num_OpClasses,0,99,2)
275 .name(name() + ".ISSUE:")
276 .desc("cycles from operands ready to issue")
277 .flags(pdf | cdf)
278 ;
279
280 for (int i=0; i<Num_OpClasses; ++i) {
281 std::stringstream subname;
282 subname << opClassStrings[i] << "_delay";
283 issueDelayDist.subname(i, subname.str());
284 }
285*/
286 issueRate
287 .name(name() + ".ISSUE:rate")
288 .desc("Inst issue rate")
289 .flags(total)
290 ;
291 issueRate = iqInstsIssued / cpu->numCycles;
292
293 statFuBusy
294 .init(Num_OpClasses)
295 .name(name() + ".ISSUE:fu_full")
296 .desc("attempts to use FU when none available")
297 .flags(pdf | dist)
298 ;
299 for (int i=0; i < Num_OpClasses; ++i) {
300 statFuBusy.subname(i, opClassStrings[i]);
301 }
302
303 fuBusy
304 .init(numThreads)
305 .name(name() + ".ISSUE:fu_busy_cnt")
306 .desc("FU busy when requested")
307 .flags(total)
308 ;
309
310 fuBusyRate
311 .name(name() + ".ISSUE:fu_busy_rate")
312 .desc("FU busy rate (busy events/executed inst)")
313 .flags(total)
314 ;
315 fuBusyRate = fuBusy / iqInstsIssued;
316
317 for ( int i=0; i < numThreads; i++) {
318 // Tell mem dependence unit to reg stats as well.
319 memDepUnit[i].regStats();
320 }
321}
322
323template <class Impl>
324void
325InstructionQueue<Impl>::resetState()
326{
327 //Initialize thread IQ counts
328    for (int i = 0; i < numThreads; i++) {
329 count[i] = 0;
330 instList[i].clear();
331 }
332
333 // Initialize the number of free IQ entries.
334 freeEntries = numEntries;
335
336 // Note that in actuality, the registers corresponding to the logical
337 // registers start off as ready. However this doesn't matter for the
338 // IQ as the instruction should have been correctly told if those
339 // registers are ready in rename. Thus it can all be initialized as
340 // unready.
341 for (int i = 0; i < numPhysRegs; ++i) {
342 regScoreboard[i] = false;
343 }
344
345 for (int i = 0; i < numThreads; ++i) {
346 squashedSeqNum[i] = 0;
347 }
348
349 for (int i = 0; i < Num_OpClasses; ++i) {
350 while (!readyInsts[i].empty())
351 readyInsts[i].pop();
352 queueOnList[i] = false;
353 readyIt[i] = listOrder.end();
354 }
355 nonSpecInsts.clear();
356 listOrder.clear();
357}
358
359template <class Impl>
360void
361InstructionQueue<Impl>::setActiveThreads(std::list<unsigned> *at_ptr)
362{
363 DPRINTF(IQ, "Setting active threads list pointer.\n");
364 activeThreads = at_ptr;
365}
366
367template <class Impl>
368void
369InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
370{
371 DPRINTF(IQ, "Set the issue to execute queue.\n");
372 issueToExecuteQueue = i2e_ptr;
373}
374
375template <class Impl>
376void
377InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
378{
379 DPRINTF(IQ, "Set the time buffer.\n");
380 timeBuffer = tb_ptr;
381
382 fromCommit = timeBuffer->getWire(-commitToIEWDelay);
383}
384
385template <class Impl>
386void
387InstructionQueue<Impl>::switchOut()
388{
389/*
390 if (!instList[0].empty() || (numEntries != freeEntries) ||
391 !readyInsts[0].empty() || !nonSpecInsts.empty() || !listOrder.empty()) {
392 dumpInsts();
393// assert(0);
394 }
395*/
396 resetState();
397 dependGraph.reset();
398 instsToExecute.clear();
399 switchedOut = true;
400 for (int i = 0; i < numThreads; ++i) {
401 memDepUnit[i].switchOut();
402 }
403}
404
405template <class Impl>
406void
407InstructionQueue<Impl>::takeOverFrom()
408{
409 switchedOut = false;
410}
411
412template <class Impl>
413int
414InstructionQueue<Impl>::entryAmount(int num_threads)
415{
416 if (iqPolicy == Partitioned) {
417 return numEntries / num_threads;
418 } else {
419 return 0;
420 }
421}
422
423
424template <class Impl>
425void
426InstructionQueue<Impl>::resetEntries()
427{
428 if (iqPolicy != Dynamic || numThreads > 1) {
429 int active_threads = activeThreads->size();
430
431 std::list<unsigned>::iterator threads = activeThreads->begin();
432 std::list<unsigned>::iterator end = activeThreads->end();
433
434 while (threads != end) {
435 unsigned tid = *threads++;
436
437 if (iqPolicy == Partitioned) {
438 maxEntries[tid] = numEntries / active_threads;
439 } else if(iqPolicy == Threshold && active_threads == 1) {
440 maxEntries[tid] = numEntries;
441 }
442 }
443 }
444}
445
446template <class Impl>
447unsigned
448InstructionQueue<Impl>::numFreeEntries()
449{
450 return freeEntries;
451}
452
453template <class Impl>
454unsigned
455InstructionQueue<Impl>::numFreeEntries(unsigned tid)
456{
457 return maxEntries[tid] - count[tid];
458}
459
460// Might want to do something more complex if it knows how many instructions
461// will be issued this cycle.
462template <class Impl>
463bool
464InstructionQueue<Impl>::isFull()
465{
466 if (freeEntries == 0) {
467 return(true);
468 } else {
469 return(false);
470 }
471}
472
473template <class Impl>
474bool
475InstructionQueue<Impl>::isFull(unsigned tid)
476{
477 if (numFreeEntries(tid) == 0) {
478 return(true);
479 } else {
480 return(false);
481 }
482}
483
484template <class Impl>
485bool
486InstructionQueue<Impl>::hasReadyInsts()
487{
488 if (!listOrder.empty()) {
489 return true;
490 }
491
492 for (int i = 0; i < Num_OpClasses; ++i) {
493 if (!readyInsts[i].empty()) {
494 return true;
495 }
496 }
497
498 return false;
499}
500
501template <class Impl>
502void
503InstructionQueue<Impl>::insert(DynInstPtr &new_inst)
504{
505 // Make sure the instruction is valid
506 assert(new_inst);
507
508 DPRINTF(IQ, "Adding instruction [sn:%lli] PC %#x to the IQ.\n",
509 new_inst->seqNum, new_inst->readPC());
510
511 assert(freeEntries != 0);
512
513 instList[new_inst->threadNumber].push_back(new_inst);
514
515 --freeEntries;
516
517 new_inst->setInIQ();
518
519 // Look through its source registers (physical regs), and mark any
520 // dependencies.
521 addToDependents(new_inst);
522
523 // Have this instruction set itself as the producer of its destination
524 // register(s).
525 addToProducers(new_inst);
526
527 if (new_inst->isMemRef()) {
528 memDepUnit[new_inst->threadNumber].insert(new_inst);
529 } else {
530 addIfReady(new_inst);
531 }
532
533 ++iqInstsAdded;
534
535 count[new_inst->threadNumber]++;
536
537 assert(freeEntries == (numEntries - countInsts()));
538}
539
540template <class Impl>
541void
542InstructionQueue<Impl>::insertNonSpec(DynInstPtr &new_inst)
543{
544 // @todo: Clean up this code; can do it by setting inst as unable
545 // to issue, then calling normal insert on the inst.
546
547 assert(new_inst);
548
549 nonSpecInsts[new_inst->seqNum] = new_inst;
550
551 DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %#x "
552 "to the IQ.\n",
553 new_inst->seqNum, new_inst->readPC());
554
555 assert(freeEntries != 0);
556
557 instList[new_inst->threadNumber].push_back(new_inst);
558
559 --freeEntries;
560
561 new_inst->setInIQ();
562
563 // Have this instruction set itself as the producer of its destination
564 // register(s).
565 addToProducers(new_inst);
566
567 // If it's a memory instruction, add it to the memory dependency
568 // unit.
569 if (new_inst->isMemRef()) {
570 memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
571 }
572
573 ++iqNonSpecInstsAdded;
574
575 count[new_inst->threadNumber]++;
576
577 assert(freeEntries == (numEntries - countInsts()));
578}
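// Note on the two insertion paths above: insert() records source-register
// dependencies (addToDependents) and may place the instruction on the ready
// lists right away (addIfReady), whereas insertNonSpec() does neither; a
// non-speculative instruction waits in the nonSpecInsts map until
// scheduleNonSpec() below marks it able to issue.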
579
580template <class Impl>
581void
582InstructionQueue<Impl>::insertBarrier(DynInstPtr &barr_inst)
583{
584 memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);
585
586 insertNonSpec(barr_inst);
587}
588
589template <class Impl>
590typename Impl::DynInstPtr
591InstructionQueue<Impl>::getInstToExecute()
592{
593 assert(!instsToExecute.empty());
594 DynInstPtr inst = instsToExecute.front();
595 instsToExecute.pop_front();
596 return inst;
597}
598
599template <class Impl>
600void
601InstructionQueue<Impl>::addToOrderList(OpClass op_class)
602{
603 assert(!readyInsts[op_class].empty());
604
605 ListOrderEntry queue_entry;
606
607 queue_entry.queueType = op_class;
608
609 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
610
611 ListOrderIt list_it = listOrder.begin();
612 ListOrderIt list_end_it = listOrder.end();
613
614 while (list_it != list_end_it) {
615 if ((*list_it).oldestInst > queue_entry.oldestInst) {
616 break;
617 }
618
619 list_it++;
620 }
621
622 readyIt[op_class] = listOrder.insert(list_it, queue_entry);
623 queueOnList[op_class] = true;
624}
625
626template <class Impl>
627void
628InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
629{
630 // Get iterator of next item on the list
631 // Delete the original iterator
632 // Determine if the next item is either the end of the list or younger
633 // than the new instruction. If so, then add in a new iterator right here.
634 // If not, then move along.
635 ListOrderEntry queue_entry;
636 OpClass op_class = (*list_order_it).queueType;
637 ListOrderIt next_it = list_order_it;
638
639 ++next_it;
640
641 queue_entry.queueType = op_class;
642 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
643
644 while (next_it != listOrder.end() &&
645 (*next_it).oldestInst < queue_entry.oldestInst) {
646 ++next_it;
647 }
648
649 readyIt[op_class] = listOrder.insert(next_it, queue_entry);
650}
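// Note on the bookkeeping above: listOrder holds at most one entry per op
// class (tracked by queueOnList), kept sorted by the sequence number of the
// oldest ready instruction of that class.  addToOrderList() inserts a class
// at its sorted position, and moveToYoungerInst() re-inserts it once its
// previous oldest instruction has been popped, so scheduleReadyInsts() below
// visits op classes in order of their oldest ready instruction.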
651
652template <class Impl>
653void
654InstructionQueue<Impl>::processFUCompletion(DynInstPtr &inst, int fu_idx)
655{
656 DPRINTF(IQ, "Processing FU completion [sn:%lli]\n", inst->seqNum);
657 // The CPU could have been sleeping until this op completed (*extremely*
658 // long latency op). Wake it if it was. This may be overkill.
659 if (isSwitchedOut()) {
660 DPRINTF(IQ, "FU completion not processed, IQ is switched out [sn:%lli]\n",
661 inst->seqNum);
662 return;
663 }
664
665 iewStage->wakeCPU();
666
667 if (fu_idx > -1)
668 fuPool->freeUnitNextCycle(fu_idx);
669
670 // @todo: Ensure that these FU Completions happen at the beginning
671 // of a cycle, otherwise they could add too many instructions to
672 // the queue.
673 issueToExecuteQueue->access(0)->size++;
674 instsToExecute.push_back(inst);
675}
676
677// @todo: Figure out a better way to remove the squashed items from the
678// lists. Checking the top item of each list to see if it's squashed
679// wastes time and forces jumps.
680template <class Impl>
681void
682InstructionQueue<Impl>::scheduleReadyInsts()
683{
684 DPRINTF(IQ, "Attempting to schedule ready instructions from "
685 "the IQ.\n");
686
687 IssueStruct *i2e_info = issueToExecuteQueue->access(0);
688
689 // Have iterator to head of the list
690 // While I haven't exceeded bandwidth or reached the end of the list,
691 // Try to get a FU that can do what this op needs.
692 // If successful, change the oldestInst to the new top of the list, put
693 // the queue in the proper place in the list.
694 // Increment the iterator.
695 // This will avoid trying to schedule a certain op class if there are no
696 // FUs that handle it.
697 ListOrderIt order_it = listOrder.begin();
698 ListOrderIt order_end_it = listOrder.end();
699 int total_issued = 0;
700
701 while (total_issued < totalWidth &&
702 iewStage->canIssue() &&
703 order_it != order_end_it) {
704 OpClass op_class = (*order_it).queueType;
705
706 assert(!readyInsts[op_class].empty());
707
708 DynInstPtr issuing_inst = readyInsts[op_class].top();
709
710 assert(issuing_inst->seqNum == (*order_it).oldestInst);
711
712 if (issuing_inst->isSquashed()) {
713 readyInsts[op_class].pop();
714
715 if (!readyInsts[op_class].empty()) {
716 moveToYoungerInst(order_it);
717 } else {
718 readyIt[op_class] = listOrder.end();
719 queueOnList[op_class] = false;
720 }
721
722 listOrder.erase(order_it++);
723
724 ++iqSquashedInstsIssued;
725
726 continue;
727 }
728
729 int idx = -2;
730 int op_latency = 1;
731 int tid = issuing_inst->threadNumber;
732
733 if (op_class != No_OpClass) {
734 idx = fuPool->getUnit(op_class);
735
736 if (idx > -1) {
737 op_latency = fuPool->getOpLatency(op_class);
738 }
739 }
740
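        // How idx is interpreted below: it stays at -2 when no FU lookup was
        // needed (No_OpClass), getUnit() returning -1 is treated as "all FUs
        // of this class busy" (the else branch counts it as fu_busy), and a
        // value >= 0 is the index of the FU that was just allocated.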
741 // If we have an instruction that doesn't require a FU, or a
742 // valid FU, then schedule for execution.
743 if (idx == -2 || idx != -1) {
744 if (op_latency == 1) {
745 i2e_info->size++;
746 instsToExecute.push_back(issuing_inst);
747
748 // Add the FU onto the list of FU's to be freed next
749 // cycle if we used one.
750 if (idx >= 0)
751 fuPool->freeUnitNextCycle(idx);
752 } else {
753 int issue_latency = fuPool->getIssueLatency(op_class);
754 // Generate completion event for the FU
755 FUCompletion *execution = new FUCompletion(issuing_inst,
756 idx, this);
757
758 execution->schedule(curTick + cpu->cycles(issue_latency - 1));
759
760 // @todo: Enforce that issue_latency == 1 or op_latency
761 if (issue_latency > 1) {
762 // If FU isn't pipelined, then it must be freed
763 // upon the execution completing.
764 execution->setFreeFU();
765 } else {
766 // Add the FU onto the list of FU's to be freed next cycle.
767 fuPool->freeUnitNextCycle(idx);
768 }
769 }
770
771 DPRINTF(IQ, "Thread %i: Issuing instruction PC %#x "
772 "[sn:%lli]\n",
773 tid, issuing_inst->readPC(),
774 issuing_inst->seqNum);
775
776 readyInsts[op_class].pop();
777
778 if (!readyInsts[op_class].empty()) {
779 moveToYoungerInst(order_it);
780 } else {
781 readyIt[op_class] = listOrder.end();
782 queueOnList[op_class] = false;
783 }
784
785 issuing_inst->setIssued();
786 ++total_issued;
787
788 if (!issuing_inst->isMemRef()) {
789 // Memory instructions can not be freed from the IQ until they
790 // complete.
791 ++freeEntries;
792 count[tid]--;
793 issuing_inst->clearInIQ();
794 } else {
795 memDepUnit[tid].issue(issuing_inst);
796 }
797
798 listOrder.erase(order_it++);
799 statIssuedInstType[tid][op_class]++;
800 iewStage->incrWb(issuing_inst->seqNum);
801 } else {
802 statFuBusy[op_class]++;
803 fuBusy[tid]++;
804 ++order_it;
805 }
806 }
807
808 numIssuedDist.sample(total_issued);
809 iqInstsIssued+= total_issued;
810
811 // If we issued any instructions, tell the CPU we had activity.
812 if (total_issued) {
813 cpu->activityThisCycle();
814 } else {
815 DPRINTF(IQ, "Not able to schedule any instructions.\n");
816 }
817}
818
819template <class Impl>
820void
821InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
822{
823 DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
824 "to execute.\n", inst);
825
826 NonSpecMapIt inst_it = nonSpecInsts.find(inst);
827
828 assert(inst_it != nonSpecInsts.end());
829
830 unsigned tid = (*inst_it).second->threadNumber;
831
832 (*inst_it).second->setAtCommit();
833
834 (*inst_it).second->setCanIssue();
835
836 if (!(*inst_it).second->isMemRef()) {
837 addIfReady((*inst_it).second);
838 } else {
839 memDepUnit[tid].nonSpecInstReady((*inst_it).second);
840 }
841
842 (*inst_it).second = NULL;
843
844 nonSpecInsts.erase(inst_it);
845}
846
847template <class Impl>
848void
849InstructionQueue<Impl>::commit(const InstSeqNum &inst, unsigned tid)
850{
851 DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n",
852 tid,inst);
853
854 ListIt iq_it = instList[tid].begin();
855
856 while (iq_it != instList[tid].end() &&
857 (*iq_it)->seqNum <= inst) {
858 ++iq_it;
859 instList[tid].pop_front();
860 }
861
862 assert(freeEntries == (numEntries - countInsts()));
863}
864
865template <class Impl>
866int
867InstructionQueue<Impl>::wakeDependents(DynInstPtr &completed_inst)
868{
869 int dependents = 0;
870
871 DPRINTF(IQ, "Waking dependents of completed instruction.\n");
872
873 assert(!completed_inst->isSquashed());
874
875 // Tell the memory dependence unit to wake any dependents on this
876 // instruction if it is a memory instruction. Also complete the memory
877 // instruction at this point since we know it executed without issues.
878 // @todo: Might want to rename "completeMemInst" to something that
879 // indicates that it won't need to be replayed, and call this
880 // earlier. Might not be a big deal.
881 if (completed_inst->isMemRef()) {
882 memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
883 completeMemInst(completed_inst);
884 } else if (completed_inst->isMemBarrier() ||
885 completed_inst->isWriteBarrier()) {
886 memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
887 }
888
889 for (int dest_reg_idx = 0;
890 dest_reg_idx < completed_inst->numDestRegs();
891 dest_reg_idx++)
892 {
893 PhysRegIndex dest_reg =
894 completed_inst->renamedDestRegIdx(dest_reg_idx);
895
896 // Special case of uniq or control registers. They are not
897 // handled by the IQ and thus have no dependency graph entry.
898 // @todo Figure out a cleaner way to handle this.
899 if (dest_reg >= numPhysRegs) {
900 continue;
901 }
902
903 DPRINTF(IQ, "Waking any dependents on register %i.\n",
904 (int) dest_reg);
905
906 //Go through the dependency chain, marking the registers as
907 //ready within the waiting instructions.
908 DynInstPtr dep_inst = dependGraph.pop(dest_reg);
909
910 while (dep_inst) {
911 DPRINTF(IQ, "Waking up a dependent instruction, PC%#x.\n",
912 dep_inst->readPC());
913
914 // Might want to give more information to the instruction
915 // so that it knows which of its source registers is
916 // ready. However that would mean that the dependency
917 // graph entries would need to hold the src_reg_idx.
918 dep_inst->markSrcRegReady();
919
920 addIfReady(dep_inst);
921
922 dep_inst = dependGraph.pop(dest_reg);
923
924 ++dependents;
925 }
926
927 // Reset the head node now that all of its dependents have
928 // been woken up.
929 assert(dependGraph.empty(dest_reg));
930 dependGraph.clearInst(dest_reg);
931
932 // Mark the scoreboard as having that register ready.
933 regScoreboard[dest_reg] = true;
934 }
935 return dependents;
936}
937
938template <class Impl>
939void
940InstructionQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
941{
942 OpClass op_class = ready_inst->opClass();
943
944 readyInsts[op_class].push(ready_inst);
945
946 // Will need to reorder the list if either a queue is not on the list,
947 // or it has an older instruction than last time.
948 if (!queueOnList[op_class]) {
949 addToOrderList(op_class);
950 } else if (readyInsts[op_class].top()->seqNum <
951 (*readyIt[op_class]).oldestInst) {
952 listOrder.erase(readyIt[op_class]);
953 addToOrderList(op_class);
954 }
955
956 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
957 "the ready list, PC %#x opclass:%i [sn:%lli].\n",
958 ready_inst->readPC(), op_class, ready_inst->seqNum);
959}
960
961template <class Impl>
962void
963InstructionQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
964{
965 DPRINTF(IQ, "Rescheduling mem inst [sn:%lli]\n", resched_inst->seqNum);
966 resched_inst->clearCanIssue();
967 memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
968}
969
970template <class Impl>
971void
972InstructionQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
973{
974 memDepUnit[replay_inst->threadNumber].replay(replay_inst);
975}
976
977template <class Impl>
978void
979InstructionQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
980{
981 int tid = completed_inst->threadNumber;
982
983 DPRINTF(IQ, "Completing mem instruction PC:%#x [sn:%lli]\n",
984 completed_inst->readPC(), completed_inst->seqNum);
985
986 ++freeEntries;
987
988 completed_inst->memOpDone = true;
989
990 memDepUnit[tid].completed(completed_inst);
991 count[tid]--;
992}
993
994template <class Impl>
995void
996InstructionQueue<Impl>::violation(DynInstPtr &store,
997 DynInstPtr &faulting_load)
998{
999 memDepUnit[store->threadNumber].violation(store, faulting_load);
1000}
1001
1002template <class Impl>
1003void
1004InstructionQueue<Impl>::squash(unsigned tid)
1005{
1006 DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
1007 "the IQ.\n", tid);
1008
1009 // Read instruction sequence number of last instruction out of the
1010 // time buffer.
1011#if ISA_HAS_DELAY_SLOT
1012 squashedSeqNum[tid] = fromCommit->commitInfo[tid].bdelayDoneSeqNum;
1013#else
1014 squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;
1015#endif
1016
1017 // Call doSquash if there are insts in the IQ
1018 if (count[tid] > 0) {
1019 doSquash(tid);
1020 }
1021
1022 // Also tell the memory dependence unit to squash.
1023 memDepUnit[tid].squash(squashedSeqNum[tid], tid);
1024}
1025
template <class Impl>
void
InstructionQueue<Impl>::doSquash(unsigned tid)
{
    // Start at the tail.
    ListIt squash_it = instList[tid].end();
    --squash_it;

    DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
            tid, squashedSeqNum[tid]);

    // Squash any instructions younger than the squashed sequence number
    // given.
    while (squash_it != instList[tid].end() &&
           (*squash_it)->seqNum > squashedSeqNum[tid]) {

        DynInstPtr squashed_inst = (*squash_it);

        // Only handle the instruction if it actually is in the IQ and
        // hasn't already been squashed in the IQ.
        if (squashed_inst->threadNumber != tid ||
            squashed_inst->isSquashedInIQ()) {
            --squash_it;
            continue;
        }

        if (!squashed_inst->isIssued() ||
            (squashed_inst->isMemRef() &&
             !squashed_inst->memOpDone)) {

            DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %#x "
                    "squashed.\n",
                    tid, squashed_inst->seqNum, squashed_inst->readPC());

            // Remove the instruction from the dependency list.
            if (!squashed_inst->isNonSpeculative() &&
                !squashed_inst->isStoreConditional() &&
                !squashed_inst->isMemBarrier() &&
                !squashed_inst->isWriteBarrier()) {

                for (int src_reg_idx = 0;
                     src_reg_idx < squashed_inst->numSrcRegs();
                     src_reg_idx++)
                {
                    PhysRegIndex src_reg =
                        squashed_inst->renamedSrcRegIdx(src_reg_idx);

                    // Only remove it from the dependency graph if it
                    // was placed there in the first place.

                    // Instead of doing a linked list traversal, we
                    // can just remove these squashed instructions
                    // either at issue time, or when the register is
                    // overwritten. The only downside to this is it
                    // leaves more room for error.

                    if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
                        src_reg < numPhysRegs) {
                        dependGraph.remove(src_reg, squashed_inst);
                    }


                    ++iqSquashedOperandsExamined;
                }
            } else if (!squashed_inst->isStoreConditional() ||
                       !squashed_inst->isCompleted()) {
                NonSpecMapIt ns_inst_it =
                    nonSpecInsts.find(squashed_inst->seqNum);
                assert(ns_inst_it != nonSpecInsts.end());
                if (ns_inst_it == nonSpecInsts.end()) {
                    assert(squashed_inst->getFault() != NoFault);
                } else {

                    (*ns_inst_it).second = NULL;

                    nonSpecInsts.erase(ns_inst_it);

                    ++iqSquashedNonSpecRemoved;
                }
            }

            // Might want to also clear out the head of the dependency graph.

            // Mark it as squashed within the IQ.
            squashed_inst->setSquashedInIQ();

            // @todo: Remove this hack where several statuses are set so the
            // inst will flow through the rest of the pipeline.
            squashed_inst->setIssued();
            squashed_inst->setCanCommit();
            squashed_inst->clearInIQ();

            // Update Thread IQ Count
            count[squashed_inst->threadNumber]--;

            ++freeEntries;
        }

        instList[tid].erase(squash_it--);
        ++iqSquashedInstsExamined;
    }
}

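// For each source register of the new instruction that is not yet ready,
// insert the instruction into the dependency graph. Returns true if any
// dependency was recorded, i.e. the instruction cannot issue yet.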
template <class Impl>
bool
InstructionQueue<Impl>::addToDependents(DynInstPtr &new_inst)
{
    // Loop through the instruction's source registers, adding
    // them to the dependency list if they are not ready.
    int8_t total_src_regs = new_inst->numSrcRegs();
    bool return_val = false;

    for (int src_reg_idx = 0;
         src_reg_idx < total_src_regs;
         src_reg_idx++)
    {
        // Only add it to the dependency graph if it's not ready.
        if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
            PhysRegIndex src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);

            // Check the IQ's scoreboard to make sure the register
            // hasn't become ready while the instruction was in flight
            // between stages. Only if it really isn't ready should
            // it be added to the dependency graph.
            if (src_reg >= numPhysRegs) {
                continue;
            } else if (regScoreboard[src_reg] == false) {
                DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
                        "is being added to the dependency chain.\n",
                        new_inst->readPC(), src_reg);

                dependGraph.insert(src_reg, new_inst);

                // Change the return value to indicate that something
                // was added to the dependency graph.
                return_val = true;
            } else {
                DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
                        "became ready before it reached the IQ.\n",
                        new_inst->readPC(), src_reg);
                // Mark a register ready within the instruction.
                new_inst->markSrcRegReady(src_reg_idx);
            }
        }
    }

    return return_val;
}

template <class Impl>
void
InstructionQueue<Impl>::addToProducers(DynInstPtr &new_inst)
{
    // Nothing really needs to be marked when an instruction becomes
    // the producer of a register's value, but for convenience a ptr
    // to the producing instruction will be placed in the head node of
    // the dependency links.
    int8_t total_dest_regs = new_inst->numDestRegs();

    for (int dest_reg_idx = 0;
         dest_reg_idx < total_dest_regs;
         dest_reg_idx++)
    {
        PhysRegIndex dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);

        // Instructions that use the misc regs will have a reg number
        // higher than the normal physical registers. In this case these
        // registers are not renamed, and there is no need to track
        // dependencies as these instructions must be executed at commit.
        if (dest_reg >= numPhysRegs) {
            continue;
        }

        if (!dependGraph.empty(dest_reg)) {
            dependGraph.dump();
            panic("Dependency graph %i not empty!", dest_reg);
        }

        dependGraph.setInst(dest_reg, new_inst);

        // Mark the scoreboard to say it's not yet ready.
        regScoreboard[dest_reg] = false;
    }
}

template <class Impl>
void
InstructionQueue<Impl>::addIfReady(DynInstPtr &inst)
{
    // If the instruction now has all of its source registers
    // available, then add it to the list of ready instructions.
    if (inst->readyToIssue()) {

        // Add the instruction to the proper ready list.
        if (inst->isMemRef()) {

            DPRINTF(IQ, "Checking if memory instruction can issue.\n");

            // Message to the mem dependence unit that this instruction has
            // its registers ready.
            memDepUnit[inst->threadNumber].regsReady(inst);

            return;
        }

        OpClass op_class = inst->opClass();

        DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
                "the ready list, PC %#x opclass:%i [sn:%lli].\n",
                inst->readPC(), op_class, inst->seqNum);

        readyInsts[op_class].push(inst);

        // Will need to reorder the list if either a queue is not on the list,
        // or it has an older instruction than last time.
        if (!queueOnList[op_class]) {
            addToOrderList(op_class);
        } else if (readyInsts[op_class].top()->seqNum <
                   (*readyIt[op_class]).oldestInst) {
            listOrder.erase(readyIt[op_class]);
            addToOrderList(op_class);
        }
    }
}

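// Number of instructions currently occupying the IQ. The active branch simply
// derives it from the free-entry count; the brute-force walk below is
// disabled, but can be re-enabled by changing the #if.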
template <class Impl>
int
InstructionQueue<Impl>::countInsts()
{
#if 0
    // ksewell: This works but could definitely use a cleaner rewrite
    // with a more intuitive way of counting. Right now it's
    // just brute force...
    // Change the #if if you want to use this method.
    int total_insts = 0;

    for (int i = 0; i < numThreads; ++i) {
        ListIt count_it = instList[i].begin();

        while (count_it != instList[i].end()) {
            if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
                if (!(*count_it)->isIssued()) {
                    ++total_insts;
                } else if ((*count_it)->isMemRef() &&
                           !(*count_it)->memOpDone) {
                    // Loads that have not been marked as executed still count
                    // towards the total instructions.
                    ++total_insts;
                }
            }

            ++count_it;
        }
    }

    return total_insts;
#else
    return numEntries - freeEntries;
#endif
}

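// Debugging aid: print the size of each ready list, the contents of the
// non-speculative instruction map, and the current list order.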
template <class Impl>
void
InstructionQueue<Impl>::dumpLists()
{
    for (int i = 0; i < Num_OpClasses; ++i) {
        cprintf("Ready list %i size: %i\n", i, readyInsts[i].size());

        cprintf("\n");
    }

    cprintf("Non speculative list size: %i\n", nonSpecInsts.size());

    NonSpecMapIt non_spec_it = nonSpecInsts.begin();
    NonSpecMapIt non_spec_end_it = nonSpecInsts.end();

    cprintf("Non speculative list: ");

    while (non_spec_it != non_spec_end_it) {
        cprintf("%#x [sn:%lli]", (*non_spec_it).second->readPC(),
                (*non_spec_it).second->seqNum);
        ++non_spec_it;
    }

    cprintf("\n");

    ListOrderIt list_order_it = listOrder.begin();
    ListOrderIt list_order_end_it = listOrder.end();
    int i = 1;

    cprintf("List order: ");

    while (list_order_it != list_order_end_it) {
        cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
                (*list_order_it).oldestInst);

        ++list_order_it;
        ++i;
    }

    cprintf("\n");
}


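// Debugging aid: print every instruction in each thread's IQ list, followed
// by the contents of the instsToExecute list.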
template <class Impl>
void
InstructionQueue<Impl>::dumpInsts()
{
    for (int i = 0; i < numThreads; ++i) {
        int num = 0;
        int valid_num = 0;
        ListIt inst_list_it = instList[i].begin();

        while (inst_list_it != instList[i].end())
        {
            cprintf("Instruction:%i\n",
                    num);
            if (!(*inst_list_it)->isSquashed()) {
                if (!(*inst_list_it)->isIssued()) {
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                } else if ((*inst_list_it)->isMemRef() &&
                           !(*inst_list_it)->memOpDone) {
                    // Loads that have not been marked as executed
                    // still count towards the total instructions.
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                }
            }

            cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
                    "Issued:%i\nSquashed:%i\n",
                    (*inst_list_it)->readPC(),
                    (*inst_list_it)->seqNum,
                    (*inst_list_it)->threadNumber,
                    (*inst_list_it)->isIssued(),
                    (*inst_list_it)->isSquashed());

            if ((*inst_list_it)->isMemRef()) {
                cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
            }

            cprintf("\n");

            inst_list_it++;
            ++num;
        }
    }

    cprintf("Insts to Execute list:\n");

    int num = 0;
    int valid_num = 0;
    ListIt inst_list_it = instsToExecute.begin();

    while (inst_list_it != instsToExecute.end())
    {
        cprintf("Instruction:%i\n",
                num);
        if (!(*inst_list_it)->isSquashed()) {
            if (!(*inst_list_it)->isIssued()) {
                ++valid_num;
                cprintf("Count:%i\n", valid_num);
            } else if ((*inst_list_it)->isMemRef() &&
                       !(*inst_list_it)->memOpDone) {
                // Loads that have not been marked as executed
                // still count towards the total instructions.
                ++valid_num;
                cprintf("Count:%i\n", valid_num);
            }
        }

        cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
                "Issued:%i\nSquashed:%i\n",
                (*inst_list_it)->readPC(),
                (*inst_list_it)->seqNum,
                (*inst_list_it)->threadNumber,
                (*inst_list_it)->isIssued(),
                (*inst_list_it)->isSquashed());

        if ((*inst_list_it)->isMemRef()) {
            cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
        }

        cprintf("\n");

        inst_list_it++;
        ++num;
    }
}