Deleted Added
sdiff udiff text old ( 2665:a124942bacb8 ) new ( 2669:f2b336e89d2a )
full compact
1/*
2 * Copyright (c) 2004-2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the

--- 8 unchanged lines hidden (view full) ---

19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <limits>
30#include <vector>
31
32#include "sim/root.hh"
33
34#include "cpu/o3/fu_pool.hh"
35#include "cpu/o3/inst_queue.hh"
36
37using namespace std;
38
template <class Impl>
InstructionQueue<Impl>::FUCompletion::FUCompletion(DynInstPtr &_inst,
                                                   int fu_idx,
                                                   InstructionQueue<Impl> *iq_ptr)
    : Event(&mainEventQueue, Stat_Event_Pri),
      inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false)
{
    // Event deletes itself after process() runs, so callers never
    // need to reclaim it.
    this->setFlags(Event::AutoDelete);
}
48
template <class Impl>
void
InstructionQueue<Impl>::FUCompletion::process()
{
    // Hand the completed instruction back to the IQ; pass -1 for the
    // FU index unless setFreeFU() was called, in which case the IQ
    // also frees the functional unit.
    iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1);
    // Drop the refcounted instruction pointer so it can be reclaimed.
    inst = NULL;
}
56
57
58template <class Impl>
59const char *
60InstructionQueue<Impl>::FUCompletion::description()
61{
62 return "Functional unit completion event";
63}
64
template <class Impl>
InstructionQueue<Impl>::InstructionQueue(Params *params)
    : fuPool(params->fuPool),
      numEntries(params->numIQEntries),
      totalWidth(params->issueWidth),
      numPhysIntRegs(params->numPhysIntRegs),
      numPhysFloatRegs(params->numPhysFloatRegs),
      commitToIEWDelay(params->commitToIEWDelay)
{
    // A functional unit pool is mandatory for this IQ implementation.
    assert(fuPool);

    switchedOut = false;

    numThreads = params->numberOfThreads;

    // Set the number of physical registers as the number of int + float
    numPhysRegs = numPhysIntRegs + numPhysFloatRegs;

    DPRINTF(IQ, "There are %i physical registers.\n", numPhysRegs);

    //Create an entry for each physical register within the
    //dependency graph.
    dependGraph.resize(numPhysRegs);

    // Resize the register scoreboard.
    regScoreboard.resize(numPhysRegs);

    //Initialize Mem Dependence Units
    for (int i = 0; i < numThreads; i++) {
        memDepUnit[i].init(params,i);
        memDepUnit[i].setIQ(this);
    }

    // Clear per-thread counts, ready lists, etc. (shared with switchOut()).
    resetState();

    string policy = params->smtIQPolicy;

    //Convert string to lowercase
    std::transform(policy.begin(), policy.end(), policy.begin(),
                   (int(*)(int)) tolower);

    //Figure out resource sharing policy
    if (policy == "dynamic") {
        iqPolicy = Dynamic;

        //Set Max Entries to Total ROB Capacity
        for (int i = 0; i < numThreads; i++) {
            maxEntries[i] = numEntries;
        }

    } else if (policy == "partitioned") {
        iqPolicy = Partitioned;

        //@todo:make work if part_amt doesnt divide evenly.
        int part_amt = numEntries / numThreads;

        //Divide ROB up evenly
        for (int i = 0; i < numThreads; i++) {
            maxEntries[i] = part_amt;
        }

        DPRINTF(Fetch, "IQ sharing policy set to Partitioned:"
                "%i entries per thread.\n",part_amt);

    } else if (policy == "threshold") {
        iqPolicy = Threshold;

        // smtIQThreshold is a percentage; convert it to a fraction.
        double threshold =  (double)params->smtIQThreshold / 100;

        int thresholdIQ = (int)((double)threshold * numEntries);

        //Divide up by threshold amount
        for (int i = 0; i < numThreads; i++) {
            maxEntries[i] = thresholdIQ;
        }

        DPRINTF(Fetch, "IQ sharing policy set to Threshold:"
                "%i entries per thread.\n",thresholdIQ);
    } else {
        assert(0 && "Invalid IQ Sharing Policy.Options Are:{Dynamic,"
               "Partitioned, Threshold}");
    }
}
148
template <class Impl>
InstructionQueue<Impl>::~InstructionQueue()
{
    // Release all dependency-graph nodes before printing statistics.
    dependGraph.reset();
    // Dump profiling counters collected by the dependency graph.
    cprintf("Nodes traversed: %i, removed: %i\n",
            dependGraph.nodesTraversed, dependGraph.nodesRemoved);
}
156
157template <class Impl>
158std::string
159InstructionQueue<Impl>::name() const
160{
161 return cpu->name() + ".iq";
162}
163
template <class Impl>
void
InstructionQueue<Impl>::regStats()
{
    // Register all IQ statistics with the global stats database.
    // Names/descriptions must stay stable: they are part of the
    // simulator's stats output format.
    using namespace Stats;
    iqInstsAdded
        .name(name() + ".iqInstsAdded")
        .desc("Number of instructions added to the IQ (excludes non-spec)")
        .prereq(iqInstsAdded);

    iqNonSpecInstsAdded
        .name(name() + ".iqNonSpecInstsAdded")
        .desc("Number of non-speculative instructions added to the IQ")
        .prereq(iqNonSpecInstsAdded);

    iqInstsIssued
        .name(name() + ".iqInstsIssued")
        .desc("Number of instructions issued")
        .prereq(iqInstsIssued);

    iqIntInstsIssued
        .name(name() + ".iqIntInstsIssued")
        .desc("Number of integer instructions issued")
        .prereq(iqIntInstsIssued);

    iqFloatInstsIssued
        .name(name() + ".iqFloatInstsIssued")
        .desc("Number of float instructions issued")
        .prereq(iqFloatInstsIssued);

    iqBranchInstsIssued
        .name(name() + ".iqBranchInstsIssued")
        .desc("Number of branch instructions issued")
        .prereq(iqBranchInstsIssued);

    iqMemInstsIssued
        .name(name() + ".iqMemInstsIssued")
        .desc("Number of memory instructions issued")
        .prereq(iqMemInstsIssued);

    iqMiscInstsIssued
        .name(name() + ".iqMiscInstsIssued")
        .desc("Number of miscellaneous instructions issued")
        .prereq(iqMiscInstsIssued);

    iqSquashedInstsIssued
        .name(name() + ".iqSquashedInstsIssued")
        .desc("Number of squashed instructions issued")
        .prereq(iqSquashedInstsIssued);

    iqSquashedInstsExamined
        .name(name() + ".iqSquashedInstsExamined")
        .desc("Number of squashed instructions iterated over during squash;"
              " mainly for profiling")
        .prereq(iqSquashedInstsExamined);

    iqSquashedOperandsExamined
        .name(name() + ".iqSquashedOperandsExamined")
        .desc("Number of squashed operands that are examined and possibly "
              "removed from graph")
        .prereq(iqSquashedOperandsExamined);

    iqSquashedNonSpecRemoved
        .name(name() + ".iqSquashedNonSpecRemoved")
        .desc("Number of squashed non-spec instructions that were removed")
        .prereq(iqSquashedNonSpecRemoved);

    // Distribution of dispatch-to-issue residency, bucketed per op class.
    queueResDist
        .init(Num_OpClasses, 0, 99, 2)
        .name(name() + ".IQ:residence:")
        .desc("cycles from dispatch to issue")
        .flags(total | pdf | cdf )
        ;
    for (int i = 0; i < Num_OpClasses; ++i) {
        queueResDist.subname(i, opClassStrings[i]);
    }
    numIssuedDist
        .init(0,totalWidth,1)
        .name(name() + ".ISSUE:issued_per_cycle")
        .desc("Number of insts issued each cycle")
        .flags(pdf)
        ;
/*
    dist_unissued
        .init(Num_OpClasses+2)
        .name(name() + ".ISSUE:unissued_cause")
        .desc("Reason ready instruction not issued")
        .flags(pdf | dist)
        ;
    for (int i=0; i < (Num_OpClasses + 2); ++i) {
        dist_unissued.subname(i, unissued_names[i]);
    }
*/
    // 2-D stat: per-thread count of issues, broken down by FU type.
    statIssuedInstType
        .init(numThreads,Num_OpClasses)
        .name(name() + ".ISSUE:FU_type")
        .desc("Type of FU issued")
        .flags(total | pdf | dist)
        ;
    statIssuedInstType.ysubnames(opClassStrings);

    //
    // How long did instructions for a particular FU type wait prior to issue
    //

    issueDelayDist
        .init(Num_OpClasses,0,99,2)
        .name(name() + ".ISSUE:")
        .desc("cycles from operands ready to issue")
        .flags(pdf | cdf)
        ;

    for (int i=0; i<Num_OpClasses; ++i) {
        stringstream subname;
        subname << opClassStrings[i] << "_delay";
        issueDelayDist.subname(i, subname.str());
    }

    // Formula stat: derived from other stats at dump time.
    issueRate
        .name(name() + ".ISSUE:rate")
        .desc("Inst issue rate")
        .flags(total)
        ;
    issueRate = iqInstsIssued / cpu->numCycles;
/*
    issue_stores
        .name(name() + ".ISSUE:stores")
        .desc("Number of stores issued")
        .flags(total)
        ;
    issue_stores = exe_refs - exe_loads;
*/
/*
    issue_op_rate
        .name(name() + ".ISSUE:op_rate")
        .desc("Operation issue rate")
        .flags(total)
        ;
    issue_op_rate = issued_ops / numCycles;
*/
    statFuBusy
        .init(Num_OpClasses)
        .name(name() + ".ISSUE:fu_full")
        .desc("attempts to use FU when none available")
        .flags(pdf | dist)
        ;
    for (int i=0; i < Num_OpClasses; ++i) {
        statFuBusy.subname(i, opClassStrings[i]);
    }

    fuBusy
        .init(numThreads)
        .name(name() + ".ISSUE:fu_busy_cnt")
        .desc("FU busy when requested")
        .flags(total)
        ;

    fuBusyRate
        .name(name() + ".ISSUE:fu_busy_rate")
        .desc("FU busy rate (busy events/executed inst)")
        .flags(total)
        ;
    fuBusyRate = fuBusy / iqInstsIssued;

    for ( int i=0; i < numThreads; i++) {
        // Tell mem dependence unit to reg stats as well.
        memDepUnit[i].regStats();
    }
}
333
template <class Impl>
void
InstructionQueue<Impl>::resetState()
{
    // Restore the IQ to its empty, post-construction state.  Called
    // from the constructor and from switchOut().

    //Initialize thread IQ counts
    for (int i = 0; i <numThreads; i++) {
        count[i] = 0;
        instList[i].clear();
    }

    // Initialize the number of free IQ entries.
    freeEntries = numEntries;

    // Note that in actuality, the registers corresponding to the logical
    // registers start off as ready. However this doesn't matter for the
    // IQ as the instruction should have been correctly told if those
    // registers are ready in rename. Thus it can all be initialized as
    // unready.
    for (int i = 0; i < numPhysRegs; ++i) {
        regScoreboard[i] = false;
    }

    for (int i = 0; i < numThreads; ++i) {
        squashedSeqNum[i] = 0;
    }

    // Drain the per-op-class ready priority queues and invalidate their
    // positions in the issue order list.
    for (int i = 0; i < Num_OpClasses; ++i) {
        while (!readyInsts[i].empty())
            readyInsts[i].pop();
        queueOnList[i] = false;
        readyIt[i] = listOrder.end();
    }
    nonSpecInsts.clear();
    listOrder.clear();
}
369
370template <class Impl>
371void
372InstructionQueue<Impl>::setActiveThreads(list<unsigned> *at_ptr)
373{
374 DPRINTF(IQ, "Setting active threads list pointer.\n");
375 activeThreads = at_ptr;
376}
377
378template <class Impl>
379void
380InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
381{
382 DPRINTF(IQ, "Set the issue to execute queue.\n");
383 issueToExecuteQueue = i2e_ptr;
384}
385
template <class Impl>
void
InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
{
    DPRINTF(IQ, "Set the time buffer.\n");
    timeBuffer = tb_ptr;

    // Read commit's messages delayed by the commit-to-IEW latency so the
    // IQ observes them on the correct cycle.
    fromCommit = timeBuffer->getWire(-commitToIEWDelay);
}
395
396template <class Impl>
397void
398InstructionQueue<Impl>::switchOut()
399{
400 resetState();
401 dependGraph.reset();
402 switchedOut = true;
403 for (int i = 0; i < numThreads; ++i) {
404 memDepUnit[i].switchOut();
405 }
406}
407
template <class Impl>
void
InstructionQueue<Impl>::takeOverFrom()
{
    // Re-activate the IQ after a CPU switch; state was already cleared
    // by switchOut() on the outgoing CPU.
    switchedOut = false;
}
414
415template <class Impl>
416int
417InstructionQueue<Impl>::entryAmount(int num_threads)
418{
419 if (iqPolicy == Partitioned) {
420 return numEntries / num_threads;
421 } else {
422 return 0;
423 }
424}
425
426
427template <class Impl>
428void
429InstructionQueue<Impl>::resetEntries()
430{
431 if (iqPolicy != Dynamic || numThreads > 1) {
432 int active_threads = (*activeThreads).size();
433
434 list<unsigned>::iterator threads = (*activeThreads).begin();
435 list<unsigned>::iterator list_end = (*activeThreads).end();
436
437 while (threads != list_end) {
438 if (iqPolicy == Partitioned) {
439 maxEntries[*threads++] = numEntries / active_threads;
440 } else if(iqPolicy == Threshold && active_threads == 1) {
441 maxEntries[*threads++] = numEntries;
442 }
443 }
444 }
445}
446
447template <class Impl>
448unsigned
449InstructionQueue<Impl>::numFreeEntries()
450{
451 return freeEntries;
452}
453
454template <class Impl>
455unsigned
456InstructionQueue<Impl>::numFreeEntries(unsigned tid)
457{
458 return maxEntries[tid] - count[tid];
459}
460
461// Might want to do something more complex if it knows how many instructions
462// will be issued this cycle.
463template <class Impl>
464bool
465InstructionQueue<Impl>::isFull()
466{
467 if (freeEntries == 0) {
468 return(true);
469 } else {
470 return(false);
471 }
472}
473
474template <class Impl>
475bool
476InstructionQueue<Impl>::isFull(unsigned tid)
477{
478 if (numFreeEntries(tid) == 0) {
479 return(true);
480 } else {
481 return(false);
482 }
483}
484
485template <class Impl>
486bool
487InstructionQueue<Impl>::hasReadyInsts()
488{
489 if (!listOrder.empty()) {
490 return true;
491 }
492
493 for (int i = 0; i < Num_OpClasses; ++i) {
494 if (!readyInsts[i].empty()) {
495 return true;
496 }
497 }
498
499 return false;
500}
501
template <class Impl>
void
InstructionQueue<Impl>::insert(DynInstPtr &new_inst)
{
    // Make sure the instruction is valid
    assert(new_inst);

    DPRINTF(IQ, "Adding instruction [sn:%lli] PC %#x to the IQ.\n",
            new_inst->seqNum, new_inst->readPC());

    // Dispatch must have verified there was room before calling insert.
    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Look through its source registers (physical regs), and mark any
    // dependencies.
    addToDependents(new_inst);

    // Have this instruction set itself as the producer of its destination
    // register(s).
    addToProducers(new_inst);

    if (new_inst->isMemRef()) {
        // Memory references go through the memory dependence unit, which
        // decides when they may issue.
        memDepUnit[new_inst->threadNumber].insert(new_inst);
    } else {
        addIfReady(new_inst);
    }

    ++iqInstsAdded;

    count[new_inst->threadNumber]++;

    // Sanity check: free-entry bookkeeping matches the actual occupancy.
    assert(freeEntries == (numEntries - countInsts()));
}
540
template <class Impl>
void
InstructionQueue<Impl>::insertNonSpec(DynInstPtr &new_inst)
{
    // @todo: Clean up this code; can do it by setting inst as unable
    // to issue, then calling normal insert on the inst.

    assert(new_inst);

    // Track it by sequence number so commit can later mark it schedulable
    // via scheduleNonSpec().
    nonSpecInsts[new_inst->seqNum] = new_inst;

    DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %#x "
            "to the IQ.\n",
            new_inst->seqNum, new_inst->readPC());

    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Have this instruction set itself as the producer of its destination
    // register(s).
    // Note: unlike insert(), source dependencies are NOT tracked; a
    // non-spec instruction waits for commit's go-ahead instead.
    addToProducers(new_inst);

    // If it's a memory instruction, add it to the memory dependency
    // unit.
    if (new_inst->isMemRef()) {
        memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
    }

    ++iqNonSpecInstsAdded;

    count[new_inst->threadNumber]++;

    // Sanity check: free-entry bookkeeping matches the actual occupancy.
    assert(freeEntries == (numEntries - countInsts()));
}
580
581template <class Impl>
582void
583InstructionQueue<Impl>::insertBarrier(DynInstPtr &barr_inst)
584{
585 memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);
586
587 insertNonSpec(barr_inst);
588}
589
590template <class Impl>
591typename Impl::DynInstPtr
592InstructionQueue<Impl>::getInstToExecute()
593{
594 assert(!instsToExecute.empty());
595 DynInstPtr inst = instsToExecute.front();
596 instsToExecute.pop_front();
597 return inst;
598}
599
template <class Impl>
void
InstructionQueue<Impl>::addToOrderList(OpClass op_class)
{
    // Insert this op class into listOrder, which is kept sorted by the
    // sequence number of each class's oldest ready instruction so that
    // scheduleReadyInsts() issues oldest-first across classes.
    assert(!readyInsts[op_class].empty());

    ListOrderEntry queue_entry;

    queue_entry.queueType = op_class;

    // The priority queue's top is this class's oldest ready instruction.
    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    ListOrderIt list_it = listOrder.begin();
    ListOrderIt list_end_it = listOrder.end();

    // Linear scan for the first entry younger than ours; insert before it
    // to keep the list in ascending seqNum order.
    while (list_it != list_end_it) {
        if ((*list_it).oldestInst > queue_entry.oldestInst) {
            break;
        }

        list_it++;
    }

    // Remember where this class sits so it can be moved/erased later.
    readyIt[op_class] = listOrder.insert(list_it, queue_entry);
    queueOnList[op_class] = true;
}
626
template <class Impl>
void
InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
{
    // Get iterator of next item on the list
    // Delete the original iterator
    // Determine if the next item is either the end of the list or younger
    // than the new instruction. If so, then add in a new iterator right here.
    // If not, then move along.
    ListOrderEntry queue_entry;
    OpClass op_class = (*list_order_it).queueType;
    ListOrderIt next_it = list_order_it;

    ++next_it;

    queue_entry.queueType = op_class;
    // New oldest instruction for this class after the previous top was
    // popped by the caller.
    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    // Walk forward to keep listOrder sorted by ascending seqNum.
    while (next_it != listOrder.end() &&
           (*next_it).oldestInst < queue_entry.oldestInst) {
        ++next_it;
    }

    // Note: the caller erases the old entry; we only insert the new one
    // and update this class's cached iterator.
    readyIt[op_class] = listOrder.insert(next_it, queue_entry);
}
652
template <class Impl>
void
InstructionQueue<Impl>::processFUCompletion(DynInstPtr &inst, int fu_idx)
{
    // The CPU could have been sleeping until this op completed (*extremely*
    // long latency op). Wake it if it was. This may be overkill.
    if (isSwitchedOut()) {
        // The IQ was switched out while this event was in flight; drop it.
        return;
    }

    iewStage->wakeCPU();

    // fu_idx is -1 when the issuing path already freed the unit
    // (pipelined FU case); only free it here otherwise.
    if (fu_idx > -1)
        fuPool->freeUnitNextCycle(fu_idx);

    // @todo: Ensure that these FU Completions happen at the beginning
    // of a cycle, otherwise they could add too many instructions to
    // the queue.
    // @todo: This could break if there's multiple multi-cycle ops
    // finishing on this cycle.  Maybe implement something like
    // instToCommit in iew_impl.hh.
    issueToExecuteQueue->access(0)->size++;
    instsToExecute.push_back(inst);
//    int &size = issueToExecuteQueue->access(0)->size;

//    issueToExecuteQueue->access(0)->insts[size++] = inst;
}
680
681// @todo: Figure out a better way to remove the squashed items from the
682// lists. Checking the top item of each list to see if it's squashed
683// wastes time and forces jumps.
template <class Impl>
void
InstructionQueue<Impl>::scheduleReadyInsts()
{
    DPRINTF(IQ, "Attempting to schedule ready instructions from "
            "the IQ.\n");

    IssueStruct *i2e_info = issueToExecuteQueue->access(0);

    // Have iterator to head of the list
    // While I haven't exceeded bandwidth or reached the end of the list,
    // Try to get a FU that can do what this op needs.
    // If successful, change the oldestInst to the new top of the list, put
    // the queue in the proper place in the list.
    // Increment the iterator.
    // This will avoid trying to schedule a certain op class if there are no
    // FUs that handle it.
    ListOrderIt order_it = listOrder.begin();
    ListOrderIt order_end_it = listOrder.end();
    int total_issued = 0;

    while (total_issued < totalWidth &&
           order_it != order_end_it) {
        OpClass op_class = (*order_it).queueType;

        assert(!readyInsts[op_class].empty());

        DynInstPtr issuing_inst = readyInsts[op_class].top();

        // listOrder must mirror the top of each ready queue.
        assert(issuing_inst->seqNum == (*order_it).oldestInst);

        if (issuing_inst->isSquashed()) {
            // Discard squashed instructions lazily here rather than
            // searching the priority queues at squash time.
            readyInsts[op_class].pop();

            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            // Post-increment: erase the current entry while keeping a
            // valid iterator to the next one.
            listOrder.erase(order_it++);

            ++iqSquashedInstsIssued;

            continue;
        }

        // idx == -2 means "no FU needed"; -1 means "none available".
        int idx = -2;
        int op_latency = 1;
        int tid = issuing_inst->threadNumber;

        if (op_class != No_OpClass) {
            idx = fuPool->getUnit(op_class);

            if (idx > -1) {
                op_latency = fuPool->getOpLatency(op_class);
            }
        }

        if (idx == -2 || idx != -1) {
            if (op_latency == 1) {
                // Single-cycle op: hand it to execute this cycle.
//                i2e_info->insts[exec_queue_slot++] = issuing_inst;
                i2e_info->size++;
                instsToExecute.push_back(issuing_inst);

                // Add the FU onto the list of FU's to be freed next
                // cycle if we used one.
                if (idx >= 0)
                    fuPool->freeUnitNextCycle(idx);
            } else {
                // Multi-cycle op: schedule an FUCompletion event that
                // delivers the instruction when the FU finishes.
                int issue_latency = fuPool->getIssueLatency(op_class);
                // Generate completion event for the FU
                FUCompletion *execution = new FUCompletion(issuing_inst,
                                                           idx, this);

                execution->schedule(curTick + cpu->cycles(issue_latency - 1));

                // @todo: Enforce that issue_latency == 1 or op_latency
                if (issue_latency > 1) {
                    // Unpipelined FU: the completion event frees it.
                    execution->setFreeFU();
                } else {
                    // @todo: Not sure I'm accounting for the
                    // multi-cycle op in a pipelined FU properly, or
                    // the number of instructions issued in one cycle.
//                    i2e_info->insts[exec_queue_slot++] = issuing_inst;
//                    i2e_info->size++;

                    // Add the FU onto the list of FU's to be freed next cycle.
                    fuPool->freeUnitNextCycle(idx);
                }
            }

            DPRINTF(IQ, "Thread %i: Issuing instruction PC %#x "
                    "[sn:%lli]\n",
                    tid, issuing_inst->readPC(),
                    issuing_inst->seqNum);

            readyInsts[op_class].pop();

            // Re-sort this class's position in listOrder against its new
            // oldest ready instruction, or drop it if it emptied out.
            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            issuing_inst->setIssued();
            ++total_issued;

            if (!issuing_inst->isMemRef()) {
                // Memory instructions can not be freed from the IQ until they
                // complete.
                ++freeEntries;
                count[tid]--;
                issuing_inst->removeInIQ();
            } else {
                memDepUnit[tid].issue(issuing_inst);
            }

            listOrder.erase(order_it++);
            statIssuedInstType[tid][op_class]++;
        } else {
            // All FUs for this class are busy; try the next class.
            statFuBusy[op_class]++;
            fuBusy[tid]++;
            ++order_it;
        }
    }

    numIssuedDist.sample(total_issued);
    iqInstsIssued+= total_issued;

    if (total_issued) {
        cpu->activityThisCycle();
    } else {
        DPRINTF(IQ, "Not able to schedule any instructions.\n");
    }
}
822
template <class Impl>
void
InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
{
    // Commit has reached this non-speculative instruction; it is now
    // allowed to issue.
    DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
            "to execute.\n", inst);

    NonSpecMapIt inst_it = nonSpecInsts.find(inst);

    assert(inst_it != nonSpecInsts.end());

    unsigned tid = (*inst_it).second->threadNumber;

    (*inst_it).second->setCanIssue();

    if (!(*inst_it).second->isMemRef()) {
        // Non-memory: put straight onto the ready list if operands ready.
        addIfReady((*inst_it).second);
    } else {
        // Memory: the memory dependence unit governs when it issues.
        memDepUnit[tid].nonSpecInstReady((*inst_it).second);
    }

    // Drop the map's reference before erasing the entry.
    (*inst_it).second = NULL;

    nonSpecInsts.erase(inst_it);
}
848
template <class Impl>
void
InstructionQueue<Impl>::commit(const InstSeqNum &inst, unsigned tid)
{
    // Retire everything at the head of this thread's inst list up to and
    // including sequence number 'inst'.  These entries were already freed
    // at issue/completion time; this just trims the bookkeeping list.
    DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n",
            tid,inst);

    ListIt iq_it = instList[tid].begin();

    while (iq_it != instList[tid].end() &&
           (*iq_it)->seqNum <= inst) {
        // Advance first, then pop the element behind us (std::list
        // iterators to other elements stay valid).
        ++iq_it;
        instList[tid].pop_front();
    }

    assert(freeEntries == (numEntries - countInsts()));
}
866
template <class Impl>
int
InstructionQueue<Impl>::wakeDependents(DynInstPtr &completed_inst)
{
    // Returns the number of consumer instructions woken up.
    int dependents = 0;

    DPRINTF(IQ, "Waking dependents of completed instruction.\n");

    assert(!completed_inst->isSquashed());

    // Tell the memory dependence unit to wake any dependents on this
    // instruction if it is a memory instruction.  Also complete the memory
    // instruction at this point since we know it executed without issues.
    // @todo: Might want to rename "completeMemInst" to something that
    // indicates that it won't need to be replayed, and call this
    // earlier.  Might not be a big deal.
    if (completed_inst->isMemRef()) {
        memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
        completeMemInst(completed_inst);
    } else if (completed_inst->isMemBarrier() ||
               completed_inst->isWriteBarrier()) {
        memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
    }

    for (int dest_reg_idx = 0;
         dest_reg_idx < completed_inst->numDestRegs();
         dest_reg_idx++)
    {
        PhysRegIndex dest_reg =
            completed_inst->renamedDestRegIdx(dest_reg_idx);

        // Special case of uniq or control registers.  They are not
        // handled by the IQ and thus have no dependency graph entry.
        // @todo Figure out a cleaner way to handle this.
        if (dest_reg >= numPhysRegs) {
            continue;
        }

        DPRINTF(IQ, "Waking any dependents on register %i.\n",
                (int) dest_reg);

        //Go through the dependency chain, marking the registers as
        //ready within the waiting instructions.
        DynInstPtr dep_inst = dependGraph.pop(dest_reg);

        while (dep_inst) {
            DPRINTF(IQ, "Waking up a dependent instruction, PC%#x.\n",
                    dep_inst->readPC());

            // Might want to give more information to the instruction
            // so that it knows which of its source registers is
            // ready.  However that would mean that the dependency
            // graph entries would need to hold the src_reg_idx.
            dep_inst->markSrcRegReady();

            // Consumer may now have all operands; try to schedule it.
            addIfReady(dep_inst);

            dep_inst = dependGraph.pop(dest_reg);

            ++dependents;
        }

        // Reset the head node now that all of its dependents have
        // been woken up.
        assert(dependGraph.empty(dest_reg));
        dependGraph.clearInst(dest_reg);

        // Mark the scoreboard as having that register ready.
        regScoreboard[dest_reg] = true;
    }
    return dependents;
}
939
template <class Impl>
void
InstructionQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
{
    // Called by the memory dependence unit once a memory instruction is
    // clear to issue; put it on the op-class ready queue.
    OpClass op_class = ready_inst->opClass();

    readyInsts[op_class].push(ready_inst);

    // Will need to reorder the list if either a queue is not on the list,
    // or it has an older instruction than last time.
    if (!queueOnList[op_class]) {
        addToOrderList(op_class);
    } else if (readyInsts[op_class].top()->seqNum <
               (*readyIt[op_class]).oldestInst) {
        // The new arrival is older than the recorded oldest; re-insert at
        // the correct sorted position.
        listOrder.erase(readyIt[op_class]);
        addToOrderList(op_class);
    }

    DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
            "the ready list, PC %#x opclass:%i [sn:%lli].\n",
            ready_inst->readPC(), op_class, ready_inst->seqNum);
}
962
963template <class Impl>
964void
965InstructionQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
966{
967 memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
968}
969
970template <class Impl>
971void
972InstructionQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
973{
974 memDepUnit[replay_inst->threadNumber].replay(replay_inst);
975}
976
977template <class Impl>
978void
979InstructionQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
980{
981 int tid = completed_inst->threadNumber;
982
983 DPRINTF(IQ, "Completing mem instruction PC:%#x [sn:%lli]\n",
984 completed_inst->readPC(), completed_inst->seqNum);
985
986 ++freeEntries;
987
988 completed_inst->memOpDone = true;
989
990 memDepUnit[tid].completed(completed_inst);
991
992 count[tid]--;
993}
994
995template <class Impl>
996void
997InstructionQueue<Impl>::violation(DynInstPtr &store,
998 DynInstPtr &faulting_load)
999{
1000 memDepUnit[store->threadNumber].violation(store, faulting_load);
1001}
1002
template <class Impl>
void
InstructionQueue<Impl>::squash(unsigned tid)
{
    DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
            "the IQ.\n", tid);

    // Read instruction sequence number of last instruction out of the
    // time buffer.
    squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;

    // Call doSquash if there are insts in the IQ
    if (count[tid] > 0) {
        doSquash(tid);
    }

    // Also tell the memory dependence unit to squash.
    memDepUnit[tid].squash(squashedSeqNum[tid], tid);
}
1022
template <class Impl>
void
InstructionQueue<Impl>::doSquash(unsigned tid)
{
    // Start at the tail.  Caller guarantees instList[tid] is non-empty
    // (count[tid] > 0), so end()-1 is a valid element.
    ListIt squash_it = instList[tid].end();
    --squash_it;

    DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
            tid, squashedSeqNum[tid]);

    // Squash any instructions younger than the squashed sequence number
    // given.  Walks the list youngest-to-oldest, erasing as it goes.
    while (squash_it != instList[tid].end() &&
           (*squash_it)->seqNum > squashedSeqNum[tid]) {

        DynInstPtr squashed_inst = (*squash_it);

        // Only handle the instruction if it actually is in the IQ and
        // hasn't already been squashed in the IQ.
        if (squashed_inst->threadNumber != tid ||
            squashed_inst->isSquashedInIQ()) {
            --squash_it;
            continue;
        }

        if (!squashed_inst->isIssued() ||
            (squashed_inst->isMemRef() &&
             !squashed_inst->memOpDone)) {

            // Remove the instruction from the dependency list.
            if (!squashed_inst->isNonSpeculative() &&
                !squashed_inst->isStoreConditional() &&
                !squashed_inst->isMemBarrier() &&
                !squashed_inst->isWriteBarrier()) {

                for (int src_reg_idx = 0;
                     src_reg_idx < squashed_inst->numSrcRegs();
                     src_reg_idx++)
                {
                    PhysRegIndex src_reg =
                        squashed_inst->renamedSrcRegIdx(src_reg_idx);

                    // Only remove it from the dependency graph if it
                    // was placed there in the first place.

                    // Instead of doing a linked list traversal, we
                    // can just remove these squashed instructions
                    // either at issue time, or when the register is
                    // overwritten.  The only downside to this is it
                    // leaves more room for error.

                    if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
                        src_reg < numPhysRegs) {
                        dependGraph.remove(src_reg, squashed_inst);
                    }


                    ++iqSquashedOperandsExamined;
                }
            } else {
                // Non-spec/barrier instructions live in nonSpecInsts
                // instead of the dependency graph; unhook them there.
                NonSpecMapIt ns_inst_it =
                    nonSpecInsts.find(squashed_inst->seqNum);
                assert(ns_inst_it != nonSpecInsts.end());

                (*ns_inst_it).second = NULL;

                nonSpecInsts.erase(ns_inst_it);

                ++iqSquashedNonSpecRemoved;
            }

            // Might want to also clear out the head of the dependency graph.

            // Mark it as squashed within the IQ.
            squashed_inst->setSquashedInIQ();

            // @todo: Remove this hack where several statuses are set so the
            // inst will flow through the rest of the pipeline.
            squashed_inst->setIssued();
            squashed_inst->setCanCommit();
            squashed_inst->removeInIQ();

            //Update Thread IQ Count
            count[squashed_inst->threadNumber]--;

            ++freeEntries;

            DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %#x "
                    "squashed.\n",
                    tid, squashed_inst->seqNum, squashed_inst->readPC());
        }

        // Erase the current element while stepping the iterator back to
        // the next-older instruction (post-decrement keeps it valid).
        instList[tid].erase(squash_it--);
        ++iqSquashedInstsExamined;
    }
}
1120
1121template <class Impl>
1122bool
1123InstructionQueue<Impl>::addToDependents(DynInstPtr &new_inst)
1124{
1125 // Loop through the instruction's source registers, adding
1126 // them to the dependency list if they are not ready.
1127 int8_t total_src_regs = new_inst->numSrcRegs();
1128 bool return_val = false;
1129

--- 7 unchanged lines hidden (view full) ---

1137
1138 // Check the IQ's scoreboard to make sure the register
1139 // hasn't become ready while the instruction was in flight
1140 // between stages. Only if it really isn't ready should
1141 // it be added to the dependency graph.
1142 if (src_reg >= numPhysRegs) {
1143 continue;
1144 } else if (regScoreboard[src_reg] == false) {
1145 DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
1146 "is being added to the dependency chain.\n",
1147 new_inst->readPC(), src_reg);
1148
1149 dependGraph.insert(src_reg, new_inst);
1150
1151 // Change the return value to indicate that something
1152 // was added to the dependency graph.
1153 return_val = true;
1154 } else {
1155 DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
1156 "became ready before it reached the IQ.\n",
1157 new_inst->readPC(), src_reg);
1158 // Mark a register ready within the instruction.
1159 new_inst->markSrcRegReady(src_reg_idx);
1160 }
1161 }
1162 }
1163
1164 return return_val;
1165}
1166
1167template <class Impl>
1168void
1169InstructionQueue<Impl>::addToProducers(DynInstPtr &new_inst)
1170{
1171 // Nothing really needs to be marked when an instruction becomes
1172 // the producer of a register's value, but for convenience a ptr
1173 // to the producing instruction will be placed in the head node of
1174 // the dependency links.
1175 int8_t total_dest_regs = new_inst->numDestRegs();
1176
1177 for (int dest_reg_idx = 0;
1178 dest_reg_idx < total_dest_regs;
1179 dest_reg_idx++)
1180 {
1181 PhysRegIndex dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);
1182
1183 // Instructions that use the misc regs will have a reg number
1184 // higher than the normal physical registers. In this case these
1185 // registers are not renamed, and there is no need to track
1186 // dependencies as these instructions must be executed at commit.
1187 if (dest_reg >= numPhysRegs) {
1188 continue;
1189 }
1190
1191 if (!dependGraph.empty(dest_reg)) {
1192 dependGraph.dump();
1193 panic("Dependency graph %i not empty!", dest_reg);
1194 }
1195
1196 dependGraph.setInst(dest_reg, new_inst);
1197
1198 // Mark the scoreboard to say it's not yet ready.
1199 regScoreboard[dest_reg] = false;
1200 }
1201}
1202
1203template <class Impl>
1204void
1205InstructionQueue<Impl>::addIfReady(DynInstPtr &inst)
1206{
1207 // If the instruction now has all of its source registers
1208 // available, then add it to the list of ready instructions.
1209 if (inst->readyToIssue()) {
1210
1211 //Add the instruction to the proper ready list.
1212 if (inst->isMemRef()) {
1213
1214 DPRINTF(IQ, "Checking if memory instruction can issue.\n");
1215
1216 // Message to the mem dependence unit that this instruction has
1217 // its registers ready.
1218 memDepUnit[inst->threadNumber].regsReady(inst);
1219
1220 return;
1221 }
1222
1223 OpClass op_class = inst->opClass();
1224
1225 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
1226 "the ready list, PC %#x opclass:%i [sn:%lli].\n",
1227 inst->readPC(), op_class, inst->seqNum);
1228
1229 readyInsts[op_class].push(inst);
1230
1231 // Will need to reorder the list if either a queue is not on the list,
1232 // or it has an older instruction than last time.
1233 if (!queueOnList[op_class]) {
1234 addToOrderList(op_class);
1235 } else if (readyInsts[op_class].top()->seqNum <
1236 (*readyIt[op_class]).oldestInst) {
1237 listOrder.erase(readyIt[op_class]);
1238 addToOrderList(op_class);
1239 }
1240 }
1241}
1242
1243template <class Impl>
1244int
1245InstructionQueue<Impl>::countInsts()
1246{
1247 //ksewell:This works but definitely could use a cleaner write
1248 //with a more intuitive way of counting. Right now it's
1249 //just brute force ....
1250
1251#if 0
1252 int total_insts = 0;
1253
1254 for (int i = 0; i < numThreads; ++i) {
1255 ListIt count_it = instList[i].begin();
1256
1257 while (count_it != instList[i].end()) {
1258 if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
1259 if (!(*count_it)->isIssued()) {
1260 ++total_insts;
1261 } else if ((*count_it)->isMemRef() &&
1262 !(*count_it)->memOpDone) {
1263 // Loads that have not been marked as executed still count
1264 // towards the total instructions.
1265 ++total_insts;
1266 }
1267 }
1268
1269 ++count_it;
1270 }
1271 }
1272
1273 return total_insts;
1274#else
1275 return numEntries - freeEntries;
1276#endif
1277}
1278
1279template <class Impl>
1280void
1281InstructionQueue<Impl>::dumpLists()
1282{
1283 for (int i = 0; i < Num_OpClasses; ++i) {
1284 cprintf("Ready list %i size: %i\n", i, readyInsts[i].size());
1285
1286 cprintf("\n");
1287 }
1288
1289 cprintf("Non speculative list size: %i\n", nonSpecInsts.size());
1290
1291 NonSpecMapIt non_spec_it = nonSpecInsts.begin();
1292 NonSpecMapIt non_spec_end_it = nonSpecInsts.end();
1293
1294 cprintf("Non speculative list: ");
1295
1296 while (non_spec_it != non_spec_end_it) {
1297 cprintf("%#x [sn:%lli]", (*non_spec_it).second->readPC(),
1298 (*non_spec_it).second->seqNum);
1299 ++non_spec_it;
1300 }
1301
1302 cprintf("\n");
1303
1304 ListOrderIt list_order_it = listOrder.begin();
1305 ListOrderIt list_order_end_it = listOrder.end();
1306 int i = 1;
1307
1308 cprintf("List order: ");
1309
1310 while (list_order_it != list_order_end_it) {
1311 cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
1312 (*list_order_it).oldestInst);
1313
1314 ++list_order_it;
1315 ++i;
1316 }
1317
1318 cprintf("\n");
1319}
1320
1321
1322template <class Impl>
1323void
1324InstructionQueue<Impl>::dumpInsts()
1325{
1326 for (int i = 0; i < numThreads; ++i) {
1327 int num = 0;
1328 int valid_num = 0;
1329 ListIt inst_list_it = instList[i].begin();
1330
1331 while (inst_list_it != instList[i].end())
1332 {
1333 cprintf("Instruction:%i\n",
1334 num);
1335 if (!(*inst_list_it)->isSquashed()) {
1336 if (!(*inst_list_it)->isIssued()) {
1337 ++valid_num;
1338 cprintf("Count:%i\n", valid_num);
1339 } else if ((*inst_list_it)->isMemRef() &&
1340 !(*inst_list_it)->memOpDone) {
1341 // Loads that have not been marked as executed
1342 // still count towards the total instructions.
1343 ++valid_num;
1344 cprintf("Count:%i\n", valid_num);
1345 }
1346 }
1347
1348 cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
1349 "Issued:%i\nSquashed:%i\n",
1350 (*inst_list_it)->readPC(),
1351 (*inst_list_it)->seqNum,
1352 (*inst_list_it)->threadNumber,
1353 (*inst_list_it)->isIssued(),
1354 (*inst_list_it)->isSquashed());
1355
1356 if ((*inst_list_it)->isMemRef()) {
1357 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
1358 }
1359
1360 cprintf("\n");
1361
1362 inst_list_it++;
1363 ++num;
1364 }
1365 }
1366}