/*
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <limits>
#include <vector>

#include "sim/root.hh"

#include "cpu/o3/fu_pool.hh"
#include "cpu/o3/inst_queue.hh"

using namespace std;

template <class Impl>
InstructionQueue<Impl>::FUCompletion::FUCompletion(DynInstPtr &_inst,
                                                   int fu_idx,
                                                   InstructionQueue<Impl> *iq_ptr)
    : Event(&mainEventQueue, Stat_Event_Pri),
      inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false)
{
    this->setFlags(Event::AutoDelete);
}

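// When the completion event fires, hand the instruction back to the IQ.
// The FU index is forwarded only if the event was told to free the unit
// itself (see setFreeFU() in scheduleReadyInsts()); otherwise -1 is
// passed and the unit is freed at issue time instead.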
template <class Impl>
void
InstructionQueue<Impl>::FUCompletion::process()
{
    iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1);
    inst = NULL;
}


template <class Impl>
const char *
InstructionQueue<Impl>::FUCompletion::description()
{
    return "Functional unit completion event";
}

template <class Impl>
InstructionQueue<Impl>::InstructionQueue(Params *params)
    : dcacheInterface(params->dcacheInterface),
      fuPool(params->fuPool),
      numEntries(params->numIQEntries),
      totalWidth(params->issueWidth),
      numPhysIntRegs(params->numPhysIntRegs),
      numPhysFloatRegs(params->numPhysFloatRegs),
      commitToIEWDelay(params->commitToIEWDelay)
{
    assert(fuPool);

    switchedOut = false;

    numThreads = params->numberOfThreads;

    // Set the number of physical registers as the number of int + float
    numPhysRegs = numPhysIntRegs + numPhysFloatRegs;

    DPRINTF(IQ, "There are %i physical registers.\n", numPhysRegs);

    //Create an entry for each physical register within the
    //dependency graph.
    dependGraph.resize(numPhysRegs);

    // Resize the register scoreboard.
    regScoreboard.resize(numPhysRegs);

    //Initialize Mem Dependence Units
    for (int i = 0; i < numThreads; i++) {
        memDepUnit[i].init(params, i);
        memDepUnit[i].setIQ(this);
    }

    resetState();

    string policy = params->smtIQPolicy;

    //Convert string to lowercase
    std::transform(policy.begin(), policy.end(), policy.begin(),
                   (int(*)(int)) tolower);

    //Figure out resource sharing policy
    if (policy == "dynamic") {
        iqPolicy = Dynamic;

        //Set Max Entries to Total IQ Capacity
        for (int i = 0; i < numThreads; i++) {
            maxEntries[i] = numEntries;
        }

    } else if (policy == "partitioned") {
        iqPolicy = Partitioned;

        //@todo: make work if part_amt doesn't divide evenly.
        int part_amt = numEntries / numThreads;

        //Divide the IQ up evenly among threads
        for (int i = 0; i < numThreads; i++) {
            maxEntries[i] = part_amt;
        }

        DPRINTF(Fetch, "IQ sharing policy set to Partitioned: "
                "%i entries per thread.\n", part_amt);

    } else if (policy == "threshold") {
        iqPolicy = Threshold;

        double threshold = (double)params->smtIQThreshold / 100;

        int thresholdIQ = (int)((double)threshold * numEntries);

        //Divide up by threshold amount
        for (int i = 0; i < numThreads; i++) {
            maxEntries[i] = thresholdIQ;
        }

        DPRINTF(Fetch, "IQ sharing policy set to Threshold: "
                "%i entries per thread.\n", thresholdIQ);
    } else {
        assert(0 && "Invalid IQ sharing policy. Options are: {Dynamic, "
               "Partitioned, Threshold}");
    }
}

template <class Impl>
InstructionQueue<Impl>::~InstructionQueue()
{
    dependGraph.reset();
    cprintf("Nodes traversed: %i, removed: %i\n",
            dependGraph.nodesTraversed, dependGraph.nodesRemoved);
}

template <class Impl>
std::string
InstructionQueue<Impl>::name() const
{
    return cpu->name() + ".iq";
}

template <class Impl>
void
InstructionQueue<Impl>::regStats()
{
    using namespace Stats;
    iqInstsAdded
        .name(name() + ".iqInstsAdded")
        .desc("Number of instructions added to the IQ (excludes non-spec)")
        .prereq(iqInstsAdded);

    iqNonSpecInstsAdded
        .name(name() + ".iqNonSpecInstsAdded")
        .desc("Number of non-speculative instructions added to the IQ")
        .prereq(iqNonSpecInstsAdded);

    iqInstsIssued
        .name(name() + ".iqInstsIssued")
        .desc("Number of instructions issued")
        .prereq(iqInstsIssued);

    iqIntInstsIssued
        .name(name() + ".iqIntInstsIssued")
        .desc("Number of integer instructions issued")
        .prereq(iqIntInstsIssued);

    iqFloatInstsIssued
        .name(name() + ".iqFloatInstsIssued")
        .desc("Number of float instructions issued")
        .prereq(iqFloatInstsIssued);

    iqBranchInstsIssued
        .name(name() + ".iqBranchInstsIssued")
        .desc("Number of branch instructions issued")
        .prereq(iqBranchInstsIssued);

    iqMemInstsIssued
        .name(name() + ".iqMemInstsIssued")
        .desc("Number of memory instructions issued")
        .prereq(iqMemInstsIssued);

    iqMiscInstsIssued
        .name(name() + ".iqMiscInstsIssued")
        .desc("Number of miscellaneous instructions issued")
        .prereq(iqMiscInstsIssued);

    iqSquashedInstsIssued
        .name(name() + ".iqSquashedInstsIssued")
        .desc("Number of squashed instructions issued")
        .prereq(iqSquashedInstsIssued);

    iqSquashedInstsExamined
        .name(name() + ".iqSquashedInstsExamined")
        .desc("Number of squashed instructions iterated over during squash;"
              " mainly for profiling")
        .prereq(iqSquashedInstsExamined);

    iqSquashedOperandsExamined
        .name(name() + ".iqSquashedOperandsExamined")
        .desc("Number of squashed operands that are examined and possibly "
              "removed from graph")
        .prereq(iqSquashedOperandsExamined);

    iqSquashedNonSpecRemoved
        .name(name() + ".iqSquashedNonSpecRemoved")
        .desc("Number of squashed non-spec instructions that were removed")
        .prereq(iqSquashedNonSpecRemoved);

    queueResDist
        .init(Num_OpClasses, 0, 99, 2)
        .name(name() + ".IQ:residence:")
        .desc("cycles from dispatch to issue")
        .flags(total | pdf | cdf)
        ;
    for (int i = 0; i < Num_OpClasses; ++i) {
        queueResDist.subname(i, opClassStrings[i]);
    }

    numIssuedDist
        .init(0, totalWidth, 1)
        .name(name() + ".ISSUE:issued_per_cycle")
        .desc("Number of insts issued each cycle")
        .flags(pdf)
        ;
/*
    dist_unissued
        .init(Num_OpClasses+2)
        .name(name() + ".ISSUE:unissued_cause")
        .desc("Reason ready instruction not issued")
        .flags(pdf | dist)
        ;
    for (int i = 0; i < (Num_OpClasses + 2); ++i) {
        dist_unissued.subname(i, unissued_names[i]);
    }
*/
    statIssuedInstType
        .init(numThreads, Num_OpClasses)
        .name(name() + ".ISSUE:FU_type")
        .desc("Type of FU issued")
        .flags(total | pdf | dist)
        ;
    statIssuedInstType.ysubnames(opClassStrings);

    //
    // How long did instructions for a particular FU type wait prior to issue
    //

    issueDelayDist
        .init(Num_OpClasses, 0, 99, 2)
        .name(name() + ".ISSUE:")
        .desc("cycles from operands ready to issue")
        .flags(pdf | cdf)
        ;

    for (int i = 0; i < Num_OpClasses; ++i) {
        stringstream subname;
        subname << opClassStrings[i] << "_delay";
        issueDelayDist.subname(i, subname.str());
    }

    issueRate
        .name(name() + ".ISSUE:rate")
        .desc("Inst issue rate")
        .flags(total)
        ;
    issueRate = iqInstsIssued / cpu->numCycles;
/*
    issue_stores
        .name(name() + ".ISSUE:stores")
        .desc("Number of stores issued")
        .flags(total)
        ;
    issue_stores = exe_refs - exe_loads;
*/
/*
    issue_op_rate
        .name(name() + ".ISSUE:op_rate")
        .desc("Operation issue rate")
        .flags(total)
        ;
    issue_op_rate = issued_ops / numCycles;
*/
    statFuBusy
        .init(Num_OpClasses)
        .name(name() + ".ISSUE:fu_full")
        .desc("attempts to use FU when none available")
        .flags(pdf | dist)
        ;
    for (int i = 0; i < Num_OpClasses; ++i) {
        statFuBusy.subname(i, opClassStrings[i]);
    }

    fuBusy
        .init(numThreads)
        .name(name() + ".ISSUE:fu_busy_cnt")
        .desc("FU busy when requested")
        .flags(total)
        ;

    fuBusyRate
        .name(name() + ".ISSUE:fu_busy_rate")
        .desc("FU busy rate (busy events/executed inst)")
        .flags(total)
        ;
    fuBusyRate = fuBusy / iqInstsIssued;

    for (int i = 0; i < numThreads; i++) {
        // Tell mem dependence unit to reg stats as well.
        memDepUnit[i].regStats();
    }
}

template <class Impl>
void
InstructionQueue<Impl>::resetState()
{
    //Initialize thread IQ counts
    for (int i = 0; i < numThreads; i++) {
        count[i] = 0;
        instList[i].clear();
    }

    // Initialize the number of free IQ entries.
    freeEntries = numEntries;

    // Note that in actuality, the registers corresponding to the logical
    // registers start off as ready. However this doesn't matter for the
    // IQ as the instruction should have been correctly told if those
    // registers are ready in rename. Thus it can all be initialized as
    // unready.
    for (int i = 0; i < numPhysRegs; ++i) {
        regScoreboard[i] = false;
    }

    for (int i = 0; i < numThreads; ++i) {
        squashedSeqNum[i] = 0;
    }

    for (int i = 0; i < Num_OpClasses; ++i) {
        while (!readyInsts[i].empty())
            readyInsts[i].pop();
        queueOnList[i] = false;
        readyIt[i] = listOrder.end();
    }
    nonSpecInsts.clear();
    listOrder.clear();
}

template <class Impl>
void
InstructionQueue<Impl>::setActiveThreads(list<unsigned> *at_ptr)
{
    DPRINTF(IQ, "Setting active threads list pointer.\n");
    activeThreads = at_ptr;
}

template <class Impl>
void
InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
{
    DPRINTF(IQ, "Set the issue to execute queue.\n");
    issueToExecuteQueue = i2e_ptr;
}

template <class Impl>
void
InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
{
    DPRINTF(IQ, "Set the time buffer.\n");
    timeBuffer = tb_ptr;

    fromCommit = timeBuffer->getWire(-commitToIEWDelay);
}

template <class Impl>
void
InstructionQueue<Impl>::switchOut()
{
    resetState();
    dependGraph.reset();
    switchedOut = true;
    for (int i = 0; i < numThreads; ++i) {
        memDepUnit[i].switchOut();
    }
}

template <class Impl>
void
InstructionQueue<Impl>::takeOverFrom()
{
    switchedOut = false;
}

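// Number of IQ entries each thread may claim under the Partitioned
// sharing policy; for the other policies this returns 0 and the limits
// set in the constructor and in resetEntries() apply instead.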
template <class Impl>
int
InstructionQueue<Impl>::entryAmount(int num_threads)
{
    if (iqPolicy == Partitioned) {
        return numEntries / num_threads;
    } else {
        return 0;
    }
}

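// Redistribute the per-thread entry limits based on how many threads are
// currently active. Only the Partitioned and Threshold policies are
// affected; under the Dynamic policy every thread keeps the full IQ.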
template <class Impl>
void
InstructionQueue<Impl>::resetEntries()
{
    if (iqPolicy != Dynamic || numThreads > 1) {
        int active_threads = (*activeThreads).size();

        list<unsigned>::iterator threads = (*activeThreads).begin();
        list<unsigned>::iterator list_end = (*activeThreads).end();

        while (threads != list_end) {
            if (iqPolicy == Partitioned) {
                maxEntries[*threads++] = numEntries / active_threads;
            } else if (iqPolicy == Threshold && active_threads == 1) {
                maxEntries[*threads++] = numEntries;
            }
        }
    }
}

template <class Impl>
unsigned
InstructionQueue<Impl>::numFreeEntries()
{
    return freeEntries;
}

template <class Impl>
unsigned
InstructionQueue<Impl>::numFreeEntries(unsigned tid)
{
    return maxEntries[tid] - count[tid];
}

// Might want to do something more complex if it knows how many instructions
// will be issued this cycle.
template <class Impl>
bool
InstructionQueue<Impl>::isFull()
{
    if (freeEntries == 0) {
        return(true);
    } else {
        return(false);
    }
}

template <class Impl>
bool
InstructionQueue<Impl>::isFull(unsigned tid)
{
    if (numFreeEntries(tid) == 0) {
        return(true);
    } else {
        return(false);
    }
}

template <class Impl>
bool
InstructionQueue<Impl>::hasReadyInsts()
{
    if (!listOrder.empty()) {
        return true;
    }

    for (int i = 0; i < Num_OpClasses; ++i) {
        if (!readyInsts[i].empty()) {
            return true;
        }
    }

    return false;
}

template <class Impl>
void
InstructionQueue<Impl>::insert(DynInstPtr &new_inst)
{
    // Make sure the instruction is valid
    assert(new_inst);

    DPRINTF(IQ, "Adding instruction [sn:%lli] PC %#x to the IQ.\n",
            new_inst->seqNum, new_inst->readPC());

    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Look through its source registers (physical regs), and mark any
    // dependencies.
    addToDependents(new_inst);

    // Have this instruction set itself as the producer of its destination
    // register(s).
    addToProducers(new_inst);

    if (new_inst->isMemRef()) {
        memDepUnit[new_inst->threadNumber].insert(new_inst);
    } else {
        addIfReady(new_inst);
    }

    ++iqInstsAdded;

    count[new_inst->threadNumber]++;

    assert(freeEntries == (numEntries - countInsts()));
}

template <class Impl>
void
InstructionQueue<Impl>::insertNonSpec(DynInstPtr &new_inst)
{
    // @todo: Clean up this code; can do it by setting inst as unable
    // to issue, then calling normal insert on the inst.

    assert(new_inst);

    nonSpecInsts[new_inst->seqNum] = new_inst;

    DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %#x "
            "to the IQ.\n",
            new_inst->seqNum, new_inst->readPC());

    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Have this instruction set itself as the producer of its destination
    // register(s).
    addToProducers(new_inst);

    // If it's a memory instruction, add it to the memory dependency
    // unit.
    if (new_inst->isMemRef()) {
        memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
    }

    ++iqNonSpecInstsAdded;

    count[new_inst->threadNumber]++;

    assert(freeEntries == (numEntries - countInsts()));
}

template <class Impl>
void
InstructionQueue<Impl>::insertBarrier(DynInstPtr &barr_inst)
{
    memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);

    insertNonSpec(barr_inst);
}

template <class Impl>
typename Impl::DynInstPtr
InstructionQueue<Impl>::getInstToExecute()
{
    assert(!instsToExecute.empty());
    DynInstPtr inst = instsToExecute.front();
    instsToExecute.pop_front();
    return inst;
}

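// Add an entry for op_class to listOrder, which is kept sorted by the
// sequence number of the oldest ready instruction in each op class's
// ready queue. scheduleReadyInsts() walks this list in order, so the
// oldest ready instructions are considered for issue first.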
template <class Impl>
void
InstructionQueue<Impl>::addToOrderList(OpClass op_class)
{
    assert(!readyInsts[op_class].empty());

    ListOrderEntry queue_entry;

    queue_entry.queueType = op_class;

    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    ListOrderIt list_it = listOrder.begin();
    ListOrderIt list_end_it = listOrder.end();

    while (list_it != list_end_it) {
        if ((*list_it).oldestInst > queue_entry.oldestInst) {
            break;
        }

        list_it++;
    }

    readyIt[op_class] = listOrder.insert(list_it, queue_entry);
    queueOnList[op_class] = true;
}

template <class Impl>
void
InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
{
    // Get iterator of next item on the list
    // Delete the original iterator
    // Determine if the next item is either the end of the list or younger
    // than the new instruction. If so, then add in a new iterator right here.
    // If not, then move along.
    ListOrderEntry queue_entry;
    OpClass op_class = (*list_order_it).queueType;
    ListOrderIt next_it = list_order_it;

    ++next_it;

    queue_entry.queueType = op_class;
    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    while (next_it != listOrder.end() &&
           (*next_it).oldestInst < queue_entry.oldestInst) {
        ++next_it;
    }

    readyIt[op_class] = listOrder.insert(next_it, queue_entry);
}

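// Callback invoked when an FUCompletion event fires: hand the
// instruction whose FU delay has elapsed to the execute stage and, if a
// valid FU index was passed in, free that unit for use next cycle.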
template <class Impl>
void
InstructionQueue<Impl>::processFUCompletion(DynInstPtr &inst, int fu_idx)
{
    // The CPU could have been sleeping until this op completed (*extremely*
    // long latency op). Wake it if it was. This may be overkill.
    if (isSwitchedOut()) {
        return;
    }

    iewStage->wakeCPU();

    if (fu_idx > -1)
        fuPool->freeUnitNextCycle(fu_idx);

    // @todo: Ensure that these FU Completions happen at the beginning
    // of a cycle, otherwise they could add too many instructions to
    // the queue.
    // @todo: This could break if there's multiple multi-cycle ops
    // finishing on this cycle. Maybe implement something like
    // instToCommit in iew_impl.hh.
    issueToExecuteQueue->access(0)->size++;
    instsToExecute.push_back(inst);
//    int &size = issueToExecuteQueue->access(0)->size;

//    issueToExecuteQueue->access(0)->insts[size++] = inst;
}

// @todo: Figure out a better way to remove the squashed items from the
// lists. Checking the top item of each list to see if it's squashed
// wastes time and forces jumps.
template <class Impl>
void
InstructionQueue<Impl>::scheduleReadyInsts()
{
    DPRINTF(IQ, "Attempting to schedule ready instructions from "
            "the IQ.\n");

    IssueStruct *i2e_info = issueToExecuteQueue->access(0);

    // Have iterator to head of the list
    // While I haven't exceeded bandwidth or reached the end of the list,
    // Try to get a FU that can do what this op needs.
    // If successful, change the oldestInst to the new top of the list, put
    // the queue in the proper place in the list.
    // Increment the iterator.
    // This will avoid trying to schedule a certain op class if there are no
    // FUs that handle it.
    ListOrderIt order_it = listOrder.begin();
    ListOrderIt order_end_it = listOrder.end();
    int total_issued = 0;

    while (total_issued < totalWidth &&
           order_it != order_end_it) {
        OpClass op_class = (*order_it).queueType;

        assert(!readyInsts[op_class].empty());

        DynInstPtr issuing_inst = readyInsts[op_class].top();

        assert(issuing_inst->seqNum == (*order_it).oldestInst);

        if (issuing_inst->isSquashed()) {
            readyInsts[op_class].pop();

            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            listOrder.erase(order_it++);

            ++iqSquashedInstsIssued;

            continue;
        }

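        // Look for an FU for this op class. idx stays at -2 when no FU
        // lookup is needed (No_OpClass); a return of -1 from the pool
        // means no matching unit is free this cycle and the instruction
        // must wait, while anything else lets it issue now.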
        int idx = -2;
        int op_latency = 1;
        int tid = issuing_inst->threadNumber;

        if (op_class != No_OpClass) {
            idx = fuPool->getUnit(op_class);

            if (idx > -1) {
                op_latency = fuPool->getOpLatency(op_class);
            }
        }

        if (idx == -2 || idx != -1) {
            if (op_latency == 1) {
//                i2e_info->insts[exec_queue_slot++] = issuing_inst;
                i2e_info->size++;
                instsToExecute.push_back(issuing_inst);

                // Add the FU onto the list of FU's to be freed next
                // cycle if we used one.
                if (idx >= 0)
                    fuPool->freeUnitNextCycle(idx);
            } else {
                int issue_latency = fuPool->getIssueLatency(op_class);
                // Generate completion event for the FU
                FUCompletion *execution = new FUCompletion(issuing_inst,
                                                           idx, this);

                execution->schedule(curTick + cpu->cycles(issue_latency - 1));

                // @todo: Enforce that issue_latency == 1 or op_latency
                if (issue_latency > 1) {
                    execution->setFreeFU();
                } else {
                    // @todo: Not sure I'm accounting for the
                    // multi-cycle op in a pipelined FU properly, or
                    // the number of instructions issued in one cycle.
//                    i2e_info->insts[exec_queue_slot++] = issuing_inst;
//                    i2e_info->size++;

                    // Add the FU onto the list of FU's to be freed next cycle.
                    fuPool->freeUnitNextCycle(idx);
                }
            }

            DPRINTF(IQ, "Thread %i: Issuing instruction PC %#x "
                    "[sn:%lli]\n",
                    tid, issuing_inst->readPC(),
                    issuing_inst->seqNum);

            readyInsts[op_class].pop();

            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            issuing_inst->setIssued();
            ++total_issued;

            if (!issuing_inst->isMemRef()) {
                // Memory instructions can not be freed from the IQ until they
                // complete.
                ++freeEntries;
                count[tid]--;
                issuing_inst->removeInIQ();
            } else {
                memDepUnit[tid].issue(issuing_inst);
            }

            listOrder.erase(order_it++);
            statIssuedInstType[tid][op_class]++;
        } else {
            statFuBusy[op_class]++;
            fuBusy[tid]++;
            ++order_it;
        }
    }

    numIssuedDist.sample(total_issued);
    iqInstsIssued += total_issued;

    if (total_issued) {
        cpu->activityThisCycle();
    } else {
        DPRINTF(IQ, "Not able to schedule any instructions.\n");
    }
}

template <class Impl>
void
InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
{
    DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
            "to execute.\n", inst);

    NonSpecMapIt inst_it = nonSpecInsts.find(inst);

    assert(inst_it != nonSpecInsts.end());

    unsigned tid = (*inst_it).second->threadNumber;

    (*inst_it).second->setCanIssue();

    if (!(*inst_it).second->isMemRef()) {
        addIfReady((*inst_it).second);
    } else {
        memDepUnit[tid].nonSpecInstReady((*inst_it).second);
    }

    (*inst_it).second = NULL;

    nonSpecInsts.erase(inst_it);
}

template <class Impl>
void
InstructionQueue<Impl>::commit(const InstSeqNum &inst, unsigned tid)
{
    DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n",
            tid, inst);

    ListIt iq_it = instList[tid].begin();

    while (iq_it != instList[tid].end() &&
           (*iq_it)->seqNum <= inst) {
        ++iq_it;
        instList[tid].pop_front();
    }

    assert(freeEntries == (numEntries - countInsts()));
}

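// Wake up any instructions waiting on the destination registers of
// completed_inst and, for memory and barrier instructions, notify the
// memory dependence unit. Returns the number of dependents woken.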
template <class Impl>
int
InstructionQueue<Impl>::wakeDependents(DynInstPtr &completed_inst)
{
    int dependents = 0;

    DPRINTF(IQ, "Waking dependents of completed instruction.\n");

    assert(!completed_inst->isSquashed());

    // Tell the memory dependence unit to wake any dependents on this
    // instruction if it is a memory instruction. Also complete the memory
    // instruction at this point since we know it executed without issues.
    // @todo: Might want to rename "completeMemInst" to something that
    // indicates that it won't need to be replayed, and call this
    // earlier. Might not be a big deal.
    if (completed_inst->isMemRef()) {
        memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
        completeMemInst(completed_inst);
    } else if (completed_inst->isMemBarrier() ||
               completed_inst->isWriteBarrier()) {
        memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
    }

    for (int dest_reg_idx = 0;
         dest_reg_idx < completed_inst->numDestRegs();
         dest_reg_idx++)
    {
        PhysRegIndex dest_reg =
            completed_inst->renamedDestRegIdx(dest_reg_idx);

        // Special case of uniq or control registers. They are not
        // handled by the IQ and thus have no dependency graph entry.
        // @todo Figure out a cleaner way to handle this.
        if (dest_reg >= numPhysRegs) {
            continue;
        }

        DPRINTF(IQ, "Waking any dependents on register %i.\n",
                (int) dest_reg);

        //Go through the dependency chain, marking the registers as
        //ready within the waiting instructions.
        DynInstPtr dep_inst = dependGraph.pop(dest_reg);

        while (dep_inst) {
            DPRINTF(IQ, "Waking up a dependent instruction, PC%#x.\n",
                    dep_inst->readPC());

            // Might want to give more information to the instruction
            // so that it knows which of its source registers is
            // ready. However that would mean that the dependency
            // graph entries would need to hold the src_reg_idx.
            dep_inst->markSrcRegReady();

            addIfReady(dep_inst);

            dep_inst = dependGraph.pop(dest_reg);

            ++dependents;
        }

        // Reset the head node now that all of its dependents have
        // been woken up.
        assert(dependGraph.empty(dest_reg));
        dependGraph.clearInst(dest_reg);

        // Mark the scoreboard as having that register ready.
        regScoreboard[dest_reg] = true;
    }
    return dependents;
}

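// Put a memory instruction whose operands are already ready directly
// onto the ready list for its op class, reordering listOrder if this
// queue now holds an older instruction than it did before.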
template <class Impl>
void
InstructionQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
{
    OpClass op_class = ready_inst->opClass();

    readyInsts[op_class].push(ready_inst);

    // Will need to reorder the list if either a queue is not on the list,
    // or it has an older instruction than last time.
    if (!queueOnList[op_class]) {
        addToOrderList(op_class);
    } else if (readyInsts[op_class].top()->seqNum <
               (*readyIt[op_class]).oldestInst) {
        listOrder.erase(readyIt[op_class]);
        addToOrderList(op_class);
    }

    DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
            "the ready list, PC %#x opclass:%i [sn:%lli].\n",
            ready_inst->readPC(), op_class, ready_inst->seqNum);
}

template <class Impl>
void
InstructionQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
{
    memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
}

template <class Impl>
void
InstructionQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
{
    memDepUnit[replay_inst->threadNumber].replay(replay_inst);
}

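// A memory instruction only gives up its IQ entry once its access
// completes; at that point the entry is freed and the memory dependence
// unit is told the instruction is done.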
template <class Impl>
void
InstructionQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
{
    int tid = completed_inst->threadNumber;

    DPRINTF(IQ, "Completing mem instruction PC:%#x [sn:%lli]\n",
            completed_inst->readPC(), completed_inst->seqNum);

    ++freeEntries;

    completed_inst->memOpDone = true;

    memDepUnit[tid].completed(completed_inst);

    count[tid]--;
}

template <class Impl>
void
InstructionQueue<Impl>::violation(DynInstPtr &store,
                                  DynInstPtr &faulting_load)
{
    memDepUnit[store->threadNumber].violation(store, faulting_load);
}

template <class Impl>
void
InstructionQueue<Impl>::squash(unsigned tid)
{
    DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
            "the IQ.\n", tid);

    // Read instruction sequence number of last instruction out of the
    // time buffer.
    squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;

    // Call doSquash if there are insts in the IQ
    if (count[tid] > 0) {
        doSquash(tid);
    }

    // Also tell the memory dependence unit to squash.
    memDepUnit[tid].squash(squashedSeqNum[tid], tid);
}

template <class Impl>
void
InstructionQueue<Impl>::doSquash(unsigned tid)
{
    // Start at the tail.
    ListIt squash_it = instList[tid].end();
    --squash_it;

    DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
            tid, squashedSeqNum[tid]);

    // Squash any instructions younger than the squashed sequence number
    // given.
    while (squash_it != instList[tid].end() &&
           (*squash_it)->seqNum > squashedSeqNum[tid]) {

        DynInstPtr squashed_inst = (*squash_it);

        // Only handle the instruction if it actually is in the IQ and
        // hasn't already been squashed in the IQ.
        if (squashed_inst->threadNumber != tid ||
            squashed_inst->isSquashedInIQ()) {
            --squash_it;
            continue;
        }

        if (!squashed_inst->isIssued() ||
            (squashed_inst->isMemRef() &&
             !squashed_inst->memOpDone)) {

            // Remove the instruction from the dependency list.
            if (!squashed_inst->isNonSpeculative() &&
                !squashed_inst->isStoreConditional() &&
                !squashed_inst->isMemBarrier() &&
                !squashed_inst->isWriteBarrier()) {

                for (int src_reg_idx = 0;
                     src_reg_idx < squashed_inst->numSrcRegs();
                     src_reg_idx++)
                {
                    PhysRegIndex src_reg =
                        squashed_inst->renamedSrcRegIdx(src_reg_idx);

                    // Only remove it from the dependency graph if it
                    // was placed there in the first place.

                    // Instead of doing a linked list traversal, we
                    // can just remove these squashed instructions
                    // either at issue time, or when the register is
                    // overwritten. The only downside to this is it
                    // leaves more room for error.

                    if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
                        src_reg < numPhysRegs) {
                        dependGraph.remove(src_reg, squashed_inst);
                    }


                    ++iqSquashedOperandsExamined;
                }
            } else {
                NonSpecMapIt ns_inst_it =
                    nonSpecInsts.find(squashed_inst->seqNum);
                assert(ns_inst_it != nonSpecInsts.end());

                (*ns_inst_it).second = NULL;

                nonSpecInsts.erase(ns_inst_it);

                ++iqSquashedNonSpecRemoved;
            }

            // Might want to also clear out the head of the dependency graph.

            // Mark it as squashed within the IQ.
            squashed_inst->setSquashedInIQ();

            // @todo: Remove this hack where several statuses are set so the
            // inst will flow through the rest of the pipeline.
            squashed_inst->setIssued();
            squashed_inst->setCanCommit();
            squashed_inst->removeInIQ();

            //Update Thread IQ Count
            count[squashed_inst->threadNumber]--;

            ++freeEntries;

            DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %#x "
                    "squashed.\n",
                    tid, squashed_inst->seqNum, squashed_inst->readPC());
        }

        instList[tid].erase(squash_it--);
        ++iqSquashedInstsExamined;
    }
}

template <class Impl>
bool
InstructionQueue<Impl>::addToDependents(DynInstPtr &new_inst)
{
    // Loop through the instruction's source registers, adding
    // them to the dependency list if they are not ready.
    int8_t total_src_regs = new_inst->numSrcRegs();
    bool return_val = false;

    for (int src_reg_idx = 0;
         src_reg_idx < total_src_regs;
         src_reg_idx++)
    {
        // Only add it to the dependency graph if it's not ready.
        if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
            PhysRegIndex src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);

            // Check the IQ's scoreboard to make sure the register
            // hasn't become ready while the instruction was in flight
            // between stages. Only if it really isn't ready should
            // it be added to the dependency graph.
            if (src_reg >= numPhysRegs) {
                continue;
            } else if (regScoreboard[src_reg] == false) {
                DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
                        "is being added to the dependency chain.\n",
                        new_inst->readPC(), src_reg);

                dependGraph.insert(src_reg, new_inst);

                // Change the return value to indicate that something
                // was added to the dependency graph.
                return_val = true;
            } else {
                DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
                        "became ready before it reached the IQ.\n",
                        new_inst->readPC(), src_reg);
                // Mark a register ready within the instruction.
                new_inst->markSrcRegReady(src_reg_idx);
            }
        }
    }

    return return_val;
}

template <class Impl>
void
InstructionQueue<Impl>::addToProducers(DynInstPtr &new_inst)
{
    // Nothing really needs to be marked when an instruction becomes
    // the producer of a register's value, but for convenience a ptr
    // to the producing instruction will be placed in the head node of
    // the dependency links.
    int8_t total_dest_regs = new_inst->numDestRegs();

    for (int dest_reg_idx = 0;
         dest_reg_idx < total_dest_regs;
         dest_reg_idx++)
    {
        PhysRegIndex dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);

        // Instructions that use the misc regs will have a reg number
        // higher than the normal physical registers. In this case these
        // registers are not renamed, and there is no need to track
        // dependencies as these instructions must be executed at commit.
        if (dest_reg >= numPhysRegs) {
            continue;
        }

        if (!dependGraph.empty(dest_reg)) {
            dependGraph.dump();
            panic("Dependency graph %i not empty!", dest_reg);
        }

        dependGraph.setInst(dest_reg, new_inst);

        // Mark the scoreboard to say it's not yet ready.
        regScoreboard[dest_reg] = false;
    }
}

template <class Impl>
void
InstructionQueue<Impl>::addIfReady(DynInstPtr &inst)
{
    // If the instruction now has all of its source registers
    // available, then add it to the list of ready instructions.
    if (inst->readyToIssue()) {

        //Add the instruction to the proper ready list.
        if (inst->isMemRef()) {

            DPRINTF(IQ, "Checking if memory instruction can issue.\n");

            // Message to the mem dependence unit that this instruction has
            // its registers ready.
            memDepUnit[inst->threadNumber].regsReady(inst);

            return;
        }

        OpClass op_class = inst->opClass();

        DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
                "the ready list, PC %#x opclass:%i [sn:%lli].\n",
                inst->readPC(), op_class, inst->seqNum);

        readyInsts[op_class].push(inst);

        // Will need to reorder the list if either a queue is not on the list,
        // or it has an older instruction than last time.
        if (!queueOnList[op_class]) {
            addToOrderList(op_class);
        } else if (readyInsts[op_class].top()->seqNum <
                   (*readyIt[op_class]).oldestInst) {
            listOrder.erase(readyIt[op_class]);
            addToOrderList(op_class);
        }
    }
}

template <class Impl>
int
InstructionQueue<Impl>::countInsts()
{
    //ksewell: This works but definitely could use a cleaner write
    //with a more intuitive way of counting. Right now it's
    //just brute force ....

#if 0
    int total_insts = 0;

    for (int i = 0; i < numThreads; ++i) {
        ListIt count_it = instList[i].begin();

        while (count_it != instList[i].end()) {
            if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
                if (!(*count_it)->isIssued()) {
                    ++total_insts;
                } else if ((*count_it)->isMemRef() &&
                           !(*count_it)->memOpDone) {
                    // Loads that have not been marked as executed still count
                    // towards the total instructions.
                    ++total_insts;
                }
            }

            ++count_it;
        }
    }

    return total_insts;
#else
    return numEntries - freeEntries;
#endif
}

template <class Impl>
void
InstructionQueue<Impl>::dumpLists()
{
    for (int i = 0; i < Num_OpClasses; ++i) {
        cprintf("Ready list %i size: %i\n", i, readyInsts[i].size());

        cprintf("\n");
    }

    cprintf("Non speculative list size: %i\n", nonSpecInsts.size());

    NonSpecMapIt non_spec_it = nonSpecInsts.begin();
    NonSpecMapIt non_spec_end_it = nonSpecInsts.end();

    cprintf("Non speculative list: ");

    while (non_spec_it != non_spec_end_it) {
        cprintf("%#x [sn:%lli]", (*non_spec_it).second->readPC(),
                (*non_spec_it).second->seqNum);
        ++non_spec_it;
    }

    cprintf("\n");

    ListOrderIt list_order_it = listOrder.begin();
    ListOrderIt list_order_end_it = listOrder.end();
    int i = 1;

    cprintf("List order: ");

    while (list_order_it != list_order_end_it) {
        cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
                (*list_order_it).oldestInst);

        ++list_order_it;
        ++i;
    }

    cprintf("\n");
}


template <class Impl>
void
InstructionQueue<Impl>::dumpInsts()
{
    for (int i = 0; i < numThreads; ++i) {
        int num = 0;
        int valid_num = 0;
        ListIt inst_list_it = instList[i].begin();

        while (inst_list_it != instList[i].end())
        {
            cprintf("Instruction:%i\n",
                    num);
            if (!(*inst_list_it)->isSquashed()) {
                if (!(*inst_list_it)->isIssued()) {
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                } else if ((*inst_list_it)->isMemRef() &&
                           !(*inst_list_it)->memOpDone) {
                    // Loads that have not been marked as executed
                    // still count towards the total instructions.
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                }
            }

            cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
                    "Issued:%i\nSquashed:%i\n",
                    (*inst_list_it)->readPC(),
                    (*inst_list_it)->seqNum,
                    (*inst_list_it)->threadNumber,
                    (*inst_list_it)->isIssued(),
                    (*inst_list_it)->isSquashed());

            if ((*inst_list_it)->isMemRef()) {
                cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
            }

            cprintf("\n");

            inst_list_it++;
            ++num;
        }
    }
}