inst_queue_impl.hh (13561:523608bb180c → 13590:d7e018859709)
1/*
2 * Copyright (c) 2011-2014, 2017-2018 ARM Limited
3 * Copyright (c) 2013 Advanced Micro Devices, Inc.
4 * All rights reserved.
5 *
6 * The license below extends only to copyright in the software and shall
7 * not be construed as granting a license to any other intellectual
8 * property including but not limited to intellectual property relating
9 * to a hardware implementation of the functionality of the software
10 * licensed hereunder. You may use the software subject to the license
11 * terms below provided that you ensure that this notice is replicated
12 * unmodified and in its entirety in all distributions of the software,
13 * modified or unmodified, in source code or in binary form.
14 *
15 * Copyright (c) 2004-2006 The Regents of The University of Michigan
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Kevin Lim
42 * Korey Sewell
43 */
44
45#ifndef __CPU_O3_INST_QUEUE_IMPL_HH__
46#define __CPU_O3_INST_QUEUE_IMPL_HH__
47
48#include <limits>
49#include <vector>
50
51#include "base/logging.hh"
52#include "cpu/o3/fu_pool.hh"
53#include "cpu/o3/inst_queue.hh"
54#include "debug/IQ.hh"
55#include "enums/OpClass.hh"
56#include "params/DerivO3CPU.hh"
57#include "sim/core.hh"
58
59// clang complains about std::set being overloaded with Packet::set if
60// we open up the entire namespace std
61using std::list;
62
63template <class Impl>
64InstructionQueue<Impl>::FUCompletion::FUCompletion(const DynInstPtr &_inst,
65 int fu_idx, InstructionQueue<Impl> *iq_ptr)
66 : Event(Stat_Event_Pri, AutoDelete),
67 inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false)
68{
69}
70
71template <class Impl>
72void
73InstructionQueue<Impl>::FUCompletion::process()
74{
75 iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1);
76 inst = NULL;
77}
78
79
80template <class Impl>
81const char *
82InstructionQueue<Impl>::FUCompletion::description() const
83{
84 return "Functional unit completion";
85}
86
87template <class Impl>
88InstructionQueue<Impl>::InstructionQueue(O3CPU *cpu_ptr, IEW *iew_ptr,
89 DerivO3CPUParams *params)
90 : cpu(cpu_ptr),
91 iewStage(iew_ptr),
92 fuPool(params->fuPool),
93 iqPolicy(params->smtIQPolicy),
94 numEntries(params->numIQEntries),
95 totalWidth(params->issueWidth),
96 commitToIEWDelay(params->commitToIEWDelay)
97{
98 assert(fuPool);
99
100 numThreads = params->numThreads;
101
102    // Set the total number of physical registers.
103    // As the vector registers have two addressing modes, they are added twice.
104 numPhysRegs = params->numPhysIntRegs + params->numPhysFloatRegs +
105 params->numPhysVecRegs +
106 params->numPhysVecRegs * TheISA::NumVecElemPerVecReg +
107 params->numPhysCCRegs;
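    // Worked example of the sum above (hypothetical parameter values, not
    // defaults): with 128 integer, 128 float, 32 vector (each holding
    // TheISA::NumVecElemPerVecReg == 4 elements) and 16 CC physical
    // registers, numPhysRegs = 128 + 128 + 32 + 32*4 + 16 = 432, i.e. one
    // dependency-graph entry and one scoreboard bit per physical register
    // and per vector element.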
108
109 //Create an entry for each physical register within the
110 //dependency graph.
111 dependGraph.resize(numPhysRegs);
112
113 // Resize the register scoreboard.
114 regScoreboard.resize(numPhysRegs);
115
116 //Initialize Mem Dependence Units
117 for (ThreadID tid = 0; tid < Impl::MaxThreads; tid++) {
118 memDepUnit[tid].init(params, tid);
119 memDepUnit[tid].setIQ(this);
120 }
121
122 resetState();
123
124 //Figure out resource sharing policy
125 if (iqPolicy == SMTQueuePolicy::Dynamic) {
126        //Set max entries to the total IQ capacity
127 for (ThreadID tid = 0; tid < numThreads; tid++) {
128 maxEntries[tid] = numEntries;
129 }
130
131 } else if (iqPolicy == SMTQueuePolicy::Partitioned) {
132        //@todo: Make this work when part_amt doesn't divide numEntries evenly.
133 int part_amt = numEntries / numThreads;
134
135        //Divide the IQ up evenly
136 for (ThreadID tid = 0; tid < numThreads; tid++) {
137 maxEntries[tid] = part_amt;
138 }
139
140        DPRINTF(IQ, "IQ sharing policy set to Partitioned: "
141                "%i entries per thread.\n", part_amt);
142 } else if (iqPolicy == SMTQueuePolicy::Threshold) {
143 double threshold = (double)params->smtIQThreshold / 100;
144
145 int thresholdIQ = (int)((double)threshold * numEntries);
146
147 //Divide up by threshold amount
148 for (ThreadID tid = 0; tid < numThreads; tid++) {
149 maxEntries[tid] = thresholdIQ;
150 }
151
152        DPRINTF(IQ, "IQ sharing policy set to Threshold: "
153                "%i entries per thread.\n", thresholdIQ);
154 }
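    // Worked example of the three sharing policies (hypothetical values):
    // with numEntries = 64 and numThreads = 2, Dynamic caps every thread at
    // the full 64 entries, Partitioned caps each at 64 / 2 = 32, and a
    // smtIQThreshold of 25 caps each at (25 / 100) * 64 = 16 entries.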
155 for (ThreadID tid = numThreads; tid < Impl::MaxThreads; tid++) {
156 maxEntries[tid] = 0;
157 }
158}
159
160template <class Impl>
161InstructionQueue<Impl>::~InstructionQueue()
162{
163 dependGraph.reset();
164#ifdef DEBUG
165 cprintf("Nodes traversed: %i, removed: %i\n",
166 dependGraph.nodesTraversed, dependGraph.nodesRemoved);
167#endif
168}
169
170template <class Impl>
171std::string
172InstructionQueue<Impl>::name() const
173{
174 return cpu->name() + ".iq";
175}
176
177template <class Impl>
178void
179InstructionQueue<Impl>::regStats()
180{
181 using namespace Stats;
182 iqInstsAdded
183 .name(name() + ".iqInstsAdded")
184 .desc("Number of instructions added to the IQ (excludes non-spec)")
185 .prereq(iqInstsAdded);
186
187 iqNonSpecInstsAdded
188 .name(name() + ".iqNonSpecInstsAdded")
189 .desc("Number of non-speculative instructions added to the IQ")
190 .prereq(iqNonSpecInstsAdded);
191
192 iqInstsIssued
193 .name(name() + ".iqInstsIssued")
194 .desc("Number of instructions issued")
195 .prereq(iqInstsIssued);
196
197 iqIntInstsIssued
198 .name(name() + ".iqIntInstsIssued")
199 .desc("Number of integer instructions issued")
200 .prereq(iqIntInstsIssued);
201
202 iqFloatInstsIssued
203 .name(name() + ".iqFloatInstsIssued")
204 .desc("Number of float instructions issued")
205 .prereq(iqFloatInstsIssued);
206
207 iqBranchInstsIssued
208 .name(name() + ".iqBranchInstsIssued")
209 .desc("Number of branch instructions issued")
210 .prereq(iqBranchInstsIssued);
211
212 iqMemInstsIssued
213 .name(name() + ".iqMemInstsIssued")
214 .desc("Number of memory instructions issued")
215 .prereq(iqMemInstsIssued);
216
217 iqMiscInstsIssued
218 .name(name() + ".iqMiscInstsIssued")
219 .desc("Number of miscellaneous instructions issued")
220 .prereq(iqMiscInstsIssued);
221
222 iqSquashedInstsIssued
223 .name(name() + ".iqSquashedInstsIssued")
224 .desc("Number of squashed instructions issued")
225 .prereq(iqSquashedInstsIssued);
226
227 iqSquashedInstsExamined
228 .name(name() + ".iqSquashedInstsExamined")
229 .desc("Number of squashed instructions iterated over during squash;"
230 " mainly for profiling")
231 .prereq(iqSquashedInstsExamined);
232
233 iqSquashedOperandsExamined
234 .name(name() + ".iqSquashedOperandsExamined")
235 .desc("Number of squashed operands that are examined and possibly "
236 "removed from graph")
237 .prereq(iqSquashedOperandsExamined);
238
239 iqSquashedNonSpecRemoved
240 .name(name() + ".iqSquashedNonSpecRemoved")
241 .desc("Number of squashed non-spec instructions that were removed")
242 .prereq(iqSquashedNonSpecRemoved);
243/*
244 queueResDist
245 .init(Num_OpClasses, 0, 99, 2)
246 .name(name() + ".IQ:residence:")
247 .desc("cycles from dispatch to issue")
248 .flags(total | pdf | cdf )
249 ;
250 for (int i = 0; i < Num_OpClasses; ++i) {
251 queueResDist.subname(i, opClassStrings[i]);
252 }
253*/
254 numIssuedDist
255 .init(0,totalWidth,1)
256 .name(name() + ".issued_per_cycle")
257 .desc("Number of insts issued each cycle")
258 .flags(pdf)
259 ;
260/*
261 dist_unissued
262 .init(Num_OpClasses+2)
263 .name(name() + ".unissued_cause")
264 .desc("Reason ready instruction not issued")
265 .flags(pdf | dist)
266 ;
267 for (int i=0; i < (Num_OpClasses + 2); ++i) {
268 dist_unissued.subname(i, unissued_names[i]);
269 }
270*/
271 statIssuedInstType
272 .init(numThreads,Enums::Num_OpClass)
273 .name(name() + ".FU_type")
274 .desc("Type of FU issued")
275 .flags(total | pdf | dist)
276 ;
277 statIssuedInstType.ysubnames(Enums::OpClassStrings);
278
279 //
280 // How long did instructions for a particular FU type wait prior to issue
281 //
282/*
283 issueDelayDist
284 .init(Num_OpClasses,0,99,2)
285 .name(name() + ".")
286 .desc("cycles from operands ready to issue")
287 .flags(pdf | cdf)
288 ;
289
290 for (int i=0; i<Num_OpClasses; ++i) {
291 std::stringstream subname;
292 subname << opClassStrings[i] << "_delay";
293 issueDelayDist.subname(i, subname.str());
294 }
295*/
296 issueRate
297 .name(name() + ".rate")
298 .desc("Inst issue rate")
299 .flags(total)
300 ;
301 issueRate = iqInstsIssued / cpu->numCycles;
302
303 statFuBusy
304 .init(Num_OpClasses)
305 .name(name() + ".fu_full")
306 .desc("attempts to use FU when none available")
307 .flags(pdf | dist)
308 ;
309 for (int i=0; i < Num_OpClasses; ++i) {
310 statFuBusy.subname(i, Enums::OpClassStrings[i]);
311 }
312
313 fuBusy
314 .init(numThreads)
315 .name(name() + ".fu_busy_cnt")
316 .desc("FU busy when requested")
317 .flags(total)
318 ;
319
320 fuBusyRate
321 .name(name() + ".fu_busy_rate")
322 .desc("FU busy rate (busy events/executed inst)")
323 .flags(total)
324 ;
325 fuBusyRate = fuBusy / iqInstsIssued;
326
327 for (ThreadID tid = 0; tid < numThreads; tid++) {
328 // Tell mem dependence unit to reg stats as well.
329 memDepUnit[tid].regStats();
330 }
331
332 intInstQueueReads
333 .name(name() + ".int_inst_queue_reads")
334 .desc("Number of integer instruction queue reads")
335 .flags(total);
336
337 intInstQueueWrites
338 .name(name() + ".int_inst_queue_writes")
339 .desc("Number of integer instruction queue writes")
340 .flags(total);
341
342 intInstQueueWakeupAccesses
343 .name(name() + ".int_inst_queue_wakeup_accesses")
344 .desc("Number of integer instruction queue wakeup accesses")
345 .flags(total);
346
347 fpInstQueueReads
348 .name(name() + ".fp_inst_queue_reads")
349 .desc("Number of floating instruction queue reads")
350 .flags(total);
351
352 fpInstQueueWrites
353 .name(name() + ".fp_inst_queue_writes")
354 .desc("Number of floating instruction queue writes")
355 .flags(total);
356
357 fpInstQueueWakeupAccesses
358 .name(name() + ".fp_inst_queue_wakeup_accesses")
359 .desc("Number of floating instruction queue wakeup accesses")
360 .flags(total);
361
362 vecInstQueueReads
363 .name(name() + ".vec_inst_queue_reads")
364 .desc("Number of vector instruction queue reads")
365 .flags(total);
366
367 vecInstQueueWrites
368 .name(name() + ".vec_inst_queue_writes")
369 .desc("Number of vector instruction queue writes")
370 .flags(total);
371
372 vecInstQueueWakeupAccesses
373 .name(name() + ".vec_inst_queue_wakeup_accesses")
374 .desc("Number of vector instruction queue wakeup accesses")
375 .flags(total);
376
377 intAluAccesses
378 .name(name() + ".int_alu_accesses")
379 .desc("Number of integer alu accesses")
380 .flags(total);
381
382 fpAluAccesses
383 .name(name() + ".fp_alu_accesses")
384 .desc("Number of floating point alu accesses")
385 .flags(total);
386
387 vecAluAccesses
388 .name(name() + ".vec_alu_accesses")
389 .desc("Number of vector alu accesses")
390 .flags(total);
391
392}
393
394template <class Impl>
395void
396InstructionQueue<Impl>::resetState()
397{
398 //Initialize thread IQ counts
399 for (ThreadID tid = 0; tid < Impl::MaxThreads; tid++) {
400 count[tid] = 0;
401 instList[tid].clear();
402 }
403
404 // Initialize the number of free IQ entries.
405 freeEntries = numEntries;
406
407 // Note that in actuality, the registers corresponding to the logical
408 // registers start off as ready. However this doesn't matter for the
409 // IQ as the instruction should have been correctly told if those
410 // registers are ready in rename. Thus it can all be initialized as
411 // unready.
412 for (int i = 0; i < numPhysRegs; ++i) {
413 regScoreboard[i] = false;
414 }
415
416 for (ThreadID tid = 0; tid < Impl::MaxThreads; ++tid) {
417 squashedSeqNum[tid] = 0;
418 }
419
420 for (int i = 0; i < Num_OpClasses; ++i) {
421 while (!readyInsts[i].empty())
422 readyInsts[i].pop();
423 queueOnList[i] = false;
424 readyIt[i] = listOrder.end();
425 }
426 nonSpecInsts.clear();
427 listOrder.clear();
428 deferredMemInsts.clear();
429 blockedMemInsts.clear();
430 retryMemInsts.clear();
431 wbOutstanding = 0;
432}
433
434template <class Impl>
435void
436InstructionQueue<Impl>::setActiveThreads(list<ThreadID> *at_ptr)
437{
438 activeThreads = at_ptr;
439}
440
441template <class Impl>
442void
443InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
444{
445 issueToExecuteQueue = i2e_ptr;
446}
447
448template <class Impl>
449void
450InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
451{
452 timeBuffer = tb_ptr;
453
454 fromCommit = timeBuffer->getWire(-commitToIEWDelay);
455}
456
457template <class Impl>
458bool
459InstructionQueue<Impl>::isDrained() const
460{
461 bool drained = dependGraph.empty() &&
462 instsToExecute.empty() &&
463 wbOutstanding == 0;
464 for (ThreadID tid = 0; tid < numThreads; ++tid)
465 drained = drained && memDepUnit[tid].isDrained();
466
467 return drained;
468}
469
470template <class Impl>
471void
472InstructionQueue<Impl>::drainSanityCheck() const
473{
474 assert(dependGraph.empty());
475 assert(instsToExecute.empty());
476 for (ThreadID tid = 0; tid < numThreads; ++tid)
477 memDepUnit[tid].drainSanityCheck();
478}
479
480template <class Impl>
481void
482InstructionQueue<Impl>::takeOverFrom()
483{
484 resetState();
485}
486
487template <class Impl>
488int
489InstructionQueue<Impl>::entryAmount(ThreadID num_threads)
490{
491 if (iqPolicy == SMTQueuePolicy::Partitioned) {
492 return numEntries / num_threads;
493 } else {
494 return 0;
495 }
496}
497
498
499template <class Impl>
500void
501InstructionQueue<Impl>::resetEntries()
502{
503 if (iqPolicy != SMTQueuePolicy::Dynamic || numThreads > 1) {
504 int active_threads = activeThreads->size();
505
506 list<ThreadID>::iterator threads = activeThreads->begin();
507 list<ThreadID>::iterator end = activeThreads->end();
508
509 while (threads != end) {
510 ThreadID tid = *threads++;
511
512 if (iqPolicy == SMTQueuePolicy::Partitioned) {
513 maxEntries[tid] = numEntries / active_threads;
514 } else if (iqPolicy == SMTQueuePolicy::Threshold &&
515 active_threads == 1) {
516 maxEntries[tid] = numEntries;
517 }
518 }
519 }
520}
521
522template <class Impl>
523unsigned
524InstructionQueue<Impl>::numFreeEntries()
525{
526 return freeEntries;
527}
528
529template <class Impl>
530unsigned
531InstructionQueue<Impl>::numFreeEntries(ThreadID tid)
532{
533 return maxEntries[tid] - count[tid];
534}
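// Note on numFreeEntries(ThreadID): the constructor sets maxEntries[tid] to
// numEntries under the Dynamic policy, so this per-thread headroom only
// becomes tighter than the shared freeEntries count under the Partitioned
// and Threshold policies.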
535
536// Might want to do something more complex if the IQ knew how many
537// instructions will be issued this cycle.
538template <class Impl>
539bool
540InstructionQueue<Impl>::isFull()
541{
542 if (freeEntries == 0) {
543 return(true);
544 } else {
545 return(false);
546 }
547}
548
549template <class Impl>
550bool
551InstructionQueue<Impl>::isFull(ThreadID tid)
552{
553 if (numFreeEntries(tid) == 0) {
554 return(true);
555 } else {
556 return(false);
557 }
558}
559
560template <class Impl>
561bool
562InstructionQueue<Impl>::hasReadyInsts()
563{
564 if (!listOrder.empty()) {
565 return true;
566 }
567
568 for (int i = 0; i < Num_OpClasses; ++i) {
569 if (!readyInsts[i].empty()) {
570 return true;
571 }
572 }
573
574 return false;
575}
576
577template <class Impl>
578void
579InstructionQueue<Impl>::insert(const DynInstPtr &new_inst)
580{
581 if (new_inst->isFloating()) {
582 fpInstQueueWrites++;
583 } else if (new_inst->isVector()) {
584 vecInstQueueWrites++;
585 } else {
586 intInstQueueWrites++;
587 }
588 // Make sure the instruction is valid
589 assert(new_inst);
590
591 DPRINTF(IQ, "Adding instruction [sn:%lli] PC %s to the IQ.\n",
592 new_inst->seqNum, new_inst->pcState());
593
594 assert(freeEntries != 0);
595
596 instList[new_inst->threadNumber].push_back(new_inst);
597
598 --freeEntries;
599
600 new_inst->setInIQ();
601
602 // Look through its source registers (physical regs), and mark any
603 // dependencies.
604 addToDependents(new_inst);
605
606 // Have this instruction set itself as the producer of its destination
607 // register(s).
608 addToProducers(new_inst);
609
610 if (new_inst->isMemRef()) {
611 memDepUnit[new_inst->threadNumber].insert(new_inst);
612 } else {
613 addIfReady(new_inst);
614 }
615
616 ++iqInstsAdded;
617
618 count[new_inst->threadNumber]++;
619
620 assert(freeEntries == (numEntries - countInsts()));
621}
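// A minimal sketch of the dispatch-side call sequence (assumed caller is the
// IEW stage; only the InstructionQueue methods named in this file are real,
// the surrounding conditions are illustrative):
//
//     if (!instQueue.isFull(tid)) {
//         if (inst->isNonSpeculative())
//             instQueue.insertNonSpec(inst);  // held back until commit
//         else if (inst->isMemBarrier() || inst->isWriteBarrier())
//             instQueue.insertBarrier(inst);
//         else
//             instQueue.insert(inst);
//     }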
622
623template <class Impl>
624void
625InstructionQueue<Impl>::insertNonSpec(const DynInstPtr &new_inst)
626{
627 // @todo: Clean up this code; can do it by setting inst as unable
628 // to issue, then calling normal insert on the inst.
629 if (new_inst->isFloating()) {
630 fpInstQueueWrites++;
631 } else if (new_inst->isVector()) {
632 vecInstQueueWrites++;
633 } else {
634 intInstQueueWrites++;
635 }
636
637 assert(new_inst);
638
639 nonSpecInsts[new_inst->seqNum] = new_inst;
640
641 DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %s "
642 "to the IQ.\n",
643 new_inst->seqNum, new_inst->pcState());
644
645 assert(freeEntries != 0);
646
647 instList[new_inst->threadNumber].push_back(new_inst);
648
649 --freeEntries;
650
651 new_inst->setInIQ();
652
653 // Have this instruction set itself as the producer of its destination
654 // register(s).
655 addToProducers(new_inst);
656
657 // If it's a memory instruction, add it to the memory dependency
658 // unit.
659 if (new_inst->isMemRef()) {
660 memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
661 }
662
663 ++iqNonSpecInstsAdded;
664
665 count[new_inst->threadNumber]++;
666
667 assert(freeEntries == (numEntries - countInsts()));
668}
669
670template <class Impl>
671void
672InstructionQueue<Impl>::insertBarrier(const DynInstPtr &barr_inst)
673{
674 memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);
675
676 insertNonSpec(barr_inst);
677}
678
679template <class Impl>
680typename Impl::DynInstPtr
681InstructionQueue<Impl>::getInstToExecute()
682{
683 assert(!instsToExecute.empty());
684 DynInstPtr inst = std::move(instsToExecute.front());
685 instsToExecute.pop_front();
686 if (inst->isFloating()) {
687 fpInstQueueReads++;
688 } else if (inst->isVector()) {
689 vecInstQueueReads++;
690 } else {
691 intInstQueueReads++;
692 }
693 return inst;
694}
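// Sketch of the consumer side (assumed to be the execute portion of IEW; the
// wire and counter names here are illustrative, only getInstToExecute() is
// taken from this file):
//
//     int insts_to_execute = fromIssue->size;
//     while (insts_to_execute-- > 0) {
//         DynInstPtr inst = instQueue.getInstToExecute();
//         // ... execute, or defer/block/reschedule the instruction ...
//     }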
695
696template <class Impl>
697void
698InstructionQueue<Impl>::addToOrderList(OpClass op_class)
699{
700 assert(!readyInsts[op_class].empty());
701
702 ListOrderEntry queue_entry;
703
704 queue_entry.queueType = op_class;
705
706 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
707
708 ListOrderIt list_it = listOrder.begin();
709 ListOrderIt list_end_it = listOrder.end();
710
711 while (list_it != list_end_it) {
712 if ((*list_it).oldestInst > queue_entry.oldestInst) {
713 break;
714 }
715
716 list_it++;
717 }
718
719 readyIt[op_class] = listOrder.insert(list_it, queue_entry);
720 queueOnList[op_class] = true;
721}
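// listOrder is therefore kept sorted by each op class's oldest ready
// sequence number. For example (made-up sequence numbers), if the non-empty
// ready queues have oldest entries [sn:12] (IntAlu), [sn:7] (MemRead) and
// [sn:30] (FloatAdd), scheduleReadyInsts() below walks them in the order
// MemRead, IntAlu, FloatAdd.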
722
723template <class Impl>
724void
725InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
726{
727 // Get iterator of next item on the list
728 // Delete the original iterator
729 // Determine if the next item is either the end of the list or younger
730 // than the new instruction. If so, then add in a new iterator right here.
731 // If not, then move along.
732 ListOrderEntry queue_entry;
733 OpClass op_class = (*list_order_it).queueType;
734 ListOrderIt next_it = list_order_it;
735
736 ++next_it;
737
738 queue_entry.queueType = op_class;
739 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
740
741 while (next_it != listOrder.end() &&
742 (*next_it).oldestInst < queue_entry.oldestInst) {
743 ++next_it;
744 }
745
746 readyIt[op_class] = listOrder.insert(next_it, queue_entry);
747}
748
749template <class Impl>
750void
751InstructionQueue<Impl>::processFUCompletion(const DynInstPtr &inst, int fu_idx)
752{
753 DPRINTF(IQ, "Processing FU completion [sn:%lli]\n", inst->seqNum);
754 assert(!cpu->switchedOut());
755 // The CPU could have been sleeping until this op completed (*extremely*
756 // long latency op). Wake it if it was. This may be overkill.
757 --wbOutstanding;
758 iewStage->wakeCPU();
759
760 if (fu_idx > -1)
761 fuPool->freeUnitNextCycle(fu_idx);
762
763 // @todo: Ensure that these FU Completions happen at the beginning
764 // of a cycle, otherwise they could add too many instructions to
765 // the queue.
766 issueToExecuteQueue->access(-1)->size++;
767 instsToExecute.push_back(inst);
768}
769
770// @todo: Figure out a better way to remove the squashed items from the
771// lists. Checking the top item of each list to see if it's squashed
772// wastes time and forces jumps.
773template <class Impl>
774void
775InstructionQueue<Impl>::scheduleReadyInsts()
776{
777 DPRINTF(IQ, "Attempting to schedule ready instructions from "
778 "the IQ.\n");
779
780 IssueStruct *i2e_info = issueToExecuteQueue->access(0);
781
782 DynInstPtr mem_inst;
783 while (mem_inst = std::move(getDeferredMemInstToExecute())) {
784 addReadyMemInst(mem_inst);
785 }
786
787 // See if any cache blocked instructions are able to be executed
788 while (mem_inst = std::move(getBlockedMemInstToExecute())) {
789 addReadyMemInst(mem_inst);
790 }
791
792    // Start with an iterator at the head of the list. While issue
793    // bandwidth remains and the end of the list has not been reached,
794    // try to get an FU that can execute what this op class needs.
795    // If successful, update oldestInst to the new top of that ready
796    // queue and re-insert the queue entry at its proper place in the
797    // list; otherwise just advance the iterator.
798    // This avoids repeatedly trying to schedule an op class for which
799    // no FU is available.
800 int total_issued = 0;
801 ListOrderIt order_it = listOrder.begin();
802 ListOrderIt order_end_it = listOrder.end();
803
804 while (total_issued < totalWidth && order_it != order_end_it) {
805 OpClass op_class = (*order_it).queueType;
806
807 assert(!readyInsts[op_class].empty());
808
809 DynInstPtr issuing_inst = readyInsts[op_class].top();
810
811 if (issuing_inst->isFloating()) {
812 fpInstQueueReads++;
813 } else if (issuing_inst->isVector()) {
814 vecInstQueueReads++;
815 } else {
816 intInstQueueReads++;
817 }
818
819 assert(issuing_inst->seqNum == (*order_it).oldestInst);
820
821 if (issuing_inst->isSquashed()) {
822 readyInsts[op_class].pop();
823
824 if (!readyInsts[op_class].empty()) {
825 moveToYoungerInst(order_it);
826 } else {
827 readyIt[op_class] = listOrder.end();
828 queueOnList[op_class] = false;
829 }
830
831 listOrder.erase(order_it++);
832
833 ++iqSquashedInstsIssued;
834
835 continue;
836 }
837
838 int idx = FUPool::NoCapableFU;
839 Cycles op_latency = Cycles(1);
840 ThreadID tid = issuing_inst->threadNumber;
841
842 if (op_class != No_OpClass) {
843 idx = fuPool->getUnit(op_class);
844 if (issuing_inst->isFloating()) {
845 fpAluAccesses++;
846 } else if (issuing_inst->isVector()) {
847 vecAluAccesses++;
848 } else {
849 intAluAccesses++;
850 }
851 if (idx > FUPool::NoFreeFU) {
852 op_latency = fuPool->getOpLatency(op_class);
853 }
854 }
855
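        // A note on the FU index sentinels used below (their ordering
        // matters here): valid FU indices are >= 0, FUPool::NoFreeFU means a
        // capable FU exists but none is free this cycle (the instruction is
        // not issued and statFuBusy/fuBusy are bumped), while an idx left at
        // FUPool::NoCapableFU (e.g. for No_OpClass) lets the instruction
        // issue without reserving any FU.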
856        // If the instruction doesn't require an FU, or a free FU was
857        // found, then schedule it for execution.
858 if (idx != FUPool::NoFreeFU) {
859 if (op_latency == Cycles(1)) {
860 i2e_info->size++;
861 instsToExecute.push_back(issuing_inst);
862
863                // Add the FU onto the list of FUs to be freed next
864                // cycle if we used one.
865 if (idx >= 0)
866 fuPool->freeUnitNextCycle(idx);
867 } else {
868 bool pipelined = fuPool->isPipelined(op_class);
869 // Generate completion event for the FU
870 ++wbOutstanding;
871 FUCompletion *execution = new FUCompletion(issuing_inst,
872 idx, this);
873
874 cpu->schedule(execution,
875 cpu->clockEdge(Cycles(op_latency - 1)));
876
877 if (!pipelined) {
878                    // If the FU isn't pipelined, then it must be freed
879                    // when execution completes.
880 execution->setFreeFU();
881 } else {
882                    // Add the FU onto the list of FUs to be freed next cycle.
883 fuPool->freeUnitNextCycle(idx);
884 }
885 }
886
887 DPRINTF(IQ, "Thread %i: Issuing instruction PC %s "
888 "[sn:%lli]\n",
889 tid, issuing_inst->pcState(),
890 issuing_inst->seqNum);
891
892 readyInsts[op_class].pop();
893
894 if (!readyInsts[op_class].empty()) {
895 moveToYoungerInst(order_it);
896 } else {
897 readyIt[op_class] = listOrder.end();
898 queueOnList[op_class] = false;
899 }
900
901 issuing_inst->setIssued();
902 ++total_issued;
903
904#if TRACING_ON
905 issuing_inst->issueTick = curTick() - issuing_inst->fetchTick;
906#endif
907
908 if (!issuing_inst->isMemRef()) {
909                // Memory instructions cannot be freed from the IQ until
910                // they complete.
911 ++freeEntries;
912 count[tid]--;
913 issuing_inst->clearInIQ();
914 } else {
915 memDepUnit[tid].issue(issuing_inst);
916 }
917
918 listOrder.erase(order_it++);
919 statIssuedInstType[tid][op_class]++;
920 } else {
921 statFuBusy[op_class]++;
922 fuBusy[tid]++;
923 ++order_it;
924 }
925 }
926
927 numIssuedDist.sample(total_issued);
928 iqInstsIssued+= total_issued;
929
930 // If we issued any instructions, tell the CPU we had activity.
931    // @todo If the way deferred memory instructions are handled due to
932    // translation changes, then the deferredMemInsts condition should be
933    // removed from the code below.
934 if (total_issued || !retryMemInsts.empty() || !deferredMemInsts.empty()) {
935 cpu->activityThisCycle();
936 } else {
937 DPRINTF(IQ, "Not able to schedule any instructions.\n");
938 }
939}
940
941template <class Impl>
942void
943InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
944{
945 DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
946 "to execute.\n", inst);
947
948 NonSpecMapIt inst_it = nonSpecInsts.find(inst);
949
950 assert(inst_it != nonSpecInsts.end());
951
952 ThreadID tid = (*inst_it).second->threadNumber;
953
954 (*inst_it).second->setAtCommit();
955
956 (*inst_it).second->setCanIssue();
957
958 if (!(*inst_it).second->isMemRef()) {
959 addIfReady((*inst_it).second);
960 } else {
961 memDepUnit[tid].nonSpecInstReady((*inst_it).second);
962 }
963
964 (*inst_it).second = NULL;
965
966 nonSpecInsts.erase(inst_it);
967}
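// scheduleNonSpec() is expected to be driven from the commit stage (relayed
// through IEW) once the non-speculative instruction is safe to execute; until
// that signal arrives the instruction simply waits in nonSpecInsts.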
968
969template <class Impl>
970void
971InstructionQueue<Impl>::commit(const InstSeqNum &inst, ThreadID tid)
972{
973    DPRINTF(IQ, "[tid:%i]: Committing instructions up to and including "
974            "[sn:%i]\n", tid, inst);
975
976 ListIt iq_it = instList[tid].begin();
977
978 while (iq_it != instList[tid].end() &&
979 (*iq_it)->seqNum <= inst) {
980 ++iq_it;
981 instList[tid].pop_front();
982 }
983
984 assert(freeEntries == (numEntries - countInsts()));
985}
986
987template <class Impl>
988int
989InstructionQueue<Impl>::wakeDependents(const DynInstPtr &completed_inst)
990{
991 int dependents = 0;
992
993 // The instruction queue here takes care of both floating and int ops
994 if (completed_inst->isFloating()) {
995 fpInstQueueWakeupAccesses++;
996 } else if (completed_inst->isVector()) {
997 vecInstQueueWakeupAccesses++;
998 } else {
999 intInstQueueWakeupAccesses++;
1000 }
1001
1002 DPRINTF(IQ, "Waking dependents of completed instruction.\n");
1003
1004 assert(!completed_inst->isSquashed());
1005
1006 // Tell the memory dependence unit to wake any dependents on this
1007 // instruction if it is a memory instruction. Also complete the memory
1008 // instruction at this point since we know it executed without issues.
1009 // @todo: Might want to rename "completeMemInst" to something that
1010 // indicates that it won't need to be replayed, and call this
1011 // earlier. Might not be a big deal.
1012 if (completed_inst->isMemRef()) {
1013 memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
1014 completeMemInst(completed_inst);
1015 } else if (completed_inst->isMemBarrier() ||
1016 completed_inst->isWriteBarrier()) {
1017 memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
1018 }
1019
1020 for (int dest_reg_idx = 0;
1021 dest_reg_idx < completed_inst->numDestRegs();
1022 dest_reg_idx++)
1023 {
1024 PhysRegIdPtr dest_reg =
1025 completed_inst->renamedDestRegIdx(dest_reg_idx);
1026
1027        // Special case of unique or control registers. They are not
1028        // handled by the IQ and thus have no dependency graph entry.
1029        if (dest_reg->isFixedMapping()) {
1030            DPRINTF(IQ, "Reg %d [%s] is part of a fixed mapping, skipping\n",
1031                    dest_reg->index(), dest_reg->className());
1032 continue;
1033 }
1034
1035 DPRINTF(IQ, "Waking any dependents on register %i (%s).\n",
1036 dest_reg->index(),
1037 dest_reg->className());
1038
1039 //Go through the dependency chain, marking the registers as
1040 //ready within the waiting instructions.
1041 DynInstPtr dep_inst = dependGraph.pop(dest_reg->flatIndex());
1042
1043 while (dep_inst) {
1044 DPRINTF(IQ, "Waking up a dependent instruction, [sn:%lli] "
1045 "PC %s.\n", dep_inst->seqNum, dep_inst->pcState());
1046
1047 // Might want to give more information to the instruction
1048 // so that it knows which of its source registers is
1049 // ready. However that would mean that the dependency
1050 // graph entries would need to hold the src_reg_idx.
1051 dep_inst->markSrcRegReady();
1052
1053 addIfReady(dep_inst);
1054
1055 dep_inst = dependGraph.pop(dest_reg->flatIndex());
1056
1057 ++dependents;
1058 }
1059
1060 // Reset the head node now that all of its dependents have
1061 // been woken up.
1062 assert(dependGraph.empty(dest_reg->flatIndex()));
1063 dependGraph.clearInst(dest_reg->flatIndex());
1064
1065 // Mark the scoreboard as having that register ready.
1066 regScoreboard[dest_reg->flatIndex()] = true;
1067 }
1068 return dependents;
1069}
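// Example of the flow above (made-up register and sequence numbers): if
// [sn:10] wrote physical register 42, and [sn:11] and [sn:13] are waiting on
// register 42's dependency chain, completing [sn:10] pops both waiters, marks
// one source register ready in each, calls addIfReady() on each, and finally
// flags register 42 in regScoreboard so that instructions inserted later see
// it as already ready.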
1070
1071template <class Impl>
1072void
1073InstructionQueue<Impl>::addReadyMemInst(const DynInstPtr &ready_inst)
1074{
1075 OpClass op_class = ready_inst->opClass();
1076
1077 readyInsts[op_class].push(ready_inst);
1078
1079 // Will need to reorder the list if either a queue is not on the list,
1080 // or it has an older instruction than last time.
1081 if (!queueOnList[op_class]) {
1082 addToOrderList(op_class);
1083 } else if (readyInsts[op_class].top()->seqNum <
1084 (*readyIt[op_class]).oldestInst) {
1085 listOrder.erase(readyIt[op_class]);
1086 addToOrderList(op_class);
1087 }
1088
1089 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
1090 "the ready list, PC %s opclass:%i [sn:%lli].\n",
1091 ready_inst->pcState(), op_class, ready_inst->seqNum);
1092}
1093
1094template <class Impl>
1095void
1096InstructionQueue<Impl>::rescheduleMemInst(const DynInstPtr &resched_inst)
1097{
1098 DPRINTF(IQ, "Rescheduling mem inst [sn:%lli]\n", resched_inst->seqNum);
1099
1100 // Reset DTB translation state
1101 resched_inst->translationStarted(false);
1102 resched_inst->translationCompleted(false);
1103
1104 resched_inst->clearCanIssue();
1105 memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
1106}
1107
1108template <class Impl>
1109void
1110InstructionQueue<Impl>::replayMemInst(const DynInstPtr &replay_inst)
1111{
1112 memDepUnit[replay_inst->threadNumber].replay();
1113}
1114
1115template <class Impl>
1116void
1117InstructionQueue<Impl>::completeMemInst(const DynInstPtr &completed_inst)
1118{
1119 ThreadID tid = completed_inst->threadNumber;
1120
1121 DPRINTF(IQ, "Completing mem instruction PC: %s [sn:%lli]\n",
1122 completed_inst->pcState(), completed_inst->seqNum);
1123
1124 ++freeEntries;
1125
1126 completed_inst->memOpDone(true);
1127
1128 memDepUnit[tid].completed(completed_inst);
1129 count[tid]--;
1130}
1131
1132template <class Impl>
1133void
1134InstructionQueue<Impl>::deferMemInst(const DynInstPtr &deferred_inst)
1135{
1136 deferredMemInsts.push_back(deferred_inst);
1137}
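// Deferred and cache-blocked memory instructions re-enter the schedulable
// pool at the top of scheduleReadyInsts() via getDeferredMemInstToExecute()
// and getBlockedMemInstToExecute(), whereas rescheduleMemInst() above sends
// an instruction back through the memory dependence unit (after resetting its
// translation state).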
1138
1139template <class Impl>
1140void
1141InstructionQueue<Impl>::blockMemInst(const DynInstPtr &blocked_inst)
1142{
3 * Copyright (c) 2013 Advanced Micro Devices, Inc.
4 * All rights reserved.
5 *
6 * The license below extends only to copyright in the software and shall
7 * not be construed as granting a license to any other intellectual
8 * property including but not limited to intellectual property relating
9 * to a hardware implementation of the functionality of the software
10 * licensed hereunder. You may use the software subject to the license
11 * terms below provided that you ensure that this notice is replicated
12 * unmodified and in its entirety in all distributions of the software,
13 * modified or unmodified, in source code or in binary form.
14 *
15 * Copyright (c) 2004-2006 The Regents of The University of Michigan
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Kevin Lim
42 * Korey Sewell
43 */
44
45#ifndef __CPU_O3_INST_QUEUE_IMPL_HH__
46#define __CPU_O3_INST_QUEUE_IMPL_HH__
47
48#include <limits>
49#include <vector>
50
51#include "base/logging.hh"
52#include "cpu/o3/fu_pool.hh"
53#include "cpu/o3/inst_queue.hh"
54#include "debug/IQ.hh"
55#include "enums/OpClass.hh"
56#include "params/DerivO3CPU.hh"
57#include "sim/core.hh"
58
59// clang complains about std::set being overloaded with Packet::set if
60// we open up the entire namespace std
61using std::list;
62
63template <class Impl>
64InstructionQueue<Impl>::FUCompletion::FUCompletion(const DynInstPtr &_inst,
65 int fu_idx, InstructionQueue<Impl> *iq_ptr)
66 : Event(Stat_Event_Pri, AutoDelete),
67 inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false)
68{
69}
70
71template <class Impl>
72void
73InstructionQueue<Impl>::FUCompletion::process()
74{
75 iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1);
76 inst = NULL;
77}
78
79
80template <class Impl>
81const char *
82InstructionQueue<Impl>::FUCompletion::description() const
83{
84 return "Functional unit completion";
85}
86
87template <class Impl>
88InstructionQueue<Impl>::InstructionQueue(O3CPU *cpu_ptr, IEW *iew_ptr,
89 DerivO3CPUParams *params)
90 : cpu(cpu_ptr),
91 iewStage(iew_ptr),
92 fuPool(params->fuPool),
93 iqPolicy(params->smtIQPolicy),
94 numEntries(params->numIQEntries),
95 totalWidth(params->issueWidth),
96 commitToIEWDelay(params->commitToIEWDelay)
97{
98 assert(fuPool);
99
100 numThreads = params->numThreads;
101
102 // Set the number of total physical registers
103 // As the vector registers have two addressing modes, they are added twice
104 numPhysRegs = params->numPhysIntRegs + params->numPhysFloatRegs +
105 params->numPhysVecRegs +
106 params->numPhysVecRegs * TheISA::NumVecElemPerVecReg +
107 params->numPhysCCRegs;
108
109 //Create an entry for each physical register within the
110 //dependency graph.
111 dependGraph.resize(numPhysRegs);
112
113 // Resize the register scoreboard.
114 regScoreboard.resize(numPhysRegs);
115
116 //Initialize Mem Dependence Units
117 for (ThreadID tid = 0; tid < Impl::MaxThreads; tid++) {
118 memDepUnit[tid].init(params, tid);
119 memDepUnit[tid].setIQ(this);
120 }
121
122 resetState();
123
124 //Figure out resource sharing policy
125 if (iqPolicy == SMTQueuePolicy::Dynamic) {
126 //Set Max Entries to Total ROB Capacity
127 for (ThreadID tid = 0; tid < numThreads; tid++) {
128 maxEntries[tid] = numEntries;
129 }
130
131 } else if (iqPolicy == SMTQueuePolicy::Partitioned) {
132 //@todo:make work if part_amt doesnt divide evenly.
133 int part_amt = numEntries / numThreads;
134
135 //Divide ROB up evenly
136 for (ThreadID tid = 0; tid < numThreads; tid++) {
137 maxEntries[tid] = part_amt;
138 }
139
140 DPRINTF(IQ, "IQ sharing policy set to Partitioned:"
141 "%i entries per thread.\n",part_amt);
142 } else if (iqPolicy == SMTQueuePolicy::Threshold) {
143 double threshold = (double)params->smtIQThreshold / 100;
144
145 int thresholdIQ = (int)((double)threshold * numEntries);
146
147 //Divide up by threshold amount
148 for (ThreadID tid = 0; tid < numThreads; tid++) {
149 maxEntries[tid] = thresholdIQ;
150 }
151
152 DPRINTF(IQ, "IQ sharing policy set to Threshold:"
153 "%i entries per thread.\n",thresholdIQ);
154 }
155 for (ThreadID tid = numThreads; tid < Impl::MaxThreads; tid++) {
156 maxEntries[tid] = 0;
157 }
158}
159
160template <class Impl>
161InstructionQueue<Impl>::~InstructionQueue()
162{
163 dependGraph.reset();
164#ifdef DEBUG
165 cprintf("Nodes traversed: %i, removed: %i\n",
166 dependGraph.nodesTraversed, dependGraph.nodesRemoved);
167#endif
168}
169
170template <class Impl>
171std::string
172InstructionQueue<Impl>::name() const
173{
174 return cpu->name() + ".iq";
175}
176
177template <class Impl>
178void
179InstructionQueue<Impl>::regStats()
180{
181 using namespace Stats;
182 iqInstsAdded
183 .name(name() + ".iqInstsAdded")
184 .desc("Number of instructions added to the IQ (excludes non-spec)")
185 .prereq(iqInstsAdded);
186
187 iqNonSpecInstsAdded
188 .name(name() + ".iqNonSpecInstsAdded")
189 .desc("Number of non-speculative instructions added to the IQ")
190 .prereq(iqNonSpecInstsAdded);
191
192 iqInstsIssued
193 .name(name() + ".iqInstsIssued")
194 .desc("Number of instructions issued")
195 .prereq(iqInstsIssued);
196
197 iqIntInstsIssued
198 .name(name() + ".iqIntInstsIssued")
199 .desc("Number of integer instructions issued")
200 .prereq(iqIntInstsIssued);
201
202 iqFloatInstsIssued
203 .name(name() + ".iqFloatInstsIssued")
204 .desc("Number of float instructions issued")
205 .prereq(iqFloatInstsIssued);
206
207 iqBranchInstsIssued
208 .name(name() + ".iqBranchInstsIssued")
209 .desc("Number of branch instructions issued")
210 .prereq(iqBranchInstsIssued);
211
212 iqMemInstsIssued
213 .name(name() + ".iqMemInstsIssued")
214 .desc("Number of memory instructions issued")
215 .prereq(iqMemInstsIssued);
216
217 iqMiscInstsIssued
218 .name(name() + ".iqMiscInstsIssued")
219 .desc("Number of miscellaneous instructions issued")
220 .prereq(iqMiscInstsIssued);
221
222 iqSquashedInstsIssued
223 .name(name() + ".iqSquashedInstsIssued")
224 .desc("Number of squashed instructions issued")
225 .prereq(iqSquashedInstsIssued);
226
227 iqSquashedInstsExamined
228 .name(name() + ".iqSquashedInstsExamined")
229 .desc("Number of squashed instructions iterated over during squash;"
230 " mainly for profiling")
231 .prereq(iqSquashedInstsExamined);
232
233 iqSquashedOperandsExamined
234 .name(name() + ".iqSquashedOperandsExamined")
235 .desc("Number of squashed operands that are examined and possibly "
236 "removed from graph")
237 .prereq(iqSquashedOperandsExamined);
238
239 iqSquashedNonSpecRemoved
240 .name(name() + ".iqSquashedNonSpecRemoved")
241 .desc("Number of squashed non-spec instructions that were removed")
242 .prereq(iqSquashedNonSpecRemoved);
243/*
244 queueResDist
245 .init(Num_OpClasses, 0, 99, 2)
246 .name(name() + ".IQ:residence:")
247 .desc("cycles from dispatch to issue")
248 .flags(total | pdf | cdf )
249 ;
250 for (int i = 0; i < Num_OpClasses; ++i) {
251 queueResDist.subname(i, opClassStrings[i]);
252 }
253*/
254 numIssuedDist
255 .init(0,totalWidth,1)
256 .name(name() + ".issued_per_cycle")
257 .desc("Number of insts issued each cycle")
258 .flags(pdf)
259 ;
260/*
261 dist_unissued
262 .init(Num_OpClasses+2)
263 .name(name() + ".unissued_cause")
264 .desc("Reason ready instruction not issued")
265 .flags(pdf | dist)
266 ;
267 for (int i=0; i < (Num_OpClasses + 2); ++i) {
268 dist_unissued.subname(i, unissued_names[i]);
269 }
270*/
271 statIssuedInstType
272 .init(numThreads,Enums::Num_OpClass)
273 .name(name() + ".FU_type")
274 .desc("Type of FU issued")
275 .flags(total | pdf | dist)
276 ;
277 statIssuedInstType.ysubnames(Enums::OpClassStrings);
278
279 //
280 // How long did instructions for a particular FU type wait prior to issue
281 //
282/*
283 issueDelayDist
284 .init(Num_OpClasses,0,99,2)
285 .name(name() + ".")
286 .desc("cycles from operands ready to issue")
287 .flags(pdf | cdf)
288 ;
289
290 for (int i=0; i<Num_OpClasses; ++i) {
291 std::stringstream subname;
292 subname << opClassStrings[i] << "_delay";
293 issueDelayDist.subname(i, subname.str());
294 }
295*/
296 issueRate
297 .name(name() + ".rate")
298 .desc("Inst issue rate")
299 .flags(total)
300 ;
301 issueRate = iqInstsIssued / cpu->numCycles;
302
303 statFuBusy
304 .init(Num_OpClasses)
305 .name(name() + ".fu_full")
306 .desc("attempts to use FU when none available")
307 .flags(pdf | dist)
308 ;
309 for (int i=0; i < Num_OpClasses; ++i) {
310 statFuBusy.subname(i, Enums::OpClassStrings[i]);
311 }
312
313 fuBusy
314 .init(numThreads)
315 .name(name() + ".fu_busy_cnt")
316 .desc("FU busy when requested")
317 .flags(total)
318 ;
319
320 fuBusyRate
321 .name(name() + ".fu_busy_rate")
322 .desc("FU busy rate (busy events/executed inst)")
323 .flags(total)
324 ;
325 fuBusyRate = fuBusy / iqInstsIssued;
326
327 for (ThreadID tid = 0; tid < numThreads; tid++) {
328 // Tell mem dependence unit to reg stats as well.
329 memDepUnit[tid].regStats();
330 }
331
332 intInstQueueReads
333 .name(name() + ".int_inst_queue_reads")
334 .desc("Number of integer instruction queue reads")
335 .flags(total);
336
337 intInstQueueWrites
338 .name(name() + ".int_inst_queue_writes")
339 .desc("Number of integer instruction queue writes")
340 .flags(total);
341
342 intInstQueueWakeupAccesses
343 .name(name() + ".int_inst_queue_wakeup_accesses")
344 .desc("Number of integer instruction queue wakeup accesses")
345 .flags(total);
346
347 fpInstQueueReads
348 .name(name() + ".fp_inst_queue_reads")
349 .desc("Number of floating instruction queue reads")
350 .flags(total);
351
352 fpInstQueueWrites
353 .name(name() + ".fp_inst_queue_writes")
354 .desc("Number of floating instruction queue writes")
355 .flags(total);
356
357 fpInstQueueWakeupAccesses
358 .name(name() + ".fp_inst_queue_wakeup_accesses")
359 .desc("Number of floating instruction queue wakeup accesses")
360 .flags(total);
361
362 vecInstQueueReads
363 .name(name() + ".vec_inst_queue_reads")
364 .desc("Number of vector instruction queue reads")
365 .flags(total);
366
367 vecInstQueueWrites
368 .name(name() + ".vec_inst_queue_writes")
369 .desc("Number of vector instruction queue writes")
370 .flags(total);
371
372 vecInstQueueWakeupAccesses
373 .name(name() + ".vec_inst_queue_wakeup_accesses")
374 .desc("Number of vector instruction queue wakeup accesses")
375 .flags(total);
376
377 intAluAccesses
378 .name(name() + ".int_alu_accesses")
379 .desc("Number of integer alu accesses")
380 .flags(total);
381
382 fpAluAccesses
383 .name(name() + ".fp_alu_accesses")
384 .desc("Number of floating point alu accesses")
385 .flags(total);
386
387 vecAluAccesses
388 .name(name() + ".vec_alu_accesses")
389 .desc("Number of vector alu accesses")
390 .flags(total);
391
392}
393
394template <class Impl>
395void
396InstructionQueue<Impl>::resetState()
397{
398 //Initialize thread IQ counts
399 for (ThreadID tid = 0; tid < Impl::MaxThreads; tid++) {
400 count[tid] = 0;
401 instList[tid].clear();
402 }
403
404 // Initialize the number of free IQ entries.
405 freeEntries = numEntries;
406
407 // Note that in actuality, the registers corresponding to the logical
408 // registers start off as ready. However this doesn't matter for the
409 // IQ as the instruction should have been correctly told if those
410 // registers are ready in rename. Thus it can all be initialized as
411 // unready.
412 for (int i = 0; i < numPhysRegs; ++i) {
413 regScoreboard[i] = false;
414 }
415
416 for (ThreadID tid = 0; tid < Impl::MaxThreads; ++tid) {
417 squashedSeqNum[tid] = 0;
418 }
419
420 for (int i = 0; i < Num_OpClasses; ++i) {
421 while (!readyInsts[i].empty())
422 readyInsts[i].pop();
423 queueOnList[i] = false;
424 readyIt[i] = listOrder.end();
425 }
426 nonSpecInsts.clear();
427 listOrder.clear();
428 deferredMemInsts.clear();
429 blockedMemInsts.clear();
430 retryMemInsts.clear();
431 wbOutstanding = 0;
432}
433
434template <class Impl>
435void
436InstructionQueue<Impl>::setActiveThreads(list<ThreadID> *at_ptr)
437{
438 activeThreads = at_ptr;
439}
440
441template <class Impl>
442void
443InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
444{
445 issueToExecuteQueue = i2e_ptr;
446}
447
448template <class Impl>
449void
450InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
451{
452 timeBuffer = tb_ptr;
453
454 fromCommit = timeBuffer->getWire(-commitToIEWDelay);
455}
456
457template <class Impl>
458bool
459InstructionQueue<Impl>::isDrained() const
460{
461 bool drained = dependGraph.empty() &&
462 instsToExecute.empty() &&
463 wbOutstanding == 0;
464 for (ThreadID tid = 0; tid < numThreads; ++tid)
465 drained = drained && memDepUnit[tid].isDrained();
466
467 return drained;
468}
469
470template <class Impl>
471void
472InstructionQueue<Impl>::drainSanityCheck() const
473{
474 assert(dependGraph.empty());
475 assert(instsToExecute.empty());
476 for (ThreadID tid = 0; tid < numThreads; ++tid)
477 memDepUnit[tid].drainSanityCheck();
478}
479
480template <class Impl>
481void
482InstructionQueue<Impl>::takeOverFrom()
483{
484 resetState();
485}
486
487template <class Impl>
488int
489InstructionQueue<Impl>::entryAmount(ThreadID num_threads)
490{
491 if (iqPolicy == SMTQueuePolicy::Partitioned) {
492 return numEntries / num_threads;
493 } else {
494 return 0;
495 }
496}
497
498
499template <class Impl>
500void
501InstructionQueue<Impl>::resetEntries()
502{
503 if (iqPolicy != SMTQueuePolicy::Dynamic || numThreads > 1) {
504 int active_threads = activeThreads->size();
505
506 list<ThreadID>::iterator threads = activeThreads->begin();
507 list<ThreadID>::iterator end = activeThreads->end();
508
509 while (threads != end) {
510 ThreadID tid = *threads++;
511
512 if (iqPolicy == SMTQueuePolicy::Partitioned) {
513 maxEntries[tid] = numEntries / active_threads;
514 } else if (iqPolicy == SMTQueuePolicy::Threshold &&
515 active_threads == 1) {
516 maxEntries[tid] = numEntries;
517 }
518 }
519 }
520}
521
522template <class Impl>
523unsigned
524InstructionQueue<Impl>::numFreeEntries()
525{
526 return freeEntries;
527}
528
529template <class Impl>
530unsigned
531InstructionQueue<Impl>::numFreeEntries(ThreadID tid)
532{
533 return maxEntries[tid] - count[tid];
534}
535
536// Might want to do something more complex if it knows how many instructions
537// will be issued this cycle.
538template <class Impl>
539bool
540InstructionQueue<Impl>::isFull()
541{
542 if (freeEntries == 0) {
543 return(true);
544 } else {
545 return(false);
546 }
547}
548
549template <class Impl>
550bool
551InstructionQueue<Impl>::isFull(ThreadID tid)
552{
553 if (numFreeEntries(tid) == 0) {
554 return(true);
555 } else {
556 return(false);
557 }
558}
559
560template <class Impl>
561bool
562InstructionQueue<Impl>::hasReadyInsts()
563{
564 if (!listOrder.empty()) {
565 return true;
566 }
567
568 for (int i = 0; i < Num_OpClasses; ++i) {
569 if (!readyInsts[i].empty()) {
570 return true;
571 }
572 }
573
574 return false;
575}
576
577template <class Impl>
578void
579InstructionQueue<Impl>::insert(const DynInstPtr &new_inst)
580{
581 if (new_inst->isFloating()) {
582 fpInstQueueWrites++;
583 } else if (new_inst->isVector()) {
584 vecInstQueueWrites++;
585 } else {
586 intInstQueueWrites++;
587 }
588 // Make sure the instruction is valid
589 assert(new_inst);
590
591 DPRINTF(IQ, "Adding instruction [sn:%lli] PC %s to the IQ.\n",
592 new_inst->seqNum, new_inst->pcState());
593
594 assert(freeEntries != 0);
595
596 instList[new_inst->threadNumber].push_back(new_inst);
597
598 --freeEntries;
599
600 new_inst->setInIQ();
601
602 // Look through its source registers (physical regs), and mark any
603 // dependencies.
604 addToDependents(new_inst);
605
606 // Have this instruction set itself as the producer of its destination
607 // register(s).
608 addToProducers(new_inst);
609
610 if (new_inst->isMemRef()) {
611 memDepUnit[new_inst->threadNumber].insert(new_inst);
612 } else {
613 addIfReady(new_inst);
614 }
615
616 ++iqInstsAdded;
617
618 count[new_inst->threadNumber]++;
619
620 assert(freeEntries == (numEntries - countInsts()));
621}
622
623template <class Impl>
624void
625InstructionQueue<Impl>::insertNonSpec(const DynInstPtr &new_inst)
626{
627 // @todo: Clean up this code; can do it by setting inst as unable
628 // to issue, then calling normal insert on the inst.
629 if (new_inst->isFloating()) {
630 fpInstQueueWrites++;
631 } else if (new_inst->isVector()) {
632 vecInstQueueWrites++;
633 } else {
634 intInstQueueWrites++;
635 }
636
637 assert(new_inst);
638
639 nonSpecInsts[new_inst->seqNum] = new_inst;
640
641 DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %s "
642 "to the IQ.\n",
643 new_inst->seqNum, new_inst->pcState());
644
645 assert(freeEntries != 0);
646
647 instList[new_inst->threadNumber].push_back(new_inst);
648
649 --freeEntries;
650
651 new_inst->setInIQ();
652
653 // Have this instruction set itself as the producer of its destination
654 // register(s).
655 addToProducers(new_inst);
656
657 // If it's a memory instruction, add it to the memory dependency
658 // unit.
659 if (new_inst->isMemRef()) {
660 memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
661 }
662
663 ++iqNonSpecInstsAdded;
664
665 count[new_inst->threadNumber]++;
666
667 assert(freeEntries == (numEntries - countInsts()));
668}
669
670template <class Impl>
671void
672InstructionQueue<Impl>::insertBarrier(const DynInstPtr &barr_inst)
673{
674 memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);
675
676 insertNonSpec(barr_inst);
677}
678
679template <class Impl>
680typename Impl::DynInstPtr
681InstructionQueue<Impl>::getInstToExecute()
682{
683 assert(!instsToExecute.empty());
684 DynInstPtr inst = std::move(instsToExecute.front());
685 instsToExecute.pop_front();
686 if (inst->isFloating()) {
687 fpInstQueueReads++;
688 } else if (inst->isVector()) {
689 vecInstQueueReads++;
690 } else {
691 intInstQueueReads++;
692 }
693 return inst;
694}
695
696template <class Impl>
697void
698InstructionQueue<Impl>::addToOrderList(OpClass op_class)
699{
700 assert(!readyInsts[op_class].empty());
701
702 ListOrderEntry queue_entry;
703
704 queue_entry.queueType = op_class;
705
706 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
707
708 ListOrderIt list_it = listOrder.begin();
709 ListOrderIt list_end_it = listOrder.end();
710
711 while (list_it != list_end_it) {
712 if ((*list_it).oldestInst > queue_entry.oldestInst) {
713 break;
714 }
715
716 list_it++;
717 }
718
719 readyIt[op_class] = listOrder.insert(list_it, queue_entry);
720 queueOnList[op_class] = true;
721}
722
723template <class Impl>
724void
725InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
726{
727 // Get iterator of next item on the list
728 // Delete the original iterator
729 // Determine if the next item is either the end of the list or younger
730 // than the new instruction. If so, then add in a new iterator right here.
731 // If not, then move along.
732 ListOrderEntry queue_entry;
733 OpClass op_class = (*list_order_it).queueType;
734 ListOrderIt next_it = list_order_it;
735
736 ++next_it;
737
738 queue_entry.queueType = op_class;
739 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
740
741 while (next_it != listOrder.end() &&
742 (*next_it).oldestInst < queue_entry.oldestInst) {
743 ++next_it;
744 }
745
746 readyIt[op_class] = listOrder.insert(next_it, queue_entry);
747}
748
749template <class Impl>
750void
751InstructionQueue<Impl>::processFUCompletion(const DynInstPtr &inst, int fu_idx)
752{
753 DPRINTF(IQ, "Processing FU completion [sn:%lli]\n", inst->seqNum);
754 assert(!cpu->switchedOut());
755 // The CPU could have been sleeping until this op completed (*extremely*
756 // long latency op). Wake it if it was. This may be overkill.
757 --wbOutstanding;
758 iewStage->wakeCPU();
759
760 if (fu_idx > -1)
761 fuPool->freeUnitNextCycle(fu_idx);
762
763 // @todo: Ensure that these FU Completions happen at the beginning
764 // of a cycle, otherwise they could add too many instructions to
765 // the queue.
766 issueToExecuteQueue->access(-1)->size++;
767 instsToExecute.push_back(inst);
768}
769
770// @todo: Figure out a better way to remove the squashed items from the
771// lists. Checking the top item of each list to see if it's squashed
772// wastes time and forces jumps.
773template <class Impl>
774void
775InstructionQueue<Impl>::scheduleReadyInsts()
776{
777 DPRINTF(IQ, "Attempting to schedule ready instructions from "
778 "the IQ.\n");
779
780 IssueStruct *i2e_info = issueToExecuteQueue->access(0);
781
782 DynInstPtr mem_inst;
783 while (mem_inst = std::move(getDeferredMemInstToExecute())) {
784 addReadyMemInst(mem_inst);
785 }
786
787 // See if any cache blocked instructions are able to be executed
788 while (mem_inst = std::move(getBlockedMemInstToExecute())) {
789 addReadyMemInst(mem_inst);
790 }
791
792 // Have iterator to head of the list
793 // While I haven't exceeded bandwidth or reached the end of the list,
794 // Try to get a FU that can do what this op needs.
795 // If successful, change the oldestInst to the new top of the list, put
796 // the queue in the proper place in the list.
797 // Increment the iterator.
798 // This will avoid trying to schedule a certain op class if there are no
799 // FUs that handle it.
800 int total_issued = 0;
801 ListOrderIt order_it = listOrder.begin();
802 ListOrderIt order_end_it = listOrder.end();
803
804 while (total_issued < totalWidth && order_it != order_end_it) {
805 OpClass op_class = (*order_it).queueType;
806
807 assert(!readyInsts[op_class].empty());
808
809 DynInstPtr issuing_inst = readyInsts[op_class].top();
810
811 if (issuing_inst->isFloating()) {
812 fpInstQueueReads++;
813 } else if (issuing_inst->isVector()) {
814 vecInstQueueReads++;
815 } else {
816 intInstQueueReads++;
817 }
818
819 assert(issuing_inst->seqNum == (*order_it).oldestInst);
820
821 if (issuing_inst->isSquashed()) {
822 readyInsts[op_class].pop();
823
824 if (!readyInsts[op_class].empty()) {
825 moveToYoungerInst(order_it);
826 } else {
827 readyIt[op_class] = listOrder.end();
828 queueOnList[op_class] = false;
829 }
830
831 listOrder.erase(order_it++);
832
833 ++iqSquashedInstsIssued;
834
835 continue;
836 }
837
838 int idx = FUPool::NoCapableFU;
839 Cycles op_latency = Cycles(1);
840 ThreadID tid = issuing_inst->threadNumber;
841
842 if (op_class != No_OpClass) {
843 idx = fuPool->getUnit(op_class);
844 if (issuing_inst->isFloating()) {
845 fpAluAccesses++;
846 } else if (issuing_inst->isVector()) {
847 vecAluAccesses++;
848 } else {
849 intAluAccesses++;
850 }
851 if (idx > FUPool::NoFreeFU) {
852 op_latency = fuPool->getOpLatency(op_class);
853 }
854 }
855
856 // If the instruction doesn't require an FU, or we got a
857 // valid FU, then schedule it for execution.
858 if (idx != FUPool::NoFreeFU) {
859 if (op_latency == Cycles(1)) {
860 i2e_info->size++;
861 instsToExecute.push_back(issuing_inst);
862
863 // Add the FU onto the list of FUs to be freed next
864 // cycle if we used one.
865 if (idx >= 0)
866 fuPool->freeUnitNextCycle(idx);
867 } else {
868 bool pipelined = fuPool->isPipelined(op_class);
869 // Generate completion event for the FU
870 ++wbOutstanding;
871 FUCompletion *execution = new FUCompletion(issuing_inst,
872 idx, this);
873
874 cpu->schedule(execution,
875 cpu->clockEdge(Cycles(op_latency - 1)));
876
877 if (!pipelined) {
878 // If the FU isn't pipelined, then it must be freed
879 // when execution completes.
880 execution->setFreeFU();
881 } else {
882 // Add the FU onto the list of FUs to be freed next cycle.
883 fuPool->freeUnitNextCycle(idx);
884 }
885 }
886
887 DPRINTF(IQ, "Thread %i: Issuing instruction PC %s "
888 "[sn:%lli]\n",
889 tid, issuing_inst->pcState(),
890 issuing_inst->seqNum);
891
892 readyInsts[op_class].pop();
893
894 if (!readyInsts[op_class].empty()) {
895 moveToYoungerInst(order_it);
896 } else {
897 readyIt[op_class] = listOrder.end();
898 queueOnList[op_class] = false;
899 }
900
901 issuing_inst->setIssued();
902 ++total_issued;
903
904#if TRACING_ON
905 issuing_inst->issueTick = curTick() - issuing_inst->fetchTick;
906#endif
907
908 if (!issuing_inst->isMemRef()) {
909 // Memory instructions can not be freed from the IQ until they
910 // complete.
911 ++freeEntries;
912 count[tid]--;
913 issuing_inst->clearInIQ();
914 } else {
915 memDepUnit[tid].issue(issuing_inst);
916 }
917
918 listOrder.erase(order_it++);
919 statIssuedInstType[tid][op_class]++;
920 } else {
921 statFuBusy[op_class]++;
922 fuBusy[tid]++;
923 ++order_it;
924 }
925 }
926
927 numIssuedDist.sample(total_issued);
928 iqInstsIssued+= total_issued;
929
930 // If we issued any instructions, tell the CPU we had activity.
931 // @todo If the way deferred memory instructions are handled due to
932 // translation changes, then the deferredMemInsts condition should be
933 // removed from the code below.
934 if (total_issued || !retryMemInsts.empty() || !deferredMemInsts.empty()) {
935 cpu->activityThisCycle();
936 } else {
937 DPRINTF(IQ, "Not able to schedule any instructions.\n");
938 }
939}
940
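// A compact, self-contained model (not gem5 code) of the issue loop above:
// one ready heap per op class (reduced here to bare sequence numbers), an
// age-ordered list of those classes, and an issue-width limit, with FU
// availability abstracted to a callback. MiniScheduler, fu_available, and
// the other names below are hypothetical.
#if 0
#include <cstdint>
#include <functional>
#include <list>
#include <queue>
#include <vector>

struct MiniScheduler
{
    struct OrderEntry { int opClass; uint64_t oldestInst; };

    // Min-heaps of sequence numbers, one per op class (oldest on top).
    std::vector<std::priority_queue<uint64_t, std::vector<uint64_t>,
                                    std::greater<uint64_t>>> ready;
    std::list<OrderEntry> order;        // sorted by oldestInst (ascending)

    // Issue up to 'width' instructions; returns the issued sequence numbers.
    std::vector<uint64_t>
    schedule(unsigned width, const std::function<bool(int)> &fu_available)
    {
        std::vector<uint64_t> issued;
        auto it = order.begin();
        while (issued.size() < width && it != order.end()) {
            int op_class = it->opClass;
            if (!fu_available(op_class)) {  // FU busy: skip this class
                ++it;
                continue;
            }
            issued.push_back(ready[op_class].top());
            ready[op_class].pop();
            it = order.erase(it);
            if (!ready[op_class].empty())
                reinsert(op_class);         // new oldest: re-place in order
        }
        return issued;
    }

    void
    reinsert(int op_class)
    {
        uint64_t oldest = ready[op_class].top();
        auto pos = order.begin();
        while (pos != order.end() && pos->oldestInst < oldest)
            ++pos;
        order.insert(pos, OrderEntry{op_class, oldest});
    }
};
#endif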
941template <class Impl>
942void
943InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
944{
945 DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
946 "to execute.\n", inst);
947
948 NonSpecMapIt inst_it = nonSpecInsts.find(inst);
949
950 assert(inst_it != nonSpecInsts.end());
951
952 ThreadID tid = (*inst_it).second->threadNumber;
953
954 (*inst_it).second->setAtCommit();
955
956 (*inst_it).second->setCanIssue();
957
958 if (!(*inst_it).second->isMemRef()) {
959 addIfReady((*inst_it).second);
960 } else {
961 memDepUnit[tid].nonSpecInstReady((*inst_it).second);
962 }
963
964 (*inst_it).second = NULL;
965
966 nonSpecInsts.erase(inst_it);
967}
968
969template <class Impl>
970void
971InstructionQueue<Impl>::commit(const InstSeqNum &inst, ThreadID tid)
972{
973 DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n",
974 tid,inst);
975
976 ListIt iq_it = instList[tid].begin();
977
978 while (iq_it != instList[tid].end() &&
979 (*iq_it)->seqNum <= inst) {
980 ++iq_it;
981 instList[tid].pop_front();
982 }
983
984 assert(freeEntries == (numEntries - countInsts()));
985}
986
987template <class Impl>
988int
989InstructionQueue<Impl>::wakeDependents(const DynInstPtr &completed_inst)
990{
991 int dependents = 0;
992
993 // The instruction queue here takes care of both floating and int ops
994 if (completed_inst->isFloating()) {
995 fpInstQueueWakeupAccesses++;
996 } else if (completed_inst->isVector()) {
997 vecInstQueueWakeupAccesses++;
998 } else {
999 intInstQueueWakeupAccesses++;
1000 }
1001
1002 DPRINTF(IQ, "Waking dependents of completed instruction.\n");
1003
1004 assert(!completed_inst->isSquashed());
1005
1006 // Tell the memory dependence unit to wake any dependents on this
1007 // instruction if it is a memory instruction. Also complete the memory
1008 // instruction at this point since we know it executed without issues.
1009 // @todo: Might want to rename "completeMemInst" to something that
1010 // indicates that it won't need to be replayed, and call this
1011 // earlier. Might not be a big deal.
1012 if (completed_inst->isMemRef()) {
1013 memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
1014 completeMemInst(completed_inst);
1015 } else if (completed_inst->isMemBarrier() ||
1016 completed_inst->isWriteBarrier()) {
1017 memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
1018 }
1019
1020 for (int dest_reg_idx = 0;
1021 dest_reg_idx < completed_inst->numDestRegs();
1022 dest_reg_idx++)
1023 {
1024 PhysRegIdPtr dest_reg =
1025 completed_inst->renamedDestRegIdx(dest_reg_idx);
1026
1027 // Special case of unique or control registers. They are not
1028 // handled by the IQ and thus have no dependency graph entry.
1029 if (dest_reg->isFixedMapping()) {
1030 DPRINTF(IQ, "Reg %d [%s] is part of a fixed mapping, skipping\n",
1031 dest_reg->index(), dest_reg->className());
1032 continue;
1033 }
1034
1035 DPRINTF(IQ, "Waking any dependents on register %i (%s).\n",
1036 dest_reg->index(),
1037 dest_reg->className());
1038
1039 //Go through the dependency chain, marking the registers as
1040 //ready within the waiting instructions.
1041 DynInstPtr dep_inst = dependGraph.pop(dest_reg->flatIndex());
1042
1043 while (dep_inst) {
1044 DPRINTF(IQ, "Waking up a dependent instruction, [sn:%lli] "
1045 "PC %s.\n", dep_inst->seqNum, dep_inst->pcState());
1046
1047 // Might want to give more information to the instruction
1048 // so that it knows which of its source registers is
1049 // ready. However that would mean that the dependency
1050 // graph entries would need to hold the src_reg_idx.
1051 dep_inst->markSrcRegReady();
1052
1053 addIfReady(dep_inst);
1054
1055 dep_inst = dependGraph.pop(dest_reg->flatIndex());
1056
1057 ++dependents;
1058 }
1059
1060 // Reset the head node now that all of its dependents have
1061 // been woken up.
1062 assert(dependGraph.empty(dest_reg->flatIndex()));
1063 dependGraph.clearInst(dest_reg->flatIndex());
1064
1065 // Mark the scoreboard as having that register ready.
1066 regScoreboard[dest_reg->flatIndex()] = true;
1067 }
1068 return dependents;
1069}
1070
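// A stand-alone model (not gem5 code) of the dependency graph walked above:
// each physical register (flat index) heads a chain of waiting consumers;
// when the producer completes, the chain is drained, each consumer has one
// source marked ready, and the scoreboard bit is set. MiniDepGraph and
// Consumer are hypothetical names.
#if 0
#include <cstddef>
#include <deque>
#include <vector>

struct Consumer
{
    int srcRegsLeft;                    // outstanding source operands
    bool ready() const { return srcRegsLeft == 0; }
};

struct MiniDepGraph
{
    std::vector<std::deque<Consumer *>> chains;  // indexed by flat reg index
    std::vector<bool> scoreboard;                // register-ready bits

    explicit MiniDepGraph(std::size_t num_regs)
        : chains(num_regs), scoreboard(num_regs, false) {}

    void
    addConsumer(std::size_t reg, Consumer *c) { chains[reg].push_back(c); }

    // Producer of 'reg' completed: wake every consumer, as wakeDependents()
    // does for the real dependency graph.
    int
    wake(std::size_t reg)
    {
        int woken = 0;
        while (!chains[reg].empty()) {
            Consumer *c = chains[reg].front();
            chains[reg].pop_front();
            --c->srcRegsLeft;           // markSrcRegReady()
            ++woken;                    // addIfReady(c) would go here
        }
        scoreboard[reg] = true;         // register value is now available
        return woken;
    }
};
#endif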
1071template <class Impl>
1072void
1073InstructionQueue<Impl>::addReadyMemInst(const DynInstPtr &ready_inst)
1074{
1075 OpClass op_class = ready_inst->opClass();
1076
1077 readyInsts[op_class].push(ready_inst);
1078
1079 // Will need to reorder the list if either a queue is not on the list,
1080 // or it has an older instruction than last time.
1081 if (!queueOnList[op_class]) {
1082 addToOrderList(op_class);
1083 } else if (readyInsts[op_class].top()->seqNum <
1084 (*readyIt[op_class]).oldestInst) {
1085 listOrder.erase(readyIt[op_class]);
1086 addToOrderList(op_class);
1087 }
1088
1089 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
1090 "the ready list, PC %s opclass:%i [sn:%lli].\n",
1091 ready_inst->pcState(), op_class, ready_inst->seqNum);
1092}
1093
1094template <class Impl>
1095void
1096InstructionQueue<Impl>::rescheduleMemInst(const DynInstPtr &resched_inst)
1097{
1098 DPRINTF(IQ, "Rescheduling mem inst [sn:%lli]\n", resched_inst->seqNum);
1099
1100 // Reset DTB translation state
1101 resched_inst->translationStarted(false);
1102 resched_inst->translationCompleted(false);
1103
1104 resched_inst->clearCanIssue();
1105 memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
1106}
1107
1108template <class Impl>
1109void
1110InstructionQueue<Impl>::replayMemInst(const DynInstPtr &replay_inst)
1111{
1112 memDepUnit[replay_inst->threadNumber].replay();
1113}
1114
1115template <class Impl>
1116void
1117InstructionQueue<Impl>::completeMemInst(const DynInstPtr &completed_inst)
1118{
1119 ThreadID tid = completed_inst->threadNumber;
1120
1121 DPRINTF(IQ, "Completing mem instruction PC: %s [sn:%lli]\n",
1122 completed_inst->pcState(), completed_inst->seqNum);
1123
1124 ++freeEntries;
1125
1126 completed_inst->memOpDone(true);
1127
1128 memDepUnit[tid].completed(completed_inst);
1129 count[tid]--;
1130}
1131
1132template <class Impl>
1133void
1134InstructionQueue<Impl>::deferMemInst(const DynInstPtr &deferred_inst)
1135{
1136 deferredMemInsts.push_back(deferred_inst);
1137}
1138
1139template <class Impl>
1140void
1141InstructionQueue<Impl>::blockMemInst(const DynInstPtr &blocked_inst)
1142{
1143 blocked_inst->translationStarted(false);
1144 blocked_inst->translationCompleted(false);
1145
1146 blocked_inst->clearIssued();
1147 blocked_inst->clearCanIssue();
1148 blockedMemInsts.push_back(blocked_inst);
1149}
1150
1151template <class Impl>
1152void
1153InstructionQueue<Impl>::cacheUnblocked()
1154{
1155 retryMemInsts.splice(retryMemInsts.end(), blockedMemInsts);
1156 // Get the CPU ticking again
1157 cpu->wakeCPU();
1158}
1159
1160template <class Impl>
1161typename Impl::DynInstPtr
1162InstructionQueue<Impl>::getDeferredMemInstToExecute()
1163{
1164 for (ListIt it = deferredMemInsts.begin(); it != deferredMemInsts.end();
1165 ++it) {
1166 if ((*it)->translationCompleted() || (*it)->isSquashed()) {
1167 DynInstPtr mem_inst = std::move(*it);
1168 deferredMemInsts.erase(it);
1169 return mem_inst;
1170 }
1171 }
1172 return nullptr;
1173}
1174
1175template <class Impl>
1176typename Impl::DynInstPtr
1177InstructionQueue<Impl>::getBlockedMemInstToExecute()
1178{
1179 if (retryMemInsts.empty()) {
1180 return nullptr;
1181 } else {
1182 DynInstPtr mem_inst = std::move(retryMemInsts.front());
1183 retryMemInsts.pop_front();
1184 return mem_inst;
1185 }
1186}
1187
1188template <class Impl>
1189void
1190InstructionQueue<Impl>::violation(const DynInstPtr &store,
1191 const DynInstPtr &faulting_load)
1192{
1193 intInstQueueWrites++;
1194 memDepUnit[store->threadNumber].violation(store, faulting_load);
1195}
1196
1197template <class Impl>
1198void
1199InstructionQueue<Impl>::squash(ThreadID tid)
1200{
1201 DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
1202 "the IQ.\n", tid);
1203
1204 // Read instruction sequence number of last instruction out of the
1205 // time buffer.
1206 squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;
1207
1208 doSquash(tid);
1209
1210 // Also tell the memory dependence unit to squash.
1211 memDepUnit[tid].squash(squashedSeqNum[tid], tid);
1212}
1213
1214template <class Impl>
1215void
1216InstructionQueue<Impl>::doSquash(ThreadID tid)
1217{
1218 // Start at the tail.
1219 ListIt squash_it = instList[tid].end();
1220 --squash_it;
1221
1222 DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
1223 tid, squashedSeqNum[tid]);
1224
1225 // Squash any instructions younger than the squashed sequence number
1226 // given.
1227 while (squash_it != instList[tid].end() &&
1228 (*squash_it)->seqNum > squashedSeqNum[tid]) {
1229
1230 DynInstPtr squashed_inst = (*squash_it);
1231 if (squashed_inst->isFloating()) {
1232 fpInstQueueWrites++;
1233 } else if (squashed_inst->isVector()) {
1234 vecInstQueueWrites++;
1235 } else {
1236 intInstQueueWrites++;
1237 }
1238
1239 // Only handle the instruction if it actually is in the IQ and
1240 // hasn't already been squashed in the IQ.
1241 if (squashed_inst->threadNumber != tid ||
1242 squashed_inst->isSquashedInIQ()) {
1243 --squash_it;
1244 continue;
1245 }
1246
1247 if (!squashed_inst->isIssued() ||
1248 (squashed_inst->isMemRef() &&
1249 !squashed_inst->memOpDone())) {
1250
1251 DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %s squashed.\n",
1252 tid, squashed_inst->seqNum, squashed_inst->pcState());
1253
1254 bool is_acq_rel = squashed_inst->isMemBarrier() &&
1255 (squashed_inst->isLoad() ||
1256 (squashed_inst->isStore() &&
1257 !squashed_inst->isStoreConditional()));
1258
1259 // Remove the instruction from the dependency list.
1260 if (is_acq_rel ||
1261 (!squashed_inst->isNonSpeculative() &&
1262 !squashed_inst->isStoreConditional() &&
1263 !squashed_inst->isMemBarrier() &&
1264 !squashed_inst->isWriteBarrier())) {
1265
1266 for (int src_reg_idx = 0;
1267 src_reg_idx < squashed_inst->numSrcRegs();
1268 src_reg_idx++)
1269 {
1270 PhysRegIdPtr src_reg =
1271 squashed_inst->renamedSrcRegIdx(src_reg_idx);
1272
1273 // Only remove it from the dependency graph if it
1274 // was placed there in the first place.
1275
1276 // Instead of doing a linked list traversal, we
1277 // can just remove these squashed instructions
1278 // either at issue time, or when the register is
1279 // overwritten. The only downside to this is it
1280 // leaves more room for error.
1281
1282 if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
1283 !src_reg->isFixedMapping()) {
1284 dependGraph.remove(src_reg->flatIndex(),
1285 squashed_inst);
1286 }
1287
1288
1289 ++iqSquashedOperandsExamined;
1290 }
1291 } else if (!squashed_inst->isStoreConditional() ||
1292 !squashed_inst->isCompleted()) {
1293 NonSpecMapIt ns_inst_it =
1294 nonSpecInsts.find(squashed_inst->seqNum);
1295
1296 // we remove non-speculative instructions from
1297 // nonSpecInsts already when they are ready, and so we
1298 // cannot always expect to find them
1299 if (ns_inst_it == nonSpecInsts.end()) {
1300 // loads that became ready but stalled on a
1301 // blocked cache are already removed from
1302 // nonSpecInsts, and have not faulted
1303 assert(squashed_inst->getFault() != NoFault ||
1304 squashed_inst->isMemRef());
1305 } else {
1306
1307 (*ns_inst_it).second = NULL;
1308
1309 nonSpecInsts.erase(ns_inst_it);
1310
1311 ++iqSquashedNonSpecRemoved;
1312 }
1313 }
1314
1315 // Might want to also clear out the head of the dependency graph.
1316
1317 // Mark it as squashed within the IQ.
1318 squashed_inst->setSquashedInIQ();
1319
1320 // @todo: Remove this hack where several statuses are set so the
1321 // inst will flow through the rest of the pipeline.
1322 squashed_inst->setIssued();
1323 squashed_inst->setCanCommit();
1324 squashed_inst->clearInIQ();
1325
1326 //Update Thread IQ Count
1327 count[squashed_inst->threadNumber]--;
1328
1329 ++freeEntries;
1330 }
1331
1332 // The IQ clears out the heads of the dependency graph only when
1333 // instructions reach the writeback stage. If an instruction is squashed
1334 // before writeback, its dependency graph head would not be cleared and
1335 // would keep holding the instruction's DynInstPtr, preventing the
1336 // squashed DynInst from ever being freed.
1337 // Thus, we need to manually clear out the squashed instruction's
1338 // dependency graph heads here.
1339 for (int dest_reg_idx = 0;
1340 dest_reg_idx < squashed_inst->numDestRegs();
1341 dest_reg_idx++)
1342 {
1343 PhysRegIdPtr dest_reg =
1344 squashed_inst->renamedDestRegIdx(dest_reg_idx);
1345 if (dest_reg->isFixedMapping()){
1346 continue;
1347 }
1348 assert(dependGraph.empty(dest_reg->flatIndex()));
1349 dependGraph.clearInst(dest_reg->flatIndex());
1350 }
1351 instList[tid].erase(squash_it--);
1352 ++iqSquashedInstsExamined;
1353 }
1354}
1355
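// The squash walk above starts at the tail (youngest entry) of the
// per-thread list and removes everything younger than the squash sequence
// number. A minimal sketch of that backwards walk, with the instruction
// list reduced to bare sequence numbers (hypothetical, not gem5 code):
#if 0
#include <cstdint>
#include <list>

// Erase all entries with seqNum > squash_sn, walking from the tail as
// doSquash() does; returns the number of entries removed.
inline int
squashYoungerThan(std::list<uint64_t> &inst_list, uint64_t squash_sn)
{
    int squashed = 0;
    auto it = inst_list.end();
    while (it != inst_list.begin()) {
        --it;                           // step to the youngest remaining entry
        if (*it <= squash_sn)
            break;                      // reached the squash point; done
        it = inst_list.erase(it);       // erase returns the following position
        ++squashed;
    }
    return squashed;
}
#endif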
1356template <class Impl>
1357bool
1358InstructionQueue<Impl>::addToDependents(const DynInstPtr &new_inst)
1359{
1360 // Loop through the instruction's source registers, adding
1361 // them to the dependency list if they are not ready.
1362 int8_t total_src_regs = new_inst->numSrcRegs();
1363 bool return_val = false;
1364
1365 for (int src_reg_idx = 0;
1366 src_reg_idx < total_src_regs;
1367 src_reg_idx++)
1368 {
1369 // Only add it to the dependency graph if it's not ready.
1370 if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
1371 PhysRegIdPtr src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);
1372
1373 // Check the IQ's scoreboard to make sure the register
1374 // hasn't become ready while the instruction was in flight
1375 // between stages. Only if it really isn't ready should
1376 // it be added to the dependency graph.
1377 if (src_reg->isFixedMapping()) {
1378 continue;
1379 } else if (!regScoreboard[src_reg->flatIndex()]) {
1380 DPRINTF(IQ, "Instruction PC %s has src reg %i (%s) that "
1381 "is being added to the dependency chain.\n",
1382 new_inst->pcState(), src_reg->index(),
1383 src_reg->className());
1384
1385 dependGraph.insert(src_reg->flatIndex(), new_inst);
1386
1387 // Change the return value to indicate that something
1388 // was added to the dependency graph.
1389 return_val = true;
1390 } else {
1391 DPRINTF(IQ, "Instruction PC %s has src reg %i (%s) that "
1392 "became ready before it reached the IQ.\n",
1393 new_inst->pcState(), src_reg->index(),
1394 src_reg->className());
1395 // Mark a register ready within the instruction.
1396 new_inst->markSrcRegReady(src_reg_idx);
1397 }
1398 }
1399 }
1400
1401 return return_val;
1402}
1403
1404template <class Impl>
1405void
1406InstructionQueue<Impl>::addToProducers(const DynInstPtr &new_inst)
1407{
1408 // Nothing really needs to be marked when an instruction becomes
1409 // the producer of a register's value, but for convenience a ptr
1410 // to the producing instruction will be placed in the head node of
1411 // the dependency links.
1412 int8_t total_dest_regs = new_inst->numDestRegs();
1413
1414 for (int dest_reg_idx = 0;
1415 dest_reg_idx < total_dest_regs;
1416 dest_reg_idx++)
1417 {
1418 PhysRegIdPtr dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);
1419
1420 // Some registers have a fixed mapping, and there is no need to track
1421 // dependencies as these instructions must be executed at commit.
1422 if (dest_reg->isFixedMapping()) {
1423 continue;
1424 }
1425
1426 if (!dependGraph.empty(dest_reg->flatIndex())) {
1427 dependGraph.dump();
1428 panic("Dependency graph %i (%s) (flat: %i) not empty!",
1429 dest_reg->index(), dest_reg->className(),
1430 dest_reg->flatIndex());
1431 }
1432
1433 dependGraph.setInst(dest_reg->flatIndex(), new_inst);
1434
1435 // Mark the scoreboard to say it's not yet ready.
1436 regScoreboard[dest_reg->flatIndex()] = false;
1437 }
1438}
1439
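// addToDependents() and addToProducers() together implement a simple
// renamed-register scoreboard protocol: the producer clears the ready bit
// and becomes the head of the register's dependency chain; a later consumer
// either links itself behind that head (bit clear) or marks its source ready
// immediately (bit set). A hypothetical, self-contained sketch:
#if 0
#include <cstddef>
#include <deque>
#include <vector>

struct MiniRenameTracker
{
    std::vector<bool> scoreboard;                 // true = value available
    std::vector<std::deque<int>> waiters;         // consumer ids per register

    explicit MiniRenameTracker(std::size_t num_regs)
        : scoreboard(num_regs, true), waiters(num_regs) {}

    // addToProducers(): the destination register becomes "not ready".
    void produce(std::size_t dest_reg) { scoreboard[dest_reg] = false; }

    // addToDependents(): returns true if the consumer had to wait.
    bool
    consume(std::size_t src_reg, int consumer_id)
    {
        if (scoreboard[src_reg])
            return false;               // value already available
        waiters[src_reg].push_back(consumer_id);
        return true;                    // linked into the dependency chain
    }
};
#endif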
1440template <class Impl>
1441void
1442InstructionQueue<Impl>::addIfReady(const DynInstPtr &inst)
1443{
1444 // If the instruction now has all of its source registers
1445 // available, then add it to the list of ready instructions.
1446 if (inst->readyToIssue()) {
1447
1448 //Add the instruction to the proper ready list.
1449 if (inst->isMemRef()) {
1450
1451 DPRINTF(IQ, "Checking if memory instruction can issue.\n");
1452
1453 // Message to the mem dependence unit that this instruction has
1454 // its registers ready.
1455 memDepUnit[inst->threadNumber].regsReady(inst);
1456
1457 return;
1458 }
1459
1460 OpClass op_class = inst->opClass();
1461
1462 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
1463 "the ready list, PC %s opclass:%i [sn:%lli].\n",
1464 inst->pcState(), op_class, inst->seqNum);
1465
1466 readyInsts[op_class].push(inst);
1467
1468 // Will need to reorder the list if either a queue is not on the list,
1469 // or it has an older instruction than last time.
1470 if (!queueOnList[op_class]) {
1471 addToOrderList(op_class);
1472 } else if (readyInsts[op_class].top()->seqNum <
1473 (*readyIt[op_class]).oldestInst) {
1474 listOrder.erase(readyIt[op_class]);
1475 addToOrderList(op_class);
1476 }
1477 }
1478}
1479
1480template <class Impl>
1481int
1482InstructionQueue<Impl>::countInsts()
1483{
1484#if 0
1485 //ksewell: This works but could definitely use a cleaner rewrite
1486 //with a more intuitive way of counting. Right now it's
1487 //just brute force ....
1488 // Change the #if if you want to use this method.
1489 int total_insts = 0;
1490
1491 for (ThreadID tid = 0; tid < numThreads; ++tid) {
1492 ListIt count_it = instList[tid].begin();
1493
1494 while (count_it != instList[tid].end()) {
1495 if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
1496 if (!(*count_it)->isIssued()) {
1497 ++total_insts;
1498 } else if ((*count_it)->isMemRef() &&
1499 !(*count_it)->memOpDone()) {
1500 // Loads that have not been marked as executed still count
1501 // towards the total instructions.
1502 ++total_insts;
1503 }
1504 }
1505
1506 ++count_it;
1507 }
1508 }
1509
1510 return total_insts;
1511#else
1512 return numEntries - freeEntries;
1513#endif
1514}
1515
1516template <class Impl>
1517void
1518InstructionQueue<Impl>::dumpLists()
1519{
1520 for (int i = 0; i < Num_OpClasses; ++i) {
1521 cprintf("Ready list %i size: %i\n", i, readyInsts[i].size());
1522
1523 cprintf("\n");
1524 }
1525
1526 cprintf("Non speculative list size: %i\n", nonSpecInsts.size());
1527
1528 NonSpecMapIt non_spec_it = nonSpecInsts.begin();
1529 NonSpecMapIt non_spec_end_it = nonSpecInsts.end();
1530
1531 cprintf("Non speculative list: ");
1532
1533 while (non_spec_it != non_spec_end_it) {
1534 cprintf("%s [sn:%lli]", (*non_spec_it).second->pcState(),
1535 (*non_spec_it).second->seqNum);
1536 ++non_spec_it;
1537 }
1538
1539 cprintf("\n");
1540
1541 ListOrderIt list_order_it = listOrder.begin();
1542 ListOrderIt list_order_end_it = listOrder.end();
1543 int i = 1;
1544
1545 cprintf("List order: ");
1546
1547 while (list_order_it != list_order_end_it) {
1548 cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
1549 (*list_order_it).oldestInst);
1550
1551 ++list_order_it;
1552 ++i;
1553 }
1554
1555 cprintf("\n");
1556}
1557
1558
1559template <class Impl>
1560void
1561InstructionQueue<Impl>::dumpInsts()
1562{
1563 for (ThreadID tid = 0; tid < numThreads; ++tid) {
1564 int num = 0;
1565 int valid_num = 0;
1566 ListIt inst_list_it = instList[tid].begin();
1567
1568 while (inst_list_it != instList[tid].end()) {
1569 cprintf("Instruction:%i\n", num);
1570 if (!(*inst_list_it)->isSquashed()) {
1571 if (!(*inst_list_it)->isIssued()) {
1572 ++valid_num;
1573 cprintf("Count:%i\n", valid_num);
1574 } else if ((*inst_list_it)->isMemRef() &&
1575 !(*inst_list_it)->memOpDone()) {
1576 // Loads that have not been marked as executed
1577 // still count towards the total instructions.
1578 ++valid_num;
1579 cprintf("Count:%i\n", valid_num);
1580 }
1581 }
1582
1583 cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n"
1584 "Issued:%i\nSquashed:%i\n",
1585 (*inst_list_it)->pcState(),
1586 (*inst_list_it)->seqNum,
1587 (*inst_list_it)->threadNumber,
1588 (*inst_list_it)->isIssued(),
1589 (*inst_list_it)->isSquashed());
1590
1591 if ((*inst_list_it)->isMemRef()) {
1592 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone());
1593 }
1594
1595 cprintf("\n");
1596
1597 inst_list_it++;
1598 ++num;
1599 }
1600 }
1601
1602 cprintf("Insts to Execute list:\n");
1603
1604 int num = 0;
1605 int valid_num = 0;
1606 ListIt inst_list_it = instsToExecute.begin();
1607
1608 while (inst_list_it != instsToExecute.end())
1609 {
1610 cprintf("Instruction:%i\n",
1611 num);
1612 if (!(*inst_list_it)->isSquashed()) {
1613 if (!(*inst_list_it)->isIssued()) {
1614 ++valid_num;
1615 cprintf("Count:%i\n", valid_num);
1616 } else if ((*inst_list_it)->isMemRef() &&
1617 !(*inst_list_it)->memOpDone()) {
1618 // Loads that have not been marked as executed
1619 // still count towards the total instructions.
1620 ++valid_num;
1621 cprintf("Count:%i\n", valid_num);
1622 }
1623 }
1624
1625 cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n"
1626 "Issued:%i\nSquashed:%i\n",
1627 (*inst_list_it)->pcState(),
1628 (*inst_list_it)->seqNum,
1629 (*inst_list_it)->threadNumber,
1630 (*inst_list_it)->isIssued(),
1631 (*inst_list_it)->isSquashed());
1632
1633 if ((*inst_list_it)->isMemRef()) {
1634 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone());
1635 }
1636
1637 cprintf("\n");
1638
1639 inst_list_it++;
1640 ++num;
1641 }
1642}
1643
1644#endif//__CPU_O3_INST_QUEUE_IMPL_HH__