// inst_queue_impl.hh revision 13429:a1e199fd8122
12292SN/A/* 22292SN/A * Copyright (c) 2011-2014 ARM Limited 32292SN/A * Copyright (c) 2013 Advanced Micro Devices, Inc. 42292SN/A * All rights reserved. 52292SN/A * 62292SN/A * The license below extends only to copyright in the software and shall 72292SN/A * not be construed as granting a license to any other intellectual 82292SN/A * property including but not limited to intellectual property relating 92292SN/A * to a hardware implementation of the functionality of the software 102292SN/A * licensed hereunder. You may use the software subject to the license 112292SN/A * terms below provided that you ensure that this notice is replicated 122292SN/A * unmodified and in its entirety in all distributions of the software, 132292SN/A * modified or unmodified, in source code or in binary form. 142292SN/A * 152292SN/A * Copyright (c) 2004-2006 The Regents of The University of Michigan 162292SN/A * All rights reserved. 172292SN/A * 182292SN/A * Redistribution and use in source and binary forms, with or without 192292SN/A * modification, are permitted provided that the following conditions are 202292SN/A * met: redistributions of source code must retain the above copyright 212292SN/A * notice, this list of conditions and the following disclaimer; 222292SN/A * redistributions in binary form must reproduce the above copyright 232292SN/A * notice, this list of conditions and the following disclaimer in the 242292SN/A * documentation and/or other materials provided with the distribution; 252292SN/A * neither the name of the copyright holders nor the names of its 262292SN/A * contributors may be used to endorse or promote products derived from 272689Sktlim@umich.edu * this software without specific prior written permission. 
282689Sktlim@umich.edu * 292689Sktlim@umich.edu * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 302292SN/A * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 312292SN/A * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 322316SN/A * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 332292SN/A * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 342292SN/A * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 352669Sktlim@umich.edu * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 362292SN/A * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 372669Sktlim@umich.edu * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 382678Sktlim@umich.edu * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 392678Sktlim@umich.edu * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 402678Sktlim@umich.edu * 412292SN/A * Authors: Kevin Lim 422678Sktlim@umich.edu * Korey Sewell 432292SN/A */ 442292SN/A 452669Sktlim@umich.edu#ifndef __CPU_O3_INST_QUEUE_IMPL_HH__ 462292SN/A#define __CPU_O3_INST_QUEUE_IMPL_HH__ 472678Sktlim@umich.edu 482292SN/A#include <limits> 492678Sktlim@umich.edu#include <vector> 502678Sktlim@umich.edu 512678Sktlim@umich.edu#include "cpu/o3/fu_pool.hh" 522678Sktlim@umich.edu#include "cpu/o3/inst_queue.hh" 532678Sktlim@umich.edu#include "debug/IQ.hh" 542292SN/A#include "enums/OpClass.hh" 552678Sktlim@umich.edu#include "params/DerivO3CPU.hh" 562678Sktlim@umich.edu#include "sim/core.hh" 572678Sktlim@umich.edu 582678Sktlim@umich.edu// clang complains about std::set being overloaded with Packet::set if 592678Sktlim@umich.edu// we open up the entire namespace std 602678Sktlim@umich.eduusing std::list; 612292SN/A 622678Sktlim@umich.edutemplate <class Impl> 632678Sktlim@umich.eduInstructionQueue<Impl>::FUCompletion::FUCompletion(const DynInstPtr &_inst, 
642678Sktlim@umich.edu int fu_idx, InstructionQueue<Impl> *iq_ptr) 652678Sktlim@umich.edu : Event(Stat_Event_Pri, AutoDelete), 662678Sktlim@umich.edu inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false) 672678Sktlim@umich.edu{ 682678Sktlim@umich.edu} 692678Sktlim@umich.edu 702344SN/Atemplate <class Impl> 712678Sktlim@umich.eduvoid 722678Sktlim@umich.eduInstructionQueue<Impl>::FUCompletion::process() 732678Sktlim@umich.edu{ 742678Sktlim@umich.edu iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1); 752678Sktlim@umich.edu inst = NULL; 762307SN/A} 772678Sktlim@umich.edu 782678Sktlim@umich.edu 792678Sktlim@umich.edutemplate <class Impl> 802678Sktlim@umich.educonst char * 812678Sktlim@umich.eduInstructionQueue<Impl>::FUCompletion::description() const 822678Sktlim@umich.edu{ 832678Sktlim@umich.edu return "Functional unit completion"; 842678Sktlim@umich.edu} 852344SN/A 862307SN/Atemplate <class Impl> 872678Sktlim@umich.eduInstructionQueue<Impl>::InstructionQueue(O3CPU *cpu_ptr, IEW *iew_ptr, 882678Sktlim@umich.edu DerivO3CPUParams *params) 892292SN/A : cpu(cpu_ptr), 902292SN/A iewStage(iew_ptr), 912292SN/A fuPool(params->fuPool), 922669Sktlim@umich.edu numEntries(params->numIQEntries), 932669Sktlim@umich.edu totalWidth(params->issueWidth), 942292SN/A commitToIEWDelay(params->commitToIEWDelay) 952669Sktlim@umich.edu{ 962669Sktlim@umich.edu assert(fuPool); 972669Sktlim@umich.edu 982669Sktlim@umich.edu numThreads = params->numThreads; 992669Sktlim@umich.edu 1002669Sktlim@umich.edu // Set the number of total physical registers 1012669Sktlim@umich.edu // As the vector registers have two addressing modes, they are added twice 1022669Sktlim@umich.edu numPhysRegs = params->numPhysIntRegs + params->numPhysFloatRegs + 1032669Sktlim@umich.edu params->numPhysVecRegs + 1042669Sktlim@umich.edu params->numPhysVecRegs * TheISA::NumVecElemPerVecReg + 1052669Sktlim@umich.edu params->numPhysCCRegs; 1062669Sktlim@umich.edu 1072669Sktlim@umich.edu //Create an entry for each physical 
register within the 1082669Sktlim@umich.edu //dependency graph. 1092669Sktlim@umich.edu dependGraph.resize(numPhysRegs); 1102669Sktlim@umich.edu 1112669Sktlim@umich.edu // Resize the register scoreboard. 1122669Sktlim@umich.edu regScoreboard.resize(numPhysRegs); 1132669Sktlim@umich.edu 1142669Sktlim@umich.edu //Initialize Mem Dependence Units 1152669Sktlim@umich.edu for (ThreadID tid = 0; tid < numThreads; tid++) { 1162669Sktlim@umich.edu memDepUnit[tid].init(params, tid); 1172669Sktlim@umich.edu memDepUnit[tid].setIQ(this); 1182669Sktlim@umich.edu } 1192669Sktlim@umich.edu 1202669Sktlim@umich.edu resetState(); 1212669Sktlim@umich.edu 1222669Sktlim@umich.edu std::string policy = params->smtIQPolicy; 1232669Sktlim@umich.edu 1242669Sktlim@umich.edu //Convert string to lowercase 1252669Sktlim@umich.edu std::transform(policy.begin(), policy.end(), policy.begin(), 1262669Sktlim@umich.edu (int(*)(int)) tolower); 1272669Sktlim@umich.edu 1282669Sktlim@umich.edu //Figure out resource sharing policy 1292669Sktlim@umich.edu if (policy == "dynamic") { 1302669Sktlim@umich.edu iqPolicy = Dynamic; 1312669Sktlim@umich.edu 1322669Sktlim@umich.edu //Set Max Entries to Total ROB Capacity 1332669Sktlim@umich.edu for (ThreadID tid = 0; tid < numThreads; tid++) { 1342669Sktlim@umich.edu maxEntries[tid] = numEntries; 1352669Sktlim@umich.edu } 1362669Sktlim@umich.edu 1372669Sktlim@umich.edu } else if (policy == "partitioned") { 1382669Sktlim@umich.edu iqPolicy = Partitioned; 1392669Sktlim@umich.edu 1402292SN/A //@todo:make work if part_amt doesnt divide evenly. 
1412292SN/A int part_amt = numEntries / numThreads; 1422292SN/A 1432292SN/A //Divide ROB up evenly 1442678Sktlim@umich.edu for (ThreadID tid = 0; tid < numThreads; tid++) { 1452678Sktlim@umich.edu maxEntries[tid] = part_amt; 1462292SN/A } 1472292SN/A 1482292SN/A DPRINTF(IQ, "IQ sharing policy set to Partitioned:" 1492292SN/A "%i entries per thread.\n",part_amt); 1502292SN/A } else if (policy == "threshold") { 1512292SN/A iqPolicy = Threshold; 1522292SN/A 1532292SN/A double threshold = (double)params->smtIQThreshold / 100; 1542292SN/A 1552292SN/A int thresholdIQ = (int)((double)threshold * numEntries); 1562292SN/A 1572307SN/A //Divide up by threshold amount 1582307SN/A for (ThreadID tid = 0; tid < numThreads; tid++) { 1592292SN/A maxEntries[tid] = thresholdIQ; 1602292SN/A } 1612329SN/A 1622329SN/A DPRINTF(IQ, "IQ sharing policy set to Threshold:" 1632329SN/A "%i entries per thread.\n",thresholdIQ); 1642292SN/A } else { 1652292SN/A assert(0 && "Invalid IQ Sharing Policy.Options Are:{Dynamic," 1662292SN/A "Partitioned, Threshold}"); 1672292SN/A } 1682292SN/A} 1692292SN/A 1702292SN/Atemplate <class Impl> 1712292SN/AInstructionQueue<Impl>::~InstructionQueue() 1722292SN/A{ 1732292SN/A dependGraph.reset(); 1742292SN/A#ifdef DEBUG 1752678Sktlim@umich.edu cprintf("Nodes traversed: %i, removed: %i\n", 1762292SN/A dependGraph.nodesTraversed, dependGraph.nodesRemoved); 1772329SN/A#endif 1782292SN/A} 1792292SN/A 1802292SN/Atemplate <class Impl> 1812292SN/Astd::string 1822292SN/AInstructionQueue<Impl>::name() const 1832669Sktlim@umich.edu{ 1842669Sktlim@umich.edu return cpu->name() + ".iq"; 1852669Sktlim@umich.edu} 1862669Sktlim@umich.edu 1872669Sktlim@umich.edutemplate <class Impl> 1882678Sktlim@umich.eduvoid 1892678Sktlim@umich.eduInstructionQueue<Impl>::regStats() 1902678Sktlim@umich.edu{ 1912678Sktlim@umich.edu using namespace Stats; 1922679Sktlim@umich.edu iqInstsAdded 1932679Sktlim@umich.edu .name(name() + ".iqInstsAdded") 1942679Sktlim@umich.edu .desc("Number of 
instructions added to the IQ (excludes non-spec)") 1952679Sktlim@umich.edu .prereq(iqInstsAdded); 1962669Sktlim@umich.edu 1972669Sktlim@umich.edu iqNonSpecInstsAdded 1982669Sktlim@umich.edu .name(name() + ".iqNonSpecInstsAdded") 1992292SN/A .desc("Number of non-speculative instructions added to the IQ") 2002292SN/A .prereq(iqNonSpecInstsAdded); 2012292SN/A 2022292SN/A iqInstsIssued 2032292SN/A .name(name() + ".iqInstsIssued") 2042292SN/A .desc("Number of instructions issued") 2052292SN/A .prereq(iqInstsIssued); 2062292SN/A 2072292SN/A iqIntInstsIssued 2082292SN/A .name(name() + ".iqIntInstsIssued") 2092292SN/A .desc("Number of integer instructions issued") 2102292SN/A .prereq(iqIntInstsIssued); 2112292SN/A 2122292SN/A iqFloatInstsIssued 2132292SN/A .name(name() + ".iqFloatInstsIssued") 2142292SN/A .desc("Number of float instructions issued") 2152292SN/A .prereq(iqFloatInstsIssued); 2162292SN/A 2172292SN/A iqBranchInstsIssued 2182292SN/A .name(name() + ".iqBranchInstsIssued") 2192292SN/A .desc("Number of branch instructions issued") 2202292SN/A .prereq(iqBranchInstsIssued); 2212292SN/A 2222292SN/A iqMemInstsIssued 2232292SN/A .name(name() + ".iqMemInstsIssued") 2242292SN/A .desc("Number of memory instructions issued") 2252292SN/A .prereq(iqMemInstsIssued); 2262292SN/A 2272292SN/A iqMiscInstsIssued 2282292SN/A .name(name() + ".iqMiscInstsIssued") 2292292SN/A .desc("Number of miscellaneous instructions issued") 2302292SN/A .prereq(iqMiscInstsIssued); 2312292SN/A 2322292SN/A iqSquashedInstsIssued 2332292SN/A .name(name() + ".iqSquashedInstsIssued") 2342292SN/A .desc("Number of squashed instructions issued") 2352307SN/A .prereq(iqSquashedInstsIssued); 2362307SN/A 2372307SN/A iqSquashedInstsExamined 2382307SN/A .name(name() + ".iqSquashedInstsExamined") 2392307SN/A .desc("Number of squashed instructions iterated over during squash;" 2402307SN/A " mainly for profiling") 2412329SN/A .prereq(iqSquashedInstsExamined); 2422307SN/A 2432307SN/A iqSquashedOperandsExamined 
2442307SN/A .name(name() + ".iqSquashedOperandsExamined") 2452307SN/A .desc("Number of squashed operands that are examined and possibly " 2462307SN/A "removed from graph") 2472307SN/A .prereq(iqSquashedOperandsExamined); 2482307SN/A 2492307SN/A iqSquashedNonSpecRemoved 2502307SN/A .name(name() + ".iqSquashedNonSpecRemoved") 2512307SN/A .desc("Number of squashed non-spec instructions that were removed") 2522307SN/A .prereq(iqSquashedNonSpecRemoved); 2532307SN/A/* 2542307SN/A queueResDist 2552307SN/A .init(Num_OpClasses, 0, 99, 2) 2562307SN/A .name(name() + ".IQ:residence:") 2572329SN/A .desc("cycles from dispatch to issue") 2582307SN/A .flags(total | pdf | cdf ) 2592307SN/A ; 2602307SN/A for (int i = 0; i < Num_OpClasses; ++i) { 2612307SN/A queueResDist.subname(i, opClassStrings[i]); 2622307SN/A } 2632307SN/A*/ 2642307SN/A numIssuedDist 2652307SN/A .init(0,totalWidth,1) 2662307SN/A .name(name() + ".issued_per_cycle") 2672307SN/A .desc("Number of insts issued each cycle") 2682292SN/A .flags(pdf) 2692292SN/A ; 2702329SN/A/* 2712329SN/A dist_unissued 2722292SN/A .init(Num_OpClasses+2) 2732329SN/A .name(name() + ".unissued_cause") 2742329SN/A .desc("Reason ready instruction not issued") 2752292SN/A .flags(pdf | dist) 2762292SN/A ; 2772292SN/A for (int i=0; i < (Num_OpClasses + 2); ++i) { 2782292SN/A dist_unissued.subname(i, unissued_names[i]); 2792292SN/A } 2802329SN/A*/ 2812292SN/A statIssuedInstType 2822292SN/A .init(numThreads,Enums::Num_OpClass) 2832292SN/A .name(name() + ".FU_type") 2842292SN/A .desc("Type of FU issued") 2852292SN/A .flags(total | pdf | dist) 2862292SN/A ; 2872292SN/A statIssuedInstType.ysubnames(Enums::OpClassStrings); 2882292SN/A 2892329SN/A // 2902329SN/A // How long did instructions for a particular FU type wait prior to issue 2912329SN/A // 2922292SN/A/* 2932292SN/A issueDelayDist 2942292SN/A .init(Num_OpClasses,0,99,2) 2952292SN/A .name(name() + ".") 2962292SN/A .desc("cycles from operands ready to issue") 2972329SN/A .flags(pdf | cdf) 
2982292SN/A ; 2992292SN/A 3002292SN/A for (int i=0; i<Num_OpClasses; ++i) { 3012292SN/A std::stringstream subname; 3022292SN/A subname << opClassStrings[i] << "_delay"; 3032292SN/A issueDelayDist.subname(i, subname.str()); 3042292SN/A } 3052292SN/A*/ 3062292SN/A issueRate 3072292SN/A .name(name() + ".rate") 3082292SN/A .desc("Inst issue rate") 3092292SN/A .flags(total) 3102292SN/A ; 3112292SN/A issueRate = iqInstsIssued / cpu->numCycles; 3122292SN/A 3132292SN/A statFuBusy 3142292SN/A .init(Num_OpClasses) 3152292SN/A .name(name() + ".fu_full") 3162292SN/A .desc("attempts to use FU when none available") 3172292SN/A .flags(pdf | dist) 3182292SN/A ; 3192292SN/A for (int i=0; i < Num_OpClasses; ++i) { 3202292SN/A statFuBusy.subname(i, Enums::OpClassStrings[i]); 3212292SN/A } 3222329SN/A 3232329SN/A fuBusy 3242292SN/A .init(numThreads) 3252292SN/A .name(name() + ".fu_busy_cnt") 3262292SN/A .desc("FU busy when requested") 3272292SN/A .flags(total) 3282292SN/A ; 3292292SN/A 3302292SN/A fuBusyRate 3312292SN/A .name(name() + ".fu_busy_rate") 3322292SN/A .desc("FU busy rate (busy events/executed inst)") 3332292SN/A .flags(total) 3342292SN/A ; 3352292SN/A fuBusyRate = fuBusy / iqInstsIssued; 3362292SN/A 3372292SN/A for (ThreadID tid = 0; tid < numThreads; tid++) { 3382292SN/A // Tell mem dependence unit to reg stats as well. 
3392292SN/A memDepUnit[tid].regStats(); 3402292SN/A } 3412292SN/A 3422292SN/A intInstQueueReads 3432292SN/A .name(name() + ".int_inst_queue_reads") 3442292SN/A .desc("Number of integer instruction queue reads") 3452292SN/A .flags(total); 3462292SN/A 3472292SN/A intInstQueueWrites 3482292SN/A .name(name() + ".int_inst_queue_writes") 3492292SN/A .desc("Number of integer instruction queue writes") 3502292SN/A .flags(total); 3512292SN/A 3522292SN/A intInstQueueWakeupAccesses 3532292SN/A .name(name() + ".int_inst_queue_wakeup_accesses") 3542292SN/A .desc("Number of integer instruction queue wakeup accesses") 3552292SN/A .flags(total); 3562292SN/A 3572292SN/A fpInstQueueReads 3582292SN/A .name(name() + ".fp_inst_queue_reads") 3592292SN/A .desc("Number of floating instruction queue reads") 3602292SN/A .flags(total); 3612292SN/A 3622292SN/A fpInstQueueWrites 3632292SN/A .name(name() + ".fp_inst_queue_writes") 3642292SN/A .desc("Number of floating instruction queue writes") 3652292SN/A .flags(total); 3662292SN/A 3672292SN/A fpInstQueueWakeupAccesses 3682292SN/A .name(name() + ".fp_inst_queue_wakeup_accesses") 3692292SN/A .desc("Number of floating instruction queue wakeup accesses") 3702292SN/A .flags(total); 3712292SN/A 3722292SN/A vecInstQueueReads 3732292SN/A .name(name() + ".vec_inst_queue_reads") 3742292SN/A .desc("Number of vector instruction queue reads") 3752292SN/A .flags(total); 3762292SN/A 3772292SN/A vecInstQueueWrites 3782292SN/A .name(name() + ".vec_inst_queue_writes") 3792292SN/A .desc("Number of vector instruction queue writes") 3802292SN/A .flags(total); 3812292SN/A 3822292SN/A vecInstQueueWakeupAccesses 3832292SN/A .name(name() + ".vec_inst_queue_wakeup_accesses") 3842292SN/A .desc("Number of vector instruction queue wakeup accesses") 3852292SN/A .flags(total); 3862292SN/A 3872292SN/A intAluAccesses 3882292SN/A .name(name() + ".int_alu_accesses") 3892292SN/A .desc("Number of integer alu accesses") 3902292SN/A .flags(total); 3912292SN/A 3922292SN/A 
fpAluAccesses 3932292SN/A .name(name() + ".fp_alu_accesses") 3942292SN/A .desc("Number of floating point alu accesses") 3952292SN/A .flags(total); 3962292SN/A 3972292SN/A vecAluAccesses 3982292SN/A .name(name() + ".vec_alu_accesses") 3992292SN/A .desc("Number of vector alu accesses") 4002292SN/A .flags(total); 4012292SN/A 4022292SN/A} 4032292SN/A 4042292SN/Atemplate <class Impl> 4052292SN/Avoid 4062292SN/AInstructionQueue<Impl>::resetState() 4072292SN/A{ 4082292SN/A //Initialize thread IQ counts 4092292SN/A for (ThreadID tid = 0; tid <numThreads; tid++) { 4102292SN/A count[tid] = 0; 4112292SN/A instList[tid].clear(); 4122292SN/A } 4132292SN/A 4142292SN/A // Initialize the number of free IQ entries. 4152292SN/A freeEntries = numEntries; 4162292SN/A 4172292SN/A // Note that in actuality, the registers corresponding to the logical 4182292SN/A // registers start off as ready. However this doesn't matter for the 4192669Sktlim@umich.edu // IQ as the instruction should have been correctly told if those 4202292SN/A // registers are ready in rename. Thus it can all be initialized as 4212292SN/A // unready. 
4222292SN/A for (int i = 0; i < numPhysRegs; ++i) { 4232292SN/A regScoreboard[i] = false; 4242329SN/A } 4252329SN/A 4262292SN/A for (ThreadID tid = 0; tid < numThreads; ++tid) { 4272292SN/A squashedSeqNum[tid] = 0; 4282292SN/A } 4292292SN/A 4302292SN/A for (int i = 0; i < Num_OpClasses; ++i) { 4312292SN/A while (!readyInsts[i].empty()) 4322292SN/A readyInsts[i].pop(); 4332292SN/A queueOnList[i] = false; 4342292SN/A readyIt[i] = listOrder.end(); 4352292SN/A } 4362292SN/A nonSpecInsts.clear(); 4372292SN/A listOrder.clear(); 4382292SN/A deferredMemInsts.clear(); 4392292SN/A blockedMemInsts.clear(); 4402292SN/A retryMemInsts.clear(); 4412292SN/A wbOutstanding = 0; 4422292SN/A} 4432292SN/A 4442292SN/Atemplate <class Impl> 4452292SN/Avoid 4462292SN/AInstructionQueue<Impl>::setActiveThreads(list<ThreadID> *at_ptr) 4472292SN/A{ 4482292SN/A activeThreads = at_ptr; 4492292SN/A} 4502292SN/A 4512292SN/Atemplate <class Impl> 4522329SN/Avoid 4532292SN/AInstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr) 4542292SN/A{ 4552292SN/A issueToExecuteQueue = i2e_ptr; 4562292SN/A} 4572292SN/A 4582292SN/Atemplate <class Impl> 4592292SN/Avoid 4602292SN/AInstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr) 4612336SN/A{ 4622336SN/A timeBuffer = tb_ptr; 4632336SN/A 4642329SN/A fromCommit = timeBuffer->getWire(-commitToIEWDelay); 4652292SN/A} 4662329SN/A 4672292SN/Atemplate <class Impl> 4682292SN/Abool 4692292SN/AInstructionQueue<Impl>::isDrained() const 4702292SN/A{ 4712329SN/A bool drained = dependGraph.empty() && 4722329SN/A instsToExecute.empty() && 4732329SN/A wbOutstanding == 0; 4742292SN/A for (ThreadID tid = 0; tid < numThreads; ++tid) 4752329SN/A drained = drained && memDepUnit[tid].isDrained(); 4762329SN/A 4772329SN/A return drained; 4782329SN/A} 4792292SN/A 4802292SN/Atemplate <class Impl> 4812292SN/Avoid 4822292SN/AInstructionQueue<Impl>::drainSanityCheck() const 4832292SN/A{ 4842292SN/A assert(dependGraph.empty()); 4852292SN/A 
assert(instsToExecute.empty()); 4862292SN/A for (ThreadID tid = 0; tid < numThreads; ++tid) 4872292SN/A memDepUnit[tid].drainSanityCheck(); 4882292SN/A} 4892292SN/A 4902292SN/Atemplate <class Impl> 4912292SN/Avoid 4922292SN/AInstructionQueue<Impl>::takeOverFrom() 4932292SN/A{ 4942292SN/A resetState(); 4952292SN/A} 4962292SN/A 4972292SN/Atemplate <class Impl> 4982292SN/Aint 4992292SN/AInstructionQueue<Impl>::entryAmount(ThreadID num_threads) 5002292SN/A{ 5012292SN/A if (iqPolicy == Partitioned) { 5022292SN/A return numEntries / num_threads; 5032292SN/A } else { 5042292SN/A return 0; 5052292SN/A } 5062292SN/A} 5072292SN/A 5082292SN/A 5092292SN/Atemplate <class Impl> 5102292SN/Avoid 5112292SN/AInstructionQueue<Impl>::resetEntries() 5122292SN/A{ 5132292SN/A if (iqPolicy != Dynamic || numThreads > 1) { 5142292SN/A int active_threads = activeThreads->size(); 5152292SN/A 5162292SN/A list<ThreadID>::iterator threads = activeThreads->begin(); 5172292SN/A list<ThreadID>::iterator end = activeThreads->end(); 5182292SN/A 5192292SN/A while (threads != end) { 5202292SN/A ThreadID tid = *threads++; 5212292SN/A 5222292SN/A if (iqPolicy == Partitioned) { 5232292SN/A maxEntries[tid] = numEntries / active_threads; 5242292SN/A } else if (iqPolicy == Threshold && active_threads == 1) { 5252292SN/A maxEntries[tid] = numEntries; 5262292SN/A } 5272292SN/A } 5282292SN/A } 5292292SN/A} 5302292SN/A 5312292SN/Atemplate <class Impl> 5322292SN/Aunsigned 5332292SN/AInstructionQueue<Impl>::numFreeEntries() 5342292SN/A{ 5352292SN/A return freeEntries; 5362329SN/A} 5372329SN/A 5382292SN/Atemplate <class Impl> 5392292SN/Aunsigned 5402292SN/AInstructionQueue<Impl>::numFreeEntries(ThreadID tid) 5412292SN/A{ 5422292SN/A return maxEntries[tid] - count[tid]; 5432292SN/A} 5442292SN/A 5452292SN/A// Might want to do something more complex if it knows how many instructions 5462292SN/A// will be issued this cycle. 
5472292SN/Atemplate <class Impl> 5482292SN/Abool 5492292SN/AInstructionQueue<Impl>::isFull() 5502292SN/A{ 5512292SN/A if (freeEntries == 0) { 5522292SN/A return(true); 5532292SN/A } else { 5542292SN/A return(false); 5552292SN/A } 5562292SN/A} 5572292SN/A 5582292SN/Atemplate <class Impl> 5592292SN/Abool 5602292SN/AInstructionQueue<Impl>::isFull(ThreadID tid) 5612292SN/A{ 5622292SN/A if (numFreeEntries(tid) == 0) { 5632292SN/A return(true); 5642292SN/A } else { 5652292SN/A return(false); 5662678Sktlim@umich.edu } 5672678Sktlim@umich.edu} 5682678Sktlim@umich.edu 5692678Sktlim@umich.edutemplate <class Impl> 5702678Sktlim@umich.edubool 5712678Sktlim@umich.eduInstructionQueue<Impl>::hasReadyInsts() 5722329SN/A{ 5732329SN/A if (!listOrder.empty()) { 5742292SN/A return true; 5752292SN/A } 5762292SN/A 5772292SN/A for (int i = 0; i < Num_OpClasses; ++i) { 5782292SN/A if (!readyInsts[i].empty()) { 5792292SN/A return true; 5802292SN/A } 5812678Sktlim@umich.edu } 5822292SN/A 5832292SN/A return false; 5842292SN/A} 5852292SN/A 5862292SN/Atemplate <class Impl> 5872292SN/Avoid 5882292SN/AInstructionQueue<Impl>::insert(const DynInstPtr &new_inst) 5892292SN/A{ 5902292SN/A if (new_inst->isFloating()) { 5912292SN/A fpInstQueueWrites++; 5922292SN/A } else if (new_inst->isVector()) { 5932669Sktlim@umich.edu vecInstQueueWrites++; 5942669Sktlim@umich.edu } else { 5952669Sktlim@umich.edu intInstQueueWrites++; 5962292SN/A } 5972292SN/A // Make sure the instruction is valid 5982669Sktlim@umich.edu assert(new_inst); 5992669Sktlim@umich.edu 6002678Sktlim@umich.edu DPRINTF(IQ, "Adding instruction [sn:%lli] PC %s to the IQ.\n", 6012678Sktlim@umich.edu new_inst->seqNum, new_inst->pcState()); 6022669Sktlim@umich.edu 6032669Sktlim@umich.edu assert(freeEntries != 0); 6042669Sktlim@umich.edu 6052292SN/A instList[new_inst->threadNumber].push_back(new_inst); 6062678Sktlim@umich.edu 6072678Sktlim@umich.edu --freeEntries; 6082678Sktlim@umich.edu 6092678Sktlim@umich.edu new_inst->setInIQ(); 
6102678Sktlim@umich.edu 6112678Sktlim@umich.edu // Look through its source registers (physical regs), and mark any 6122292SN/A // dependencies. 6132292SN/A addToDependents(new_inst); 6142669Sktlim@umich.edu 6152669Sktlim@umich.edu // Have this instruction set itself as the producer of its destination 6162292SN/A // register(s). 6172292SN/A addToProducers(new_inst); 6182669Sktlim@umich.edu 6192669Sktlim@umich.edu if (new_inst->isMemRef()) { 6202678Sktlim@umich.edu memDepUnit[new_inst->threadNumber].insert(new_inst); 6212669Sktlim@umich.edu } else { 6222292SN/A addIfReady(new_inst); 6232292SN/A } 6242292SN/A 6252292SN/A ++iqInstsAdded; 6262292SN/A 6272292SN/A count[new_inst->threadNumber]++; 6282292SN/A 6292292SN/A assert(freeEntries == (numEntries - countInsts())); 6302292SN/A} 6312678Sktlim@umich.edu 6322678Sktlim@umich.edutemplate <class Impl> 6332678Sktlim@umich.eduvoid 6342678Sktlim@umich.eduInstructionQueue<Impl>::insertNonSpec(const DynInstPtr &new_inst) 6352678Sktlim@umich.edu{ 6362329SN/A // @todo: Clean up this code; can do it by setting inst as unable 6372678Sktlim@umich.edu // to issue, then calling normal insert on the inst. 
6382669Sktlim@umich.edu if (new_inst->isFloating()) { 6392329SN/A fpInstQueueWrites++; 6402329SN/A } else if (new_inst->isVector()) { 6412292SN/A vecInstQueueWrites++; 6422292SN/A } else { 6432292SN/A intInstQueueWrites++; 6442292SN/A } 6452292SN/A 6462292SN/A assert(new_inst); 6472292SN/A 6482292SN/A nonSpecInsts[new_inst->seqNum] = new_inst; 6492329SN/A 6502292SN/A DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %s " 6512292SN/A "to the IQ.\n", 6522292SN/A new_inst->seqNum, new_inst->pcState()); 6532292SN/A 6542292SN/A assert(freeEntries != 0); 6552292SN/A 6562292SN/A instList[new_inst->threadNumber].push_back(new_inst); 6572292SN/A 6582292SN/A --freeEntries; 6592292SN/A 6602292SN/A new_inst->setInIQ(); 6612292SN/A 6622292SN/A // Have this instruction set itself as the producer of its destination 6632292SN/A // register(s). 6642292SN/A addToProducers(new_inst); 6652292SN/A 6662292SN/A // If it's a memory instruction, add it to the memory dependency 6672292SN/A // unit. 6682292SN/A if (new_inst->isMemRef()) { 6692292SN/A memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst); 6702292SN/A } 6712292SN/A 6722292SN/A ++iqNonSpecInstsAdded; 6732292SN/A 6742292SN/A count[new_inst->threadNumber]++; 6752292SN/A 6762292SN/A assert(freeEntries == (numEntries - countInsts())); 6772292SN/A} 6782292SN/A 6792292SN/Atemplate <class Impl> 6802292SN/Avoid 6812292SN/AInstructionQueue<Impl>::insertBarrier(const DynInstPtr &barr_inst) 6822292SN/A{ 6832292SN/A memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst); 6842292SN/A 6852292SN/A insertNonSpec(barr_inst); 6862292SN/A} 6872329SN/A 6882292SN/Atemplate <class Impl> 6892292SN/Atypename Impl::DynInstPtr 6902292SN/AInstructionQueue<Impl>::getInstToExecute() 6912292SN/A{ 6922292SN/A assert(!instsToExecute.empty()); 6932292SN/A DynInstPtr inst = std::move(instsToExecute.front()); 6942292SN/A instsToExecute.pop_front(); 6952292SN/A if (inst->isFloating()) { 6962292SN/A fpInstQueueReads++; 6972292SN/A } else if 
(inst->isVector()) { 6982292SN/A vecInstQueueReads++; 6992292SN/A } else { 7002292SN/A intInstQueueReads++; 7012292SN/A } 7022292SN/A return inst; 7032292SN/A} 7042329SN/A 7052292SN/Atemplate <class Impl> 7062292SN/Avoid 7072292SN/AInstructionQueue<Impl>::addToOrderList(OpClass op_class) 7082292SN/A{ 7092292SN/A assert(!readyInsts[op_class].empty()); 7102292SN/A 7112292SN/A ListOrderEntry queue_entry; 7122292SN/A 7132292SN/A queue_entry.queueType = op_class; 7142292SN/A 7152292SN/A queue_entry.oldestInst = readyInsts[op_class].top()->seqNum; 7162292SN/A 7172292SN/A ListOrderIt list_it = listOrder.begin(); 7182292SN/A ListOrderIt list_end_it = listOrder.end(); 7192292SN/A 7202292SN/A while (list_it != list_end_it) { 7212292SN/A if ((*list_it).oldestInst > queue_entry.oldestInst) { 7222292SN/A break; 7232292SN/A } 7242292SN/A 7252292SN/A list_it++; 7262292SN/A } 7272292SN/A 7282329SN/A readyIt[op_class] = listOrder.insert(list_it, queue_entry); 7292292SN/A queueOnList[op_class] = true; 7302292SN/A} 7312292SN/A 7322292SN/Atemplate <class Impl> 7332292SN/Avoid 7342292SN/AInstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it) 7352292SN/A{ 7362292SN/A // Get iterator of next item on the list 7372292SN/A // Delete the original iterator 7382329SN/A // Determine if the next item is either the end of the list or younger 7392329SN/A // than the new instruction. If so, then add in a new iterator right here. 7402292SN/A // If not, then move along. 
7412292SN/A ListOrderEntry queue_entry; 7422292SN/A OpClass op_class = (*list_order_it).queueType; 7432292SN/A ListOrderIt next_it = list_order_it; 7442292SN/A 7452292SN/A ++next_it; 7462292SN/A 7472329SN/A queue_entry.queueType = op_class; 7482292SN/A queue_entry.oldestInst = readyInsts[op_class].top()->seqNum; 7492292SN/A 7502292SN/A while (next_it != listOrder.end() && 7512292SN/A (*next_it).oldestInst < queue_entry.oldestInst) { 7522292SN/A ++next_it; 7532292SN/A } 7542292SN/A 7552292SN/A readyIt[op_class] = listOrder.insert(next_it, queue_entry); 7562292SN/A} 7572292SN/A 7582292SN/Atemplate <class Impl> 7592292SN/Avoid 7602292SN/AInstructionQueue<Impl>::processFUCompletion(const DynInstPtr &inst, int fu_idx) 7612292SN/A{ 7622292SN/A DPRINTF(IQ, "Processing FU completion [sn:%lli]\n", inst->seqNum); 7632292SN/A assert(!cpu->switchedOut()); 7642678Sktlim@umich.edu // The CPU could have been sleeping until this op completed (*extremely* 7652678Sktlim@umich.edu // long latency op). Wake it if it was. This may be overkill. 7662678Sktlim@umich.edu --wbOutstanding; 7672678Sktlim@umich.edu iewStage->wakeCPU(); 7682678Sktlim@umich.edu 7692678Sktlim@umich.edu if (fu_idx > -1) 7702678Sktlim@umich.edu fuPool->freeUnitNextCycle(fu_idx); 7712678Sktlim@umich.edu 7722678Sktlim@umich.edu // @todo: Ensure that these FU Completions happen at the beginning 7732678Sktlim@umich.edu // of a cycle, otherwise they could add too many instructions to 7742678Sktlim@umich.edu // the queue. 7752678Sktlim@umich.edu issueToExecuteQueue->access(-1)->size++; 7762678Sktlim@umich.edu instsToExecute.push_back(inst); 7772678Sktlim@umich.edu} 7782678Sktlim@umich.edu 7792678Sktlim@umich.edu// @todo: Figure out a better way to remove the squashed items from the 7802678Sktlim@umich.edu// lists. Checking the top item of each list to see if it's squashed 7812678Sktlim@umich.edu// wastes time and forces jumps. 
7822678Sktlim@umich.edutemplate <class Impl> 7832678Sktlim@umich.eduvoid 7842678Sktlim@umich.eduInstructionQueue<Impl>::scheduleReadyInsts() 7852678Sktlim@umich.edu{ 7862678Sktlim@umich.edu DPRINTF(IQ, "Attempting to schedule ready instructions from " 7872678Sktlim@umich.edu "the IQ.\n"); 7882678Sktlim@umich.edu 7892292SN/A IssueStruct *i2e_info = issueToExecuteQueue->access(0); 7902292SN/A 7912292SN/A DynInstPtr mem_inst; 7922292SN/A while (mem_inst = std::move(getDeferredMemInstToExecute())) { 7932292SN/A addReadyMemInst(mem_inst); 7942292SN/A } 7952292SN/A 7962292SN/A // See if any cache blocked instructions are able to be executed 7972292SN/A while (mem_inst = std::move(getBlockedMemInstToExecute())) { 7982292SN/A addReadyMemInst(mem_inst); 7992292SN/A } 8002292SN/A 8012292SN/A // Have iterator to head of the list 8022292SN/A // While I haven't exceeded bandwidth or reached the end of the list, 8032292SN/A // Try to get a FU that can do what this op needs. 8042292SN/A // If successful, change the oldestInst to the new top of the list, put 8052292SN/A // the queue in the proper place in the list. 8062292SN/A // Increment the iterator. 8072292SN/A // This will avoid trying to schedule a certain op class if there are no 8082292SN/A // FUs that handle it. 
8092292SN/A int total_issued = 0; 8102329SN/A ListOrderIt order_it = listOrder.begin(); 8112329SN/A ListOrderIt order_end_it = listOrder.end(); 8122329SN/A 8132292SN/A while (total_issued < totalWidth && order_it != order_end_it) { 8142292SN/A OpClass op_class = (*order_it).queueType; 8152292SN/A 8162292SN/A assert(!readyInsts[op_class].empty()); 8172292SN/A 8182292SN/A DynInstPtr issuing_inst = readyInsts[op_class].top(); 8192292SN/A 8202292SN/A if (issuing_inst->isFloating()) { 8212292SN/A fpInstQueueReads++; 8222292SN/A } else if (issuing_inst->isVector()) { 8232316SN/A vecInstQueueReads++; 8242316SN/A } else { 8252329SN/A intInstQueueReads++; 8262329SN/A } 8272329SN/A 8282329SN/A assert(issuing_inst->seqNum == (*order_it).oldestInst); 8292316SN/A 8302316SN/A if (issuing_inst->isSquashed()) { 8312316SN/A readyInsts[op_class].pop(); 8322292SN/A 8332292SN/A if (!readyInsts[op_class].empty()) { 8342292SN/A moveToYoungerInst(order_it); 8352292SN/A } else { 8362292SN/A readyIt[op_class] = listOrder.end(); 8372292SN/A queueOnList[op_class] = false; 8382292SN/A } 8392292SN/A 8402292SN/A listOrder.erase(order_it++); 8412292SN/A 8422292SN/A ++iqSquashedInstsIssued; 8432292SN/A 8442292SN/A continue; 8452292SN/A } 8462292SN/A 8472292SN/A int idx = FUPool::NoCapableFU; 8482292SN/A Cycles op_latency = Cycles(1); 8492292SN/A ThreadID tid = issuing_inst->threadNumber; 8502292SN/A 8512292SN/A if (op_class != No_OpClass) { 8522292SN/A idx = fuPool->getUnit(op_class); 8532292SN/A if (issuing_inst->isFloating()) { 8542292SN/A fpAluAccesses++; 8552292SN/A } else if (issuing_inst->isVector()) { 8562292SN/A vecAluAccesses++; 8572292SN/A } else { 8582292SN/A intAluAccesses++; 8592292SN/A } 8602292SN/A if (idx > FUPool::NoFreeFU) { 8612292SN/A op_latency = fuPool->getOpLatency(op_class); 8622292SN/A } 8632292SN/A } 8642292SN/A 8652329SN/A // If we have an instruction that doesn't require a FU, or a 8662329SN/A // valid FU, then schedule for execution. 
8672329SN/A if (idx != FUPool::NoFreeFU) { 8682329SN/A if (op_latency == Cycles(1)) { 8692329SN/A i2e_info->size++; 8702329SN/A instsToExecute.push_back(issuing_inst); 8712329SN/A 8722329SN/A // Add the FU onto the list of FU's to be freed next 8732329SN/A // cycle if we used one. 8742329SN/A if (idx >= 0) 8752329SN/A fuPool->freeUnitNextCycle(idx); 8762329SN/A } else { 8772329SN/A bool pipelined = fuPool->isPipelined(op_class); 8782329SN/A // Generate completion event for the FU 8792329SN/A ++wbOutstanding; 8802329SN/A FUCompletion *execution = new FUCompletion(issuing_inst, 8812329SN/A idx, this); 8822329SN/A 8832329SN/A cpu->schedule(execution, 8842329SN/A cpu->clockEdge(Cycles(op_latency - 1))); 8852329SN/A 8862329SN/A if (!pipelined) { 8872329SN/A // If FU isn't pipelined, then it must be freed 8882329SN/A // upon the execution completing. 8892329SN/A execution->setFreeFU(); 8902329SN/A } else { 8912329SN/A // Add the FU onto the list of FU's to be freed next cycle. 8922329SN/A fuPool->freeUnitNextCycle(idx); 8932329SN/A } 8942329SN/A } 895 896 DPRINTF(IQ, "Thread %i: Issuing instruction PC %s " 897 "[sn:%lli]\n", 898 tid, issuing_inst->pcState(), 899 issuing_inst->seqNum); 900 901 readyInsts[op_class].pop(); 902 903 if (!readyInsts[op_class].empty()) { 904 moveToYoungerInst(order_it); 905 } else { 906 readyIt[op_class] = listOrder.end(); 907 queueOnList[op_class] = false; 908 } 909 910 issuing_inst->setIssued(); 911 ++total_issued; 912 913#if TRACING_ON 914 issuing_inst->issueTick = curTick() - issuing_inst->fetchTick; 915#endif 916 917 if (!issuing_inst->isMemRef()) { 918 // Memory instructions can not be freed from the IQ until they 919 // complete. 
920 ++freeEntries; 921 count[tid]--; 922 issuing_inst->clearInIQ(); 923 } else { 924 memDepUnit[tid].issue(issuing_inst); 925 } 926 927 listOrder.erase(order_it++); 928 statIssuedInstType[tid][op_class]++; 929 } else { 930 statFuBusy[op_class]++; 931 fuBusy[tid]++; 932 ++order_it; 933 } 934 } 935 936 numIssuedDist.sample(total_issued); 937 iqInstsIssued+= total_issued; 938 939 // If we issued any instructions, tell the CPU we had activity. 940 // @todo If the way deferred memory instructions are handeled due to 941 // translation changes then the deferredMemInsts condition should be removed 942 // from the code below. 943 if (total_issued || !retryMemInsts.empty() || !deferredMemInsts.empty()) { 944 cpu->activityThisCycle(); 945 } else { 946 DPRINTF(IQ, "Not able to schedule any instructions.\n"); 947 } 948} 949 950template <class Impl> 951void 952InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst) 953{ 954 DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready " 955 "to execute.\n", inst); 956 957 NonSpecMapIt inst_it = nonSpecInsts.find(inst); 958 959 assert(inst_it != nonSpecInsts.end()); 960 961 ThreadID tid = (*inst_it).second->threadNumber; 962 963 (*inst_it).second->setAtCommit(); 964 965 (*inst_it).second->setCanIssue(); 966 967 if (!(*inst_it).second->isMemRef()) { 968 addIfReady((*inst_it).second); 969 } else { 970 memDepUnit[tid].nonSpecInstReady((*inst_it).second); 971 } 972 973 (*inst_it).second = NULL; 974 975 nonSpecInsts.erase(inst_it); 976} 977 978template <class Impl> 979void 980InstructionQueue<Impl>::commit(const InstSeqNum &inst, ThreadID tid) 981{ 982 DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n", 983 tid,inst); 984 985 ListIt iq_it = instList[tid].begin(); 986 987 while (iq_it != instList[tid].end() && 988 (*iq_it)->seqNum <= inst) { 989 ++iq_it; 990 instList[tid].pop_front(); 991 } 992 993 assert(freeEntries == (numEntries - countInsts())); 994} 995 996template <class Impl> 997int 
998InstructionQueue<Impl>::wakeDependents(const DynInstPtr &completed_inst) 999{ 1000 int dependents = 0; 1001 1002 // The instruction queue here takes care of both floating and int ops 1003 if (completed_inst->isFloating()) { 1004 fpInstQueueWakeupAccesses++; 1005 } else if (completed_inst->isVector()) { 1006 vecInstQueueWakeupAccesses++; 1007 } else { 1008 intInstQueueWakeupAccesses++; 1009 } 1010 1011 DPRINTF(IQ, "Waking dependents of completed instruction.\n"); 1012 1013 assert(!completed_inst->isSquashed()); 1014 1015 // Tell the memory dependence unit to wake any dependents on this 1016 // instruction if it is a memory instruction. Also complete the memory 1017 // instruction at this point since we know it executed without issues. 1018 // @todo: Might want to rename "completeMemInst" to something that 1019 // indicates that it won't need to be replayed, and call this 1020 // earlier. Might not be a big deal. 1021 if (completed_inst->isMemRef()) { 1022 memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst); 1023 completeMemInst(completed_inst); 1024 } else if (completed_inst->isMemBarrier() || 1025 completed_inst->isWriteBarrier()) { 1026 memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst); 1027 } 1028 1029 for (int dest_reg_idx = 0; 1030 dest_reg_idx < completed_inst->numDestRegs(); 1031 dest_reg_idx++) 1032 { 1033 PhysRegIdPtr dest_reg = 1034 completed_inst->renamedDestRegIdx(dest_reg_idx); 1035 1036 // Special case of uniq or control registers. They are not 1037 // handled by the IQ and thus have no dependency graph entry. 
1038 if (dest_reg->isFixedMapping()) { 1039 DPRINTF(IQ, "Reg %d [%s] is part of a fix mapping, skipping\n", 1040 dest_reg->index(), dest_reg->className()); 1041 continue; 1042 } 1043 1044 DPRINTF(IQ, "Waking any dependents on register %i (%s).\n", 1045 dest_reg->index(), 1046 dest_reg->className()); 1047 1048 //Go through the dependency chain, marking the registers as 1049 //ready within the waiting instructions. 1050 DynInstPtr dep_inst = dependGraph.pop(dest_reg->flatIndex()); 1051 1052 while (dep_inst) { 1053 DPRINTF(IQ, "Waking up a dependent instruction, [sn:%lli] " 1054 "PC %s.\n", dep_inst->seqNum, dep_inst->pcState()); 1055 1056 // Might want to give more information to the instruction 1057 // so that it knows which of its source registers is 1058 // ready. However that would mean that the dependency 1059 // graph entries would need to hold the src_reg_idx. 1060 dep_inst->markSrcRegReady(); 1061 1062 addIfReady(dep_inst); 1063 1064 dep_inst = dependGraph.pop(dest_reg->flatIndex()); 1065 1066 ++dependents; 1067 } 1068 1069 // Reset the head node now that all of its dependents have 1070 // been woken up. 1071 assert(dependGraph.empty(dest_reg->flatIndex())); 1072 dependGraph.clearInst(dest_reg->flatIndex()); 1073 1074 // Mark the scoreboard as having that register ready. 1075 regScoreboard[dest_reg->flatIndex()] = true; 1076 } 1077 return dependents; 1078} 1079 1080template <class Impl> 1081void 1082InstructionQueue<Impl>::addReadyMemInst(const DynInstPtr &ready_inst) 1083{ 1084 OpClass op_class = ready_inst->opClass(); 1085 1086 readyInsts[op_class].push(ready_inst); 1087 1088 // Will need to reorder the list if either a queue is not on the list, 1089 // or it has an older instruction than last time. 
1090 if (!queueOnList[op_class]) { 1091 addToOrderList(op_class); 1092 } else if (readyInsts[op_class].top()->seqNum < 1093 (*readyIt[op_class]).oldestInst) { 1094 listOrder.erase(readyIt[op_class]); 1095 addToOrderList(op_class); 1096 } 1097 1098 DPRINTF(IQ, "Instruction is ready to issue, putting it onto " 1099 "the ready list, PC %s opclass:%i [sn:%lli].\n", 1100 ready_inst->pcState(), op_class, ready_inst->seqNum); 1101} 1102 1103template <class Impl> 1104void 1105InstructionQueue<Impl>::rescheduleMemInst(const DynInstPtr &resched_inst) 1106{ 1107 DPRINTF(IQ, "Rescheduling mem inst [sn:%lli]\n", resched_inst->seqNum); 1108 1109 // Reset DTB translation state 1110 resched_inst->translationStarted(false); 1111 resched_inst->translationCompleted(false); 1112 1113 resched_inst->clearCanIssue(); 1114 memDepUnit[resched_inst->threadNumber].reschedule(resched_inst); 1115} 1116 1117template <class Impl> 1118void 1119InstructionQueue<Impl>::replayMemInst(const DynInstPtr &replay_inst) 1120{ 1121 memDepUnit[replay_inst->threadNumber].replay(); 1122} 1123 1124template <class Impl> 1125void 1126InstructionQueue<Impl>::completeMemInst(const DynInstPtr &completed_inst) 1127{ 1128 ThreadID tid = completed_inst->threadNumber; 1129 1130 DPRINTF(IQ, "Completing mem instruction PC: %s [sn:%lli]\n", 1131 completed_inst->pcState(), completed_inst->seqNum); 1132 1133 ++freeEntries; 1134 1135 completed_inst->memOpDone(true); 1136 1137 memDepUnit[tid].completed(completed_inst); 1138 count[tid]--; 1139} 1140 1141template <class Impl> 1142void 1143InstructionQueue<Impl>::deferMemInst(const DynInstPtr &deferred_inst) 1144{ 1145 deferredMemInsts.push_back(deferred_inst); 1146} 1147 1148template <class Impl> 1149void 1150InstructionQueue<Impl>::blockMemInst(const DynInstPtr &blocked_inst) 1151{ 1152 blocked_inst->translationStarted(false); 1153 blocked_inst->translationCompleted(false); 1154 1155 blocked_inst->clearIssued(); 1156 blocked_inst->clearCanIssue(); 1157 
blockedMemInsts.push_back(blocked_inst); 1158} 1159 1160template <class Impl> 1161void 1162InstructionQueue<Impl>::cacheUnblocked() 1163{ 1164 retryMemInsts.splice(retryMemInsts.end(), blockedMemInsts); 1165 // Get the CPU ticking again 1166 cpu->wakeCPU(); 1167} 1168 1169template <class Impl> 1170typename Impl::DynInstPtr 1171InstructionQueue<Impl>::getDeferredMemInstToExecute() 1172{ 1173 for (ListIt it = deferredMemInsts.begin(); it != deferredMemInsts.end(); 1174 ++it) { 1175 if ((*it)->translationCompleted() || (*it)->isSquashed()) { 1176 DynInstPtr mem_inst = std::move(*it); 1177 deferredMemInsts.erase(it); 1178 return mem_inst; 1179 } 1180 } 1181 return nullptr; 1182} 1183 1184template <class Impl> 1185typename Impl::DynInstPtr 1186InstructionQueue<Impl>::getBlockedMemInstToExecute() 1187{ 1188 if (retryMemInsts.empty()) { 1189 return nullptr; 1190 } else { 1191 DynInstPtr mem_inst = std::move(retryMemInsts.front()); 1192 retryMemInsts.pop_front(); 1193 return mem_inst; 1194 } 1195} 1196 1197template <class Impl> 1198void 1199InstructionQueue<Impl>::violation(const DynInstPtr &store, 1200 const DynInstPtr &faulting_load) 1201{ 1202 intInstQueueWrites++; 1203 memDepUnit[store->threadNumber].violation(store, faulting_load); 1204} 1205 1206template <class Impl> 1207void 1208InstructionQueue<Impl>::squash(ThreadID tid) 1209{ 1210 DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in " 1211 "the IQ.\n", tid); 1212 1213 // Read instruction sequence number of last instruction out of the 1214 // time buffer. 1215 squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum; 1216 1217 doSquash(tid); 1218 1219 // Also tell the memory dependence unit to squash. 1220 memDepUnit[tid].squash(squashedSeqNum[tid], tid); 1221} 1222 1223template <class Impl> 1224void 1225InstructionQueue<Impl>::doSquash(ThreadID tid) 1226{ 1227 // Start at the tail. 
1228 ListIt squash_it = instList[tid].end(); 1229 --squash_it; 1230 1231 DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n", 1232 tid, squashedSeqNum[tid]); 1233 1234 // Squash any instructions younger than the squashed sequence number 1235 // given. 1236 while (squash_it != instList[tid].end() && 1237 (*squash_it)->seqNum > squashedSeqNum[tid]) { 1238 1239 DynInstPtr squashed_inst = (*squash_it); 1240 if (squashed_inst->isFloating()) { 1241 fpInstQueueWrites++; 1242 } else if (squashed_inst->isVector()) { 1243 vecInstQueueWrites++; 1244 } else { 1245 intInstQueueWrites++; 1246 } 1247 1248 // Only handle the instruction if it actually is in the IQ and 1249 // hasn't already been squashed in the IQ. 1250 if (squashed_inst->threadNumber != tid || 1251 squashed_inst->isSquashedInIQ()) { 1252 --squash_it; 1253 continue; 1254 } 1255 1256 if (!squashed_inst->isIssued() || 1257 (squashed_inst->isMemRef() && 1258 !squashed_inst->memOpDone())) { 1259 1260 DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %s squashed.\n", 1261 tid, squashed_inst->seqNum, squashed_inst->pcState()); 1262 1263 bool is_acq_rel = squashed_inst->isMemBarrier() && 1264 (squashed_inst->isLoad() || 1265 (squashed_inst->isStore() && 1266 !squashed_inst->isStoreConditional())); 1267 1268 // Remove the instruction from the dependency list. 1269 if (is_acq_rel || 1270 (!squashed_inst->isNonSpeculative() && 1271 !squashed_inst->isStoreConditional() && 1272 !squashed_inst->isMemBarrier() && 1273 !squashed_inst->isWriteBarrier())) { 1274 1275 for (int src_reg_idx = 0; 1276 src_reg_idx < squashed_inst->numSrcRegs(); 1277 src_reg_idx++) 1278 { 1279 PhysRegIdPtr src_reg = 1280 squashed_inst->renamedSrcRegIdx(src_reg_idx); 1281 1282 // Only remove it from the dependency graph if it 1283 // was placed there in the first place. 
1284 1285 // Instead of doing a linked list traversal, we 1286 // can just remove these squashed instructions 1287 // either at issue time, or when the register is 1288 // overwritten. The only downside to this is it 1289 // leaves more room for error. 1290 1291 if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) && 1292 !src_reg->isFixedMapping()) { 1293 dependGraph.remove(src_reg->flatIndex(), 1294 squashed_inst); 1295 } 1296 1297 1298 ++iqSquashedOperandsExamined; 1299 } 1300 } else if (!squashed_inst->isStoreConditional() || 1301 !squashed_inst->isCompleted()) { 1302 NonSpecMapIt ns_inst_it = 1303 nonSpecInsts.find(squashed_inst->seqNum); 1304 1305 // we remove non-speculative instructions from 1306 // nonSpecInsts already when they are ready, and so we 1307 // cannot always expect to find them 1308 if (ns_inst_it == nonSpecInsts.end()) { 1309 // loads that became ready but stalled on a 1310 // blocked cache are alreayd removed from 1311 // nonSpecInsts, and have not faulted 1312 assert(squashed_inst->getFault() != NoFault || 1313 squashed_inst->isMemRef()); 1314 } else { 1315 1316 (*ns_inst_it).second = NULL; 1317 1318 nonSpecInsts.erase(ns_inst_it); 1319 1320 ++iqSquashedNonSpecRemoved; 1321 } 1322 } 1323 1324 // Might want to also clear out the head of the dependency graph. 1325 1326 // Mark it as squashed within the IQ. 1327 squashed_inst->setSquashedInIQ(); 1328 1329 // @todo: Remove this hack where several statuses are set so the 1330 // inst will flow through the rest of the pipeline. 1331 squashed_inst->setIssued(); 1332 squashed_inst->setCanCommit(); 1333 squashed_inst->clearInIQ(); 1334 1335 //Update Thread IQ Count 1336 count[squashed_inst->threadNumber]--; 1337 1338 ++freeEntries; 1339 } 1340 1341 // IQ clears out the heads of the dependency graph only when 1342 // instructions reach writeback stage. 
If an instruction is squashed 1343 // before writeback stage, its head of dependency graph would not be 1344 // cleared out; it holds the instruction's DynInstPtr. This prevents 1345 // freeing the squashed instruction's DynInst. 1346 // Thus, we need to manually clear out the squashed instructions' heads 1347 // of dependency graph. 1348 for (int dest_reg_idx = 0; 1349 dest_reg_idx < squashed_inst->numDestRegs(); 1350 dest_reg_idx++) 1351 { 1352 PhysRegIdPtr dest_reg = 1353 squashed_inst->renamedDestRegIdx(dest_reg_idx); 1354 if (dest_reg->isFixedMapping()){ 1355 continue; 1356 } 1357 assert(dependGraph.empty(dest_reg->flatIndex())); 1358 dependGraph.clearInst(dest_reg->flatIndex()); 1359 } 1360 instList[tid].erase(squash_it--); 1361 ++iqSquashedInstsExamined; 1362 } 1363} 1364 1365template <class Impl> 1366bool 1367InstructionQueue<Impl>::addToDependents(const DynInstPtr &new_inst) 1368{ 1369 // Loop through the instruction's source registers, adding 1370 // them to the dependency list if they are not ready. 1371 int8_t total_src_regs = new_inst->numSrcRegs(); 1372 bool return_val = false; 1373 1374 for (int src_reg_idx = 0; 1375 src_reg_idx < total_src_regs; 1376 src_reg_idx++) 1377 { 1378 // Only add it to the dependency graph if it's not ready. 1379 if (!new_inst->isReadySrcRegIdx(src_reg_idx)) { 1380 PhysRegIdPtr src_reg = new_inst->renamedSrcRegIdx(src_reg_idx); 1381 1382 // Check the IQ's scoreboard to make sure the register 1383 // hasn't become ready while the instruction was in flight 1384 // between stages. Only if it really isn't ready should 1385 // it be added to the dependency graph. 
1386 if (src_reg->isFixedMapping()) { 1387 continue; 1388 } else if (!regScoreboard[src_reg->flatIndex()]) { 1389 DPRINTF(IQ, "Instruction PC %s has src reg %i (%s) that " 1390 "is being added to the dependency chain.\n", 1391 new_inst->pcState(), src_reg->index(), 1392 src_reg->className()); 1393 1394 dependGraph.insert(src_reg->flatIndex(), new_inst); 1395 1396 // Change the return value to indicate that something 1397 // was added to the dependency graph. 1398 return_val = true; 1399 } else { 1400 DPRINTF(IQ, "Instruction PC %s has src reg %i (%s) that " 1401 "became ready before it reached the IQ.\n", 1402 new_inst->pcState(), src_reg->index(), 1403 src_reg->className()); 1404 // Mark a register ready within the instruction. 1405 new_inst->markSrcRegReady(src_reg_idx); 1406 } 1407 } 1408 } 1409 1410 return return_val; 1411} 1412 1413template <class Impl> 1414void 1415InstructionQueue<Impl>::addToProducers(const DynInstPtr &new_inst) 1416{ 1417 // Nothing really needs to be marked when an instruction becomes 1418 // the producer of a register's value, but for convenience a ptr 1419 // to the producing instruction will be placed in the head node of 1420 // the dependency links. 1421 int8_t total_dest_regs = new_inst->numDestRegs(); 1422 1423 for (int dest_reg_idx = 0; 1424 dest_reg_idx < total_dest_regs; 1425 dest_reg_idx++) 1426 { 1427 PhysRegIdPtr dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx); 1428 1429 // Some registers have fixed mapping, and there is no need to track 1430 // dependencies as these instructions must be executed at commit. 1431 if (dest_reg->isFixedMapping()) { 1432 continue; 1433 } 1434 1435 if (!dependGraph.empty(dest_reg->flatIndex())) { 1436 dependGraph.dump(); 1437 panic("Dependency graph %i (%s) (flat: %i) not empty!", 1438 dest_reg->index(), dest_reg->className(), 1439 dest_reg->flatIndex()); 1440 } 1441 1442 dependGraph.setInst(dest_reg->flatIndex(), new_inst); 1443 1444 // Mark the scoreboard to say it's not yet ready. 
1445 regScoreboard[dest_reg->flatIndex()] = false; 1446 } 1447} 1448 1449template <class Impl> 1450void 1451InstructionQueue<Impl>::addIfReady(const DynInstPtr &inst) 1452{ 1453 // If the instruction now has all of its source registers 1454 // available, then add it to the list of ready instructions. 1455 if (inst->readyToIssue()) { 1456 1457 //Add the instruction to the proper ready list. 1458 if (inst->isMemRef()) { 1459 1460 DPRINTF(IQ, "Checking if memory instruction can issue.\n"); 1461 1462 // Message to the mem dependence unit that this instruction has 1463 // its registers ready. 1464 memDepUnit[inst->threadNumber].regsReady(inst); 1465 1466 return; 1467 } 1468 1469 OpClass op_class = inst->opClass(); 1470 1471 DPRINTF(IQ, "Instruction is ready to issue, putting it onto " 1472 "the ready list, PC %s opclass:%i [sn:%lli].\n", 1473 inst->pcState(), op_class, inst->seqNum); 1474 1475 readyInsts[op_class].push(inst); 1476 1477 // Will need to reorder the list if either a queue is not on the list, 1478 // or it has an older instruction than last time. 1479 if (!queueOnList[op_class]) { 1480 addToOrderList(op_class); 1481 } else if (readyInsts[op_class].top()->seqNum < 1482 (*readyIt[op_class]).oldestInst) { 1483 listOrder.erase(readyIt[op_class]); 1484 addToOrderList(op_class); 1485 } 1486 } 1487} 1488 1489template <class Impl> 1490int 1491InstructionQueue<Impl>::countInsts() 1492{ 1493#if 0 1494 //ksewell:This works but definitely could use a cleaner write 1495 //with a more intuitive way of counting. Right now it's 1496 //just brute force .... 1497 // Change the #if if you want to use this method. 
1498 int total_insts = 0; 1499 1500 for (ThreadID tid = 0; tid < numThreads; ++tid) { 1501 ListIt count_it = instList[tid].begin(); 1502 1503 while (count_it != instList[tid].end()) { 1504 if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) { 1505 if (!(*count_it)->isIssued()) { 1506 ++total_insts; 1507 } else if ((*count_it)->isMemRef() && 1508 !(*count_it)->memOpDone) { 1509 // Loads that have not been marked as executed still count 1510 // towards the total instructions. 1511 ++total_insts; 1512 } 1513 } 1514 1515 ++count_it; 1516 } 1517 } 1518 1519 return total_insts; 1520#else 1521 return numEntries - freeEntries; 1522#endif 1523} 1524 1525template <class Impl> 1526void 1527InstructionQueue<Impl>::dumpLists() 1528{ 1529 for (int i = 0; i < Num_OpClasses; ++i) { 1530 cprintf("Ready list %i size: %i\n", i, readyInsts[i].size()); 1531 1532 cprintf("\n"); 1533 } 1534 1535 cprintf("Non speculative list size: %i\n", nonSpecInsts.size()); 1536 1537 NonSpecMapIt non_spec_it = nonSpecInsts.begin(); 1538 NonSpecMapIt non_spec_end_it = nonSpecInsts.end(); 1539 1540 cprintf("Non speculative list: "); 1541 1542 while (non_spec_it != non_spec_end_it) { 1543 cprintf("%s [sn:%lli]", (*non_spec_it).second->pcState(), 1544 (*non_spec_it).second->seqNum); 1545 ++non_spec_it; 1546 } 1547 1548 cprintf("\n"); 1549 1550 ListOrderIt list_order_it = listOrder.begin(); 1551 ListOrderIt list_order_end_it = listOrder.end(); 1552 int i = 1; 1553 1554 cprintf("List order: "); 1555 1556 while (list_order_it != list_order_end_it) { 1557 cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType, 1558 (*list_order_it).oldestInst); 1559 1560 ++list_order_it; 1561 ++i; 1562 } 1563 1564 cprintf("\n"); 1565} 1566 1567 1568template <class Impl> 1569void 1570InstructionQueue<Impl>::dumpInsts() 1571{ 1572 for (ThreadID tid = 0; tid < numThreads; ++tid) { 1573 int num = 0; 1574 int valid_num = 0; 1575 ListIt inst_list_it = instList[tid].begin(); 1576 1577 while (inst_list_it != 
instList[tid].end()) { 1578 cprintf("Instruction:%i\n", num); 1579 if (!(*inst_list_it)->isSquashed()) { 1580 if (!(*inst_list_it)->isIssued()) { 1581 ++valid_num; 1582 cprintf("Count:%i\n", valid_num); 1583 } else if ((*inst_list_it)->isMemRef() && 1584 !(*inst_list_it)->memOpDone()) { 1585 // Loads that have not been marked as executed 1586 // still count towards the total instructions. 1587 ++valid_num; 1588 cprintf("Count:%i\n", valid_num); 1589 } 1590 } 1591 1592 cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n" 1593 "Issued:%i\nSquashed:%i\n", 1594 (*inst_list_it)->pcState(), 1595 (*inst_list_it)->seqNum, 1596 (*inst_list_it)->threadNumber, 1597 (*inst_list_it)->isIssued(), 1598 (*inst_list_it)->isSquashed()); 1599 1600 if ((*inst_list_it)->isMemRef()) { 1601 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone()); 1602 } 1603 1604 cprintf("\n"); 1605 1606 inst_list_it++; 1607 ++num; 1608 } 1609 } 1610 1611 cprintf("Insts to Execute list:\n"); 1612 1613 int num = 0; 1614 int valid_num = 0; 1615 ListIt inst_list_it = instsToExecute.begin(); 1616 1617 while (inst_list_it != instsToExecute.end()) 1618 { 1619 cprintf("Instruction:%i\n", 1620 num); 1621 if (!(*inst_list_it)->isSquashed()) { 1622 if (!(*inst_list_it)->isIssued()) { 1623 ++valid_num; 1624 cprintf("Count:%i\n", valid_num); 1625 } else if ((*inst_list_it)->isMemRef() && 1626 !(*inst_list_it)->memOpDone()) { 1627 // Loads that have not been marked as executed 1628 // still count towards the total instructions. 
1629 ++valid_num; 1630 cprintf("Count:%i\n", valid_num); 1631 } 1632 } 1633 1634 cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n" 1635 "Issued:%i\nSquashed:%i\n", 1636 (*inst_list_it)->pcState(), 1637 (*inst_list_it)->seqNum, 1638 (*inst_list_it)->threadNumber, 1639 (*inst_list_it)->isIssued(), 1640 (*inst_list_it)->isSquashed()); 1641 1642 if ((*inst_list_it)->isMemRef()) { 1643 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone()); 1644 } 1645 1646 cprintf("\n"); 1647 1648 inst_list_it++; 1649 ++num; 1650 } 1651} 1652 1653#endif//__CPU_O3_INST_QUEUE_IMPL_HH__ 1654