// inst_queue_impl.hh revision 13561:523608bb180c
/*
 * Copyright (c) 2011-2014 ARM Limited
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT 336657Snate@binkert.org * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 346657Snate@binkert.org * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 356657Snate@binkert.org * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 366657Snate@binkert.org * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 376657Snate@binkert.org * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 386657Snate@binkert.org * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 396657Snate@binkert.org * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 406657Snate@binkert.org * 416657Snate@binkert.org * Authors: Kevin Lim 426657Snate@binkert.org * Korey Sewell 436657Snate@binkert.org */ 446657Snate@binkert.org 456657Snate@binkert.org#ifndef __CPU_O3_INST_QUEUE_IMPL_HH__ 466657Snate@binkert.org#define __CPU_O3_INST_QUEUE_IMPL_HH__ 476657Snate@binkert.org 486657Snate@binkert.org#include <limits> 496657Snate@binkert.org#include <vector> 506882SBrad.Beckmann@amd.com 516657Snate@binkert.org#include "base/logging.hh" 526657Snate@binkert.org#include "cpu/o3/fu_pool.hh" 536657Snate@binkert.org#include "cpu/o3/inst_queue.hh" 546657Snate@binkert.org#include "debug/IQ.hh" 556657Snate@binkert.org#include "enums/OpClass.hh" 566657Snate@binkert.org#include "params/DerivO3CPU.hh" 576657Snate@binkert.org#include "sim/core.hh" 586657Snate@binkert.org 596657Snate@binkert.org// clang complains about std::set being overloaded with Packet::set if 606657Snate@binkert.org// we open up the entire namespace std 616657Snate@binkert.orgusing std::list; 626657Snate@binkert.org 636657Snate@binkert.orgtemplate <class Impl> 6410895Snilay@cs.wisc.eduInstructionQueue<Impl>::FUCompletion::FUCompletion(const DynInstPtr &_inst, 656657Snate@binkert.org int fu_idx, InstructionQueue<Impl> *iq_ptr) 666657Snate@binkert.org : Event(Stat_Event_Pri, AutoDelete), 
676657Snate@binkert.org inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false) 6810228Snilay@cs.wisc.edu{ 696657Snate@binkert.org} 706657Snate@binkert.org 7110228Snilay@cs.wisc.edutemplate <class Impl> 726657Snate@binkert.orgvoid 736657Snate@binkert.orgInstructionQueue<Impl>::FUCompletion::process() 746657Snate@binkert.org{ 756657Snate@binkert.org iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1); 766657Snate@binkert.org inst = NULL; 776657Snate@binkert.org} 786657Snate@binkert.org 796657Snate@binkert.org 806657Snate@binkert.orgtemplate <class Impl> 816657Snate@binkert.orgconst char * 826657Snate@binkert.orgInstructionQueue<Impl>::FUCompletion::description() const 836657Snate@binkert.org{ 846657Snate@binkert.org return "Functional unit completion"; 856657Snate@binkert.org} 866657Snate@binkert.org 876657Snate@binkert.orgtemplate <class Impl> 888086SBrad.Beckmann@amd.comInstructionQueue<Impl>::InstructionQueue(O3CPU *cpu_ptr, IEW *iew_ptr, 898086SBrad.Beckmann@amd.com DerivO3CPUParams *params) 908086SBrad.Beckmann@amd.com : cpu(cpu_ptr), 916657Snate@binkert.org iewStage(iew_ptr), 926657Snate@binkert.org fuPool(params->fuPool), 936657Snate@binkert.org iqPolicy(params->smtIQPolicy), 946657Snate@binkert.org numEntries(params->numIQEntries), 956657Snate@binkert.org totalWidth(params->issueWidth), 966657Snate@binkert.org commitToIEWDelay(params->commitToIEWDelay) 976657Snate@binkert.org{ 9810895Snilay@cs.wisc.edu assert(fuPool); 996657Snate@binkert.org 1006657Snate@binkert.org numThreads = params->numThreads; 1016657Snate@binkert.org 1026657Snate@binkert.org // Set the number of total physical registers 1036657Snate@binkert.org // As the vector registers have two addressing modes, they are added twice 1046657Snate@binkert.org numPhysRegs = params->numPhysIntRegs + params->numPhysFloatRegs + 1056657Snate@binkert.org params->numPhysVecRegs + 1066657Snate@binkert.org params->numPhysVecRegs * TheISA::NumVecElemPerVecReg + 1076657Snate@binkert.org params->numPhysCCRegs; 
1086657Snate@binkert.org 1096657Snate@binkert.org //Create an entry for each physical register within the 1106657Snate@binkert.org //dependency graph. 1116657Snate@binkert.org dependGraph.resize(numPhysRegs); 1126657Snate@binkert.org 1136657Snate@binkert.org // Resize the register scoreboard. 1146657Snate@binkert.org regScoreboard.resize(numPhysRegs); 1156657Snate@binkert.org 1166657Snate@binkert.org //Initialize Mem Dependence Units 1176657Snate@binkert.org for (ThreadID tid = 0; tid < Impl::MaxThreads; tid++) { 1186657Snate@binkert.org memDepUnit[tid].init(params, tid); 1196657Snate@binkert.org memDepUnit[tid].setIQ(this); 1206657Snate@binkert.org } 1216657Snate@binkert.org 1226657Snate@binkert.org resetState(); 1236657Snate@binkert.org 1246657Snate@binkert.org //Figure out resource sharing policy 1259298Snilay@cs.wisc.edu if (iqPolicy == SMTQueuePolicy::Dynamic) { 1266657Snate@binkert.org //Set Max Entries to Total ROB Capacity 1276657Snate@binkert.org for (ThreadID tid = 0; tid < numThreads; tid++) { 1286657Snate@binkert.org maxEntries[tid] = numEntries; 1296657Snate@binkert.org } 1306657Snate@binkert.org 1316657Snate@binkert.org } else if (iqPolicy == SMTQueuePolicy::Partitioned) { 1329302Snilay@cs.wisc.edu //@todo:make work if part_amt doesnt divide evenly. 
1339302Snilay@cs.wisc.edu int part_amt = numEntries / numThreads; 1349302Snilay@cs.wisc.edu 1356657Snate@binkert.org //Divide ROB up evenly 1366657Snate@binkert.org for (ThreadID tid = 0; tid < numThreads; tid++) { 1376657Snate@binkert.org maxEntries[tid] = part_amt; 1386657Snate@binkert.org } 1396657Snate@binkert.org 1406657Snate@binkert.org DPRINTF(IQ, "IQ sharing policy set to Partitioned:" 1416657Snate@binkert.org "%i entries per thread.\n",part_amt); 1426657Snate@binkert.org } else if (iqPolicy == SMTQueuePolicy::Threshold) { 1436882SBrad.Beckmann@amd.com double threshold = (double)params->smtIQThreshold / 100; 1446882SBrad.Beckmann@amd.com 1456882SBrad.Beckmann@amd.com int thresholdIQ = (int)((double)threshold * numEntries); 1468086SBrad.Beckmann@amd.com 1478086SBrad.Beckmann@amd.com //Divide up by threshold amount 1488086SBrad.Beckmann@amd.com for (ThreadID tid = 0; tid < numThreads; tid++) { 14910307Snilay@cs.wisc.edu maxEntries[tid] = thresholdIQ; 15010307Snilay@cs.wisc.edu } 1516657Snate@binkert.org 1526657Snate@binkert.org DPRINTF(IQ, "IQ sharing policy set to Threshold:" 1536657Snate@binkert.org "%i entries per thread.\n",thresholdIQ); 15410307Snilay@cs.wisc.edu } 1559298Snilay@cs.wisc.edu for (ThreadID tid = numThreads; tid < Impl::MaxThreads; tid++) { 1569298Snilay@cs.wisc.edu maxEntries[tid] = 0; 1579298Snilay@cs.wisc.edu } 1586657Snate@binkert.org} 1596657Snate@binkert.org 1606657Snate@binkert.orgtemplate <class Impl> 1616657Snate@binkert.orgInstructionQueue<Impl>::~InstructionQueue() 1626657Snate@binkert.org{ 1636657Snate@binkert.org dependGraph.reset(); 1646657Snate@binkert.org#ifdef DEBUG 1656657Snate@binkert.org cprintf("Nodes traversed: %i, removed: %i\n", 1666657Snate@binkert.org dependGraph.nodesTraversed, dependGraph.nodesRemoved); 1676657Snate@binkert.org#endif 1686657Snate@binkert.org} 1699219Spower.jg@gmail.com 1706657Snate@binkert.orgtemplate <class Impl> 1716657Snate@binkert.orgstd::string 
1726657Snate@binkert.orgInstructionQueue<Impl>::name() const 1736657Snate@binkert.org{ 1746657Snate@binkert.org return cpu->name() + ".iq"; 1756657Snate@binkert.org} 1766657Snate@binkert.org 1776657Snate@binkert.orgtemplate <class Impl> 1786657Snate@binkert.orgvoid 1796657Snate@binkert.orgInstructionQueue<Impl>::regStats() 1806657Snate@binkert.org{ 1816657Snate@binkert.org using namespace Stats; 1826999Snate@binkert.org iqInstsAdded 1836657Snate@binkert.org .name(name() + ".iqInstsAdded") 1846657Snate@binkert.org .desc("Number of instructions added to the IQ (excludes non-spec)") 1856657Snate@binkert.org .prereq(iqInstsAdded); 1866657Snate@binkert.org 1876657Snate@binkert.org iqNonSpecInstsAdded 1886657Snate@binkert.org .name(name() + ".iqNonSpecInstsAdded") 1896657Snate@binkert.org .desc("Number of non-speculative instructions added to the IQ") 1907007Snate@binkert.org .prereq(iqNonSpecInstsAdded); 1917007Snate@binkert.org 1926657Snate@binkert.org iqInstsIssued 1937002Snate@binkert.org .name(name() + ".iqInstsIssued") 1947002Snate@binkert.org .desc("Number of instructions issued") 1959466Snilay@cs.wisc.edu .prereq(iqInstsIssued); 1966657Snate@binkert.org 1976657Snate@binkert.org iqIntInstsIssued 1986657Snate@binkert.org .name(name() + ".iqIntInstsIssued") 1996657Snate@binkert.org .desc("Number of integer instructions issued") 2006657Snate@binkert.org .prereq(iqIntInstsIssued); 2016657Snate@binkert.org 2026657Snate@binkert.org iqFloatInstsIssued 2036657Snate@binkert.org .name(name() + ".iqFloatInstsIssued") 2046657Snate@binkert.org .desc("Number of float instructions issued") 2056657Snate@binkert.org .prereq(iqFloatInstsIssued); 2066657Snate@binkert.org 2076657Snate@binkert.org iqBranchInstsIssued 2087007Snate@binkert.org .name(name() + ".iqBranchInstsIssued") 2097007Snate@binkert.org .desc("Number of branch instructions issued") 2106657Snate@binkert.org .prereq(iqBranchInstsIssued); 2119466Snilay@cs.wisc.edu 2126657Snate@binkert.org iqMemInstsIssued 
2136657Snate@binkert.org .name(name() + ".iqMemInstsIssued") 2149466Snilay@cs.wisc.edu .desc("Number of memory instructions issued") 2159508Snilay@cs.wisc.edu .prereq(iqMemInstsIssued); 2169466Snilay@cs.wisc.edu 2179466Snilay@cs.wisc.edu iqMiscInstsIssued 2189466Snilay@cs.wisc.edu .name(name() + ".iqMiscInstsIssued") 2196657Snate@binkert.org .desc("Number of miscellaneous instructions issued") 2206657Snate@binkert.org .prereq(iqMiscInstsIssued); 2216657Snate@binkert.org 2226657Snate@binkert.org iqSquashedInstsIssued 2236657Snate@binkert.org .name(name() + ".iqSquashedInstsIssued") 2246657Snate@binkert.org .desc("Number of squashed instructions issued") 2256657Snate@binkert.org .prereq(iqSquashedInstsIssued); 2266657Snate@binkert.org 2276657Snate@binkert.org iqSquashedInstsExamined 2286657Snate@binkert.org .name(name() + ".iqSquashedInstsExamined") 2296657Snate@binkert.org .desc("Number of squashed instructions iterated over during squash;" 2306657Snate@binkert.org " mainly for profiling") 2316657Snate@binkert.org .prereq(iqSquashedInstsExamined); 2326657Snate@binkert.org 2336657Snate@binkert.org iqSquashedOperandsExamined 2346657Snate@binkert.org .name(name() + ".iqSquashedOperandsExamined") 2356657Snate@binkert.org .desc("Number of squashed operands that are examined and possibly " 2367453Snate@binkert.org "removed from graph") 2377453Snate@binkert.org .prereq(iqSquashedOperandsExamined); 2387453Snate@binkert.org 2397453Snate@binkert.org iqSquashedNonSpecRemoved 2407453Snate@binkert.org .name(name() + ".iqSquashedNonSpecRemoved") 2417453Snate@binkert.org .desc("Number of squashed non-spec instructions that were removed") 2427453Snate@binkert.org .prereq(iqSquashedNonSpecRemoved); 2437453Snate@binkert.org/* 2447453Snate@binkert.org queueResDist 2457453Snate@binkert.org .init(Num_OpClasses, 0, 99, 2) 2467453Snate@binkert.org .name(name() + ".IQ:residence:") 2477453Snate@binkert.org .desc("cycles from dispatch to issue") 2487453Snate@binkert.org .flags(total | pdf | 
cdf ) 2497453Snate@binkert.org ; 2507453Snate@binkert.org for (int i = 0; i < Num_OpClasses; ++i) { 2517453Snate@binkert.org queueResDist.subname(i, opClassStrings[i]); 2527453Snate@binkert.org } 2536657Snate@binkert.org*/ 2546657Snate@binkert.org numIssuedDist 2556657Snate@binkert.org .init(0,totalWidth,1) 2566657Snate@binkert.org .name(name() + ".issued_per_cycle") 2579466Snilay@cs.wisc.edu .desc("Number of insts issued each cycle") 2586657Snate@binkert.org .flags(pdf) 2599466Snilay@cs.wisc.edu ; 2609508Snilay@cs.wisc.edu/* 2619466Snilay@cs.wisc.edu dist_unissued 2626657Snate@binkert.org .init(Num_OpClasses+2) 2636657Snate@binkert.org .name(name() + ".unissued_cause") 2646657Snate@binkert.org .desc("Reason ready instruction not issued") 2656657Snate@binkert.org .flags(pdf | dist) 2669466Snilay@cs.wisc.edu ; 2679466Snilay@cs.wisc.edu for (int i=0; i < (Num_OpClasses + 2); ++i) { 2689466Snilay@cs.wisc.edu dist_unissued.subname(i, unissued_names[i]); 2699466Snilay@cs.wisc.edu } 2706657Snate@binkert.org*/ 2716657Snate@binkert.org statIssuedInstType 2726657Snate@binkert.org .init(numThreads,Enums::Num_OpClass) 2736657Snate@binkert.org .name(name() + ".FU_type") 2746657Snate@binkert.org .desc("Type of FU issued") 2756657Snate@binkert.org .flags(total | pdf | dist) 2766657Snate@binkert.org ; 2776657Snate@binkert.org statIssuedInstType.ysubnames(Enums::OpClassStrings); 2786657Snate@binkert.org 2799466Snilay@cs.wisc.edu // 28010472Sandreas.hansson@arm.com // How long did instructions for a particular FU type wait prior to issue 28110472Sandreas.hansson@arm.com // 28210472Sandreas.hansson@arm.com/* 28310472Sandreas.hansson@arm.com issueDelayDist 28410472Sandreas.hansson@arm.com .init(Num_OpClasses,0,99,2) 28510472Sandreas.hansson@arm.com .name(name() + ".") 28610472Sandreas.hansson@arm.com .desc("cycles from operands ready to issue") 28710472Sandreas.hansson@arm.com .flags(pdf | cdf) 28810472Sandreas.hansson@arm.com ; 28910472Sandreas.hansson@arm.com 
2907453Snate@binkert.org for (int i=0; i<Num_OpClasses; ++i) { 2917007Snate@binkert.org std::stringstream subname; 2927007Snate@binkert.org subname << opClassStrings[i] << "_delay"; 2937453Snate@binkert.org issueDelayDist.subname(i, subname.str()); 2947007Snate@binkert.org } 2956657Snate@binkert.org*/ 2966657Snate@binkert.org issueRate 2976657Snate@binkert.org .name(name() + ".rate") 2986657Snate@binkert.org .desc("Inst issue rate") 2996657Snate@binkert.org .flags(total) 3006657Snate@binkert.org ; 3016657Snate@binkert.org issueRate = iqInstsIssued / cpu->numCycles; 3026657Snate@binkert.org 3036657Snate@binkert.org statFuBusy 3046657Snate@binkert.org .init(Num_OpClasses) 3057007Snate@binkert.org .name(name() + ".fu_full") 3067007Snate@binkert.org .desc("attempts to use FU when none available") 3077007Snate@binkert.org .flags(pdf | dist) 3087007Snate@binkert.org ; 3097007Snate@binkert.org for (int i=0; i < Num_OpClasses; ++i) { 3106657Snate@binkert.org statFuBusy.subname(i, Enums::OpClassStrings[i]); 3116657Snate@binkert.org } 3126657Snate@binkert.org 3136657Snate@binkert.org fuBusy 3146657Snate@binkert.org .init(numThreads) 3156657Snate@binkert.org .name(name() + ".fu_busy_cnt") 3166657Snate@binkert.org .desc("FU busy when requested") 3176657Snate@binkert.org .flags(total) 3186657Snate@binkert.org ; 3197007Snate@binkert.org 3207007Snate@binkert.org fuBusyRate 3217007Snate@binkert.org .name(name() + ".fu_busy_rate") 3227007Snate@binkert.org .desc("FU busy rate (busy events/executed inst)") 3237007Snate@binkert.org .flags(total) 3246657Snate@binkert.org ; 3256657Snate@binkert.org fuBusyRate = fuBusy / iqInstsIssued; 3266657Snate@binkert.org 3276657Snate@binkert.org for (ThreadID tid = 0; tid < numThreads; tid++) { 3286657Snate@binkert.org // Tell mem dependence unit to reg stats as well. 
3296657Snate@binkert.org memDepUnit[tid].regStats(); 3306657Snate@binkert.org } 3317007Snate@binkert.org 3327007Snate@binkert.org intInstQueueReads 3337007Snate@binkert.org .name(name() + ".int_inst_queue_reads") 3347007Snate@binkert.org .desc("Number of integer instruction queue reads") 3357007Snate@binkert.org .flags(total); 3366657Snate@binkert.org 3376657Snate@binkert.org intInstQueueWrites 3387002Snate@binkert.org .name(name() + ".int_inst_queue_writes") 3396657Snate@binkert.org .desc("Number of integer instruction queue writes") 3406657Snate@binkert.org .flags(total); 3416657Snate@binkert.org 3426657Snate@binkert.org intInstQueueWakeupAccesses 3436657Snate@binkert.org .name(name() + ".int_inst_queue_wakeup_accesses") 3446657Snate@binkert.org .desc("Number of integer instruction queue wakeup accesses") 3456657Snate@binkert.org .flags(total); 3466657Snate@binkert.org 3476657Snate@binkert.org fpInstQueueReads 3486657Snate@binkert.org .name(name() + ".fp_inst_queue_reads") 3496657Snate@binkert.org .desc("Number of floating instruction queue reads") 3506657Snate@binkert.org .flags(total); 3516657Snate@binkert.org 3526657Snate@binkert.org fpInstQueueWrites 3536657Snate@binkert.org .name(name() + ".fp_inst_queue_writes") 3546657Snate@binkert.org .desc("Number of floating instruction queue writes") 3556657Snate@binkert.org .flags(total); 3566657Snate@binkert.org 3576657Snate@binkert.org fpInstQueueWakeupAccesses 3586657Snate@binkert.org .name(name() + ".fp_inst_queue_wakeup_accesses") 3596657Snate@binkert.org .desc("Number of floating instruction queue wakeup accesses") 3607007Snate@binkert.org .flags(total); 3616657Snate@binkert.org 3627007Snate@binkert.org vecInstQueueReads 3636657Snate@binkert.org .name(name() + ".vec_inst_queue_reads") 36410307Snilay@cs.wisc.edu .desc("Number of vector instruction queue reads") 36510307Snilay@cs.wisc.edu .flags(total); 36610307Snilay@cs.wisc.edu 3679298Snilay@cs.wisc.edu vecInstQueueWrites 3689298Snilay@cs.wisc.edu .name(name() + 
".vec_inst_queue_writes") 3699298Snilay@cs.wisc.edu .desc("Number of vector instruction queue writes") 3706657Snate@binkert.org .flags(total); 3716657Snate@binkert.org 3726657Snate@binkert.org vecInstQueueWakeupAccesses 3736657Snate@binkert.org .name(name() + ".vec_inst_queue_wakeup_accesses") 3747055Snate@binkert.org .desc("Number of vector instruction queue wakeup accesses") 3757007Snate@binkert.org .flags(total); 3766657Snate@binkert.org 3776657Snate@binkert.org intAluAccesses 3787002Snate@binkert.org .name(name() + ".int_alu_accesses") 3796657Snate@binkert.org .desc("Number of integer alu accesses") 3806657Snate@binkert.org .flags(total); 3816657Snate@binkert.org 3827007Snate@binkert.org fpAluAccesses 3836657Snate@binkert.org .name(name() + ".fp_alu_accesses") 3846657Snate@binkert.org .desc("Number of floating point alu accesses") 3856657Snate@binkert.org .flags(total); 3866657Snate@binkert.org 3876657Snate@binkert.org vecAluAccesses 3886999Snate@binkert.org .name(name() + ".vec_alu_accesses") 3896657Snate@binkert.org .desc("Number of vector alu accesses") 3906657Snate@binkert.org .flags(total); 3916657Snate@binkert.org 3926657Snate@binkert.org} 3936657Snate@binkert.org 3946657Snate@binkert.orgtemplate <class Impl> 3956657Snate@binkert.orgvoid 3967002Snate@binkert.orgInstructionQueue<Impl>::resetState() 39710472Sandreas.hansson@arm.com{ 3987002Snate@binkert.org //Initialize thread IQ counts 3996657Snate@binkert.org for (ThreadID tid = 0; tid < Impl::MaxThreads; tid++) { 40011108Sdavid.hashe@amd.com count[tid] = 0; 4017002Snate@binkert.org instList[tid].clear(); 4027002Snate@binkert.org } 4036657Snate@binkert.org 4046657Snate@binkert.org // Initialize the number of free IQ entries. 4056657Snate@binkert.org freeEntries = numEntries; 4066657Snate@binkert.org 4077007Snate@binkert.org // Note that in actuality, the registers corresponding to the logical 4087007Snate@binkert.org // registers start off as ready. 
However this doesn't matter for the 4096657Snate@binkert.org // IQ as the instruction should have been correctly told if those 4106657Snate@binkert.org // registers are ready in rename. Thus it can all be initialized as 4116657Snate@binkert.org // unready. 4126657Snate@binkert.org for (int i = 0; i < numPhysRegs; ++i) { 4136657Snate@binkert.org regScoreboard[i] = false; 4146657Snate@binkert.org } 4156657Snate@binkert.org 4166657Snate@binkert.org for (ThreadID tid = 0; tid < Impl::MaxThreads; ++tid) { 4176657Snate@binkert.org squashedSeqNum[tid] = 0; 4186657Snate@binkert.org } 4196657Snate@binkert.org 4206657Snate@binkert.org for (int i = 0; i < Num_OpClasses; ++i) { 4216657Snate@binkert.org while (!readyInsts[i].empty()) 4226657Snate@binkert.org readyInsts[i].pop(); 4236657Snate@binkert.org queueOnList[i] = false; 4246657Snate@binkert.org readyIt[i] = listOrder.end(); 42510307Snilay@cs.wisc.edu } 42610307Snilay@cs.wisc.edu nonSpecInsts.clear(); 42710307Snilay@cs.wisc.edu listOrder.clear(); 4289298Snilay@cs.wisc.edu deferredMemInsts.clear(); 4296657Snate@binkert.org blockedMemInsts.clear(); 4306657Snate@binkert.org retryMemInsts.clear(); 4316657Snate@binkert.org wbOutstanding = 0; 4326999Snate@binkert.org} 4336657Snate@binkert.org 4346657Snate@binkert.orgtemplate <class Impl> 4356657Snate@binkert.orgvoid 4366657Snate@binkert.orgInstructionQueue<Impl>::setActiveThreads(list<ThreadID> *at_ptr) 4376657Snate@binkert.org{ 4387007Snate@binkert.org activeThreads = at_ptr; 4397007Snate@binkert.org} 4407007Snate@binkert.org 4416657Snate@binkert.orgtemplate <class Impl> 4427002Snate@binkert.orgvoid 4437002Snate@binkert.orgInstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr) 4447002Snate@binkert.org{ 4458086SBrad.Beckmann@amd.com issueToExecuteQueue = i2e_ptr; 4468086SBrad.Beckmann@amd.com} 4478086SBrad.Beckmann@amd.com 4488086SBrad.Beckmann@amd.comtemplate <class Impl> 4498602Snilay@cs.wisc.eduvoid 
4508602Snilay@cs.wisc.eduInstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr) 4518602Snilay@cs.wisc.edu{ 45211025Snilay@cs.wisc.edu timeBuffer = tb_ptr; 4538602Snilay@cs.wisc.edu 4548602Snilay@cs.wisc.edu fromCommit = timeBuffer->getWire(-commitToIEWDelay); 4558086SBrad.Beckmann@amd.com} 4566657Snate@binkert.org 4577007Snate@binkert.orgtemplate <class Impl> 4586657Snate@binkert.orgbool 4596657Snate@binkert.orgInstructionQueue<Impl>::isDrained() const 4606657Snate@binkert.org{ 4616657Snate@binkert.org bool drained = dependGraph.empty() && 4626657Snate@binkert.org instsToExecute.empty() && 4636657Snate@binkert.org wbOutstanding == 0; 4646657Snate@binkert.org for (ThreadID tid = 0; tid < numThreads; ++tid) 4656657Snate@binkert.org drained = drained && memDepUnit[tid].isDrained(); 4666657Snate@binkert.org 4676657Snate@binkert.org return drained; 4686657Snate@binkert.org} 46910917Sbrandon.potter@amd.com 47010917Sbrandon.potter@amd.comtemplate <class Impl> 4716862Sdrh5@cs.wisc.eduvoid 4726862Sdrh5@cs.wisc.eduInstructionQueue<Impl>::drainSanityCheck() const 4736657Snate@binkert.org{ 4746657Snate@binkert.org assert(dependGraph.empty()); 4756657Snate@binkert.org assert(instsToExecute.empty()); 4766657Snate@binkert.org for (ThreadID tid = 0; tid < numThreads; ++tid) 4776657Snate@binkert.org memDepUnit[tid].drainSanityCheck(); 4787007Snate@binkert.org} 4797007Snate@binkert.org 4807002Snate@binkert.orgtemplate <class Impl> 4817007Snate@binkert.orgvoid 4827007Snate@binkert.orgInstructionQueue<Impl>::takeOverFrom() 4837002Snate@binkert.org{ 4847007Snate@binkert.org resetState(); 4857007Snate@binkert.org} 4866657Snate@binkert.org 4876657Snate@binkert.orgtemplate <class Impl> 4886657Snate@binkert.orgint 4896657Snate@binkert.orgInstructionQueue<Impl>::entryAmount(ThreadID num_threads) 4906657Snate@binkert.org{ 4916657Snate@binkert.org if (iqPolicy == SMTQueuePolicy::Partitioned) { 4926657Snate@binkert.org return numEntries / num_threads; 4936657Snate@binkert.org } 
else { 4946657Snate@binkert.org return 0; 4956657Snate@binkert.org } 4966657Snate@binkert.org} 4976657Snate@binkert.org 4986657Snate@binkert.org 4998602Snilay@cs.wisc.edutemplate <class Impl> 5008602Snilay@cs.wisc.eduvoid 50111025Snilay@cs.wisc.eduInstructionQueue<Impl>::resetEntries() 5028602Snilay@cs.wisc.edu{ 5038602Snilay@cs.wisc.edu if (iqPolicy != SMTQueuePolicy::Dynamic || numThreads > 1) { 5048602Snilay@cs.wisc.edu int active_threads = activeThreads->size(); 5058602Snilay@cs.wisc.edu 5068602Snilay@cs.wisc.edu list<ThreadID>::iterator threads = activeThreads->begin(); 5078602Snilay@cs.wisc.edu list<ThreadID>::iterator end = activeThreads->end(); 5088086SBrad.Beckmann@amd.com 5098086SBrad.Beckmann@amd.com while (threads != end) { 5108086SBrad.Beckmann@amd.com ThreadID tid = *threads++; 5118086SBrad.Beckmann@amd.com 5128086SBrad.Beckmann@amd.com if (iqPolicy == SMTQueuePolicy::Partitioned) { 5138086SBrad.Beckmann@amd.com maxEntries[tid] = numEntries / active_threads; 5148086SBrad.Beckmann@amd.com } else if (iqPolicy == SMTQueuePolicy::Threshold && 5158086SBrad.Beckmann@amd.com active_threads == 1) { 5166657Snate@binkert.org maxEntries[tid] = numEntries; 5176657Snate@binkert.org } 5187002Snate@binkert.org } 5196657Snate@binkert.org } 5207007Snate@binkert.org} 5216657Snate@binkert.org 5226657Snate@binkert.orgtemplate <class Impl> 5236657Snate@binkert.orgunsigned 5246657Snate@binkert.orgInstructionQueue<Impl>::numFreeEntries() 5256657Snate@binkert.org{ 5266999Snate@binkert.org return freeEntries; 5276657Snate@binkert.org} 5286657Snate@binkert.org 5296657Snate@binkert.orgtemplate <class Impl> 5306657Snate@binkert.orgunsigned 5316657Snate@binkert.orgInstructionQueue<Impl>::numFreeEntries(ThreadID tid) 5326657Snate@binkert.org{ 5337832Snate@binkert.org return maxEntries[tid] - count[tid]; 5347002Snate@binkert.org} 5357002Snate@binkert.org 5367002Snate@binkert.org// Might want to do something more complex if it knows how many instructions 5377805Snilay@cs.wisc.edu// 
will be issued this cycle. 5386657Snate@binkert.orgtemplate <class Impl> 5396657Snate@binkert.orgbool 5407002Snate@binkert.orgInstructionQueue<Impl>::isFull() 5417002Snate@binkert.org{ 5426657Snate@binkert.org if (freeEntries == 0) { 5436657Snate@binkert.org return(true); 5448086SBrad.Beckmann@amd.com } else { 5458086SBrad.Beckmann@amd.com return(false); 5468086SBrad.Beckmann@amd.com } 5478086SBrad.Beckmann@amd.com} 5488086SBrad.Beckmann@amd.com 5498086SBrad.Beckmann@amd.comtemplate <class Impl> 5508086SBrad.Beckmann@amd.combool 5518086SBrad.Beckmann@amd.comInstructionQueue<Impl>::isFull(ThreadID tid) 5528086SBrad.Beckmann@amd.com{ 5538086SBrad.Beckmann@amd.com if (numFreeEntries(tid) == 0) { 5548086SBrad.Beckmann@amd.com return(true); 5558086SBrad.Beckmann@amd.com } else { 5568086SBrad.Beckmann@amd.com return(false); 5578086SBrad.Beckmann@amd.com } 5588086SBrad.Beckmann@amd.com} 5598086SBrad.Beckmann@amd.com 5608086SBrad.Beckmann@amd.comtemplate <class Impl> 5618086SBrad.Beckmann@amd.combool 5628086SBrad.Beckmann@amd.comInstructionQueue<Impl>::hasReadyInsts() 5638086SBrad.Beckmann@amd.com{ 5648086SBrad.Beckmann@amd.com if (!listOrder.empty()) { 5656657Snate@binkert.org return true; 5666657Snate@binkert.org } 5679773Snilay@cs.wisc.edu 5689773Snilay@cs.wisc.edu for (int i = 0; i < Num_OpClasses; ++i) { 56910301Snilay@cs.wisc.edu if (!readyInsts[i].empty()) { 5706657Snate@binkert.org return true; 5716657Snate@binkert.org } 5727007Snate@binkert.org } 5737007Snate@binkert.org 5747007Snate@binkert.org return false; 5756657Snate@binkert.org} 5766657Snate@binkert.org 5776657Snate@binkert.orgtemplate <class Impl> 5786657Snate@binkert.orgvoid 5796657Snate@binkert.orgInstructionQueue<Impl>::insert(const DynInstPtr &new_inst) 5806657Snate@binkert.org{ 5817007Snate@binkert.org if (new_inst->isFloating()) { 5827007Snate@binkert.org fpInstQueueWrites++; 5837007Snate@binkert.org } else if (new_inst->isVector()) { 5846657Snate@binkert.org vecInstQueueWrites++; 
// (Tail of InstructionQueue<Impl>::insert(); the function's head — including
// the fp/vec stat-counter branches this "else" belongs to — lies above this
// chunk.)
    } else {
        intInstQueueWrites++;
    }
    // Make sure the instruction is valid
    assert(new_inst);

    DPRINTF(IQ, "Adding instruction [sn:%lli] PC %s to the IQ.\n",
            new_inst->seqNum, new_inst->pcState());

    // Caller must guarantee space; insert() is not allowed to fail here.
    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Look through its source registers (physical regs), and mark any
    // dependencies.
    addToDependents(new_inst);

    // Have this instruction set itself as the producer of its destination
    // register(s).
    addToProducers(new_inst);

    // Memory references are tracked by the memory dependence unit rather
    // than going straight onto the ready lists.
    if (new_inst->isMemRef()) {
        memDepUnit[new_inst->threadNumber].insert(new_inst);
    } else {
        addIfReady(new_inst);
    }

    ++iqInstsAdded;

    count[new_inst->threadNumber]++;

    // Invariant: free-entry accounting must agree with the occupancy count.
    assert(freeEntries == (numEntries - countInsts()));
}

// Insert a non-speculative instruction (one that must wait until commit
// signals it via scheduleNonSpec() before it may issue).  Mirrors insert()
// but records the instruction in nonSpecInsts and skips addToDependents()/
// addIfReady(), so it cannot become ready on its own.
template <class Impl>
void
InstructionQueue<Impl>::insertNonSpec(const DynInstPtr &new_inst)
{
    // @todo: Clean up this code; can do it by setting inst as unable
    // to issue, then calling normal insert on the inst.
    if (new_inst->isFloating()) {
        fpInstQueueWrites++;
    } else if (new_inst->isVector()) {
        vecInstQueueWrites++;
    } else {
        intInstQueueWrites++;
    }

    assert(new_inst);

    // Track by sequence number so commit can later mark it schedulable.
    nonSpecInsts[new_inst->seqNum] = new_inst;

    DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %s "
            "to the IQ.\n",
            new_inst->seqNum, new_inst->pcState());

    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Have this instruction set itself as the producer of its destination
    // register(s).
    addToProducers(new_inst);

    // If it's a memory instruction, add it to the memory dependency
    // unit.
    if (new_inst->isMemRef()) {
        memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
    }

    ++iqNonSpecInstsAdded;

    count[new_inst->threadNumber]++;

    assert(freeEntries == (numEntries - countInsts()));
}

// Insert a memory barrier: register it with the memory dependence unit
// first, then treat it as a normal non-speculative instruction.
template <class Impl>
void
InstructionQueue<Impl>::insertBarrier(const DynInstPtr &barr_inst)
{
    memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);

    insertNonSpec(barr_inst);
}

// Pop the next instruction scheduled for execution (FIFO order from
// instsToExecute), updating the read-stat counter for its class.
template <class Impl>
typename Impl::DynInstPtr
InstructionQueue<Impl>::getInstToExecute()
{
    assert(!instsToExecute.empty());
    // Move out of the deque slot to avoid a refcount copy of the DynInstPtr.
    DynInstPtr inst = std::move(instsToExecute.front());
    instsToExecute.pop_front();
    if (inst->isFloating()) {
        fpInstQueueReads++;
    } else if (inst->isVector()) {
        vecInstQueueReads++;
    } else {
        intInstQueueReads++;
    }
    return inst;
}
// Add op_class's ready queue to the age-ordered list of ready queues
// (listOrder), positioned by the sequence number of its oldest instruction
// so scheduling scans oldest-first across op classes.
template <class Impl>
void
InstructionQueue<Impl>::addToOrderList(OpClass op_class)
{
    assert(!readyInsts[op_class].empty());

    ListOrderEntry queue_entry;

    queue_entry.queueType = op_class;

    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    ListOrderIt list_it = listOrder.begin();
    ListOrderIt list_end_it = listOrder.end();

    // Linear scan for the first entry younger than ours; insert before it.
    while (list_it != list_end_it) {
        if ((*list_it).oldestInst > queue_entry.oldestInst) {
            break;
        }

        list_it++;
    }

    readyIt[op_class] = listOrder.insert(list_it, queue_entry);
    queueOnList[op_class] = true;
}

// Re-insert an op class's listOrder entry after its oldest ready
// instruction changed (got younger).  The caller erases the stale entry;
// this only searches forward from the old position since the new oldest
// instruction can only be younger.
template <class Impl>
void
InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
{
    // Get iterator of next item on the list
    // Delete the original iterator
    // Determine if the next item is either the end of the list or younger
    // than the new instruction.  If so, then add in a new iterator right here.
    // If not, then move along.
    ListOrderEntry queue_entry;
    OpClass op_class = (*list_order_it).queueType;
    ListOrderIt next_it = list_order_it;

    ++next_it;

    queue_entry.queueType = op_class;
    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    while (next_it != listOrder.end() &&
           (*next_it).oldestInst < queue_entry.oldestInst) {
        ++next_it;
    }

    readyIt[op_class] = listOrder.insert(next_it, queue_entry);
}

// Callback run when a long-latency (non-single-cycle) FU operation
// finishes: releases the FU (unless fu_idx is -1, i.e. it was freed at
// issue because the FU is pipelined) and forwards the instruction to the
// execute stage via the issue-to-execute queue.
template <class Impl>
void
InstructionQueue<Impl>::processFUCompletion(const DynInstPtr &inst, int fu_idx)
{
    DPRINTF(IQ, "Processing FU completion [sn:%lli]\n", inst->seqNum);
    assert(!cpu->switchedOut());
    // The CPU could have been sleeping until this op completed (*extremely*
    // long latency op).  Wake it if it was.  This may be overkill.
    --wbOutstanding;
    iewStage->wakeCPU();

    if (fu_idx > -1)
        fuPool->freeUnitNextCycle(fu_idx);

    // @todo: Ensure that these FU Completions happen at the beginning
    // of a cycle, otherwise they could add too many instructions to
    // the queue.
    // access(-1) targets the current-cycle slot of the time buffer —
    // the completion is visible to execute this cycle, not next.
    issueToExecuteQueue->access(-1)->size++;
    instsToExecute.push_back(inst);
}

// @todo: Figure out a better way to remove the squashed items from the
// lists.  Checking the top item of each list to see if it's squashed
// wastes time and forces jumps.

// Main per-cycle scheduling loop: walks the age-ordered list of ready
// queues, grabs an FU for each oldest ready instruction (up to issue
// width), and hands single-cycle ops straight to execute while scheduling
// an FUCompletion event for multi-cycle ops.  Squashed instructions found
// at the head of a ready queue are discarded here.
template <class Impl>
void
InstructionQueue<Impl>::scheduleReadyInsts()
{
    DPRINTF(IQ, "Attempting to schedule ready instructions from "
            "the IQ.\n");

    IssueStruct *i2e_info = issueToExecuteQueue->access(0);

    // First drain deferred (translation-complete) and cache-retry memory
    // instructions back onto the ready lists so they compete this cycle.
    DynInstPtr mem_inst;
    while (mem_inst = std::move(getDeferredMemInstToExecute())) {
        addReadyMemInst(mem_inst);
    }

    // See if any cache blocked instructions are able to be executed
    while (mem_inst = std::move(getBlockedMemInstToExecute())) {
        addReadyMemInst(mem_inst);
    }

    // Have iterator to head of the list
    // While I haven't exceeded bandwidth or reached the end of the list,
    // Try to get a FU that can do what this op needs.
    // If successful, change the oldestInst to the new top of the list, put
    // the queue in the proper place in the list.
    // Increment the iterator.
    // This will avoid trying to schedule a certain op class if there are no
    // FUs that handle it.
    int total_issued = 0;
    ListOrderIt order_it = listOrder.begin();
    ListOrderIt order_end_it = listOrder.end();

    while (total_issued < totalWidth && order_it != order_end_it) {
        OpClass op_class = (*order_it).queueType;

        assert(!readyInsts[op_class].empty());

        DynInstPtr issuing_inst = readyInsts[op_class].top();

        if (issuing_inst->isFloating()) {
            fpInstQueueReads++;
        } else if (issuing_inst->isVector()) {
            vecInstQueueReads++;
        } else {
            intInstQueueReads++;
        }

        // listOrder entry must track the head of the ready queue exactly.
        assert(issuing_inst->seqNum == (*order_it).oldestInst);

        if (issuing_inst->isSquashed()) {
            readyInsts[op_class].pop();

            // Re-position (or drop) this op class's listOrder entry now
            // that its oldest instruction changed.
            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            listOrder.erase(order_it++);

            ++iqSquashedInstsIssued;

            continue;
        }

        int idx = FUPool::NoCapableFU;
        Cycles op_latency = Cycles(1);
        ThreadID tid = issuing_inst->threadNumber;

        if (op_class != No_OpClass) {
            idx = fuPool->getUnit(op_class);
            if (issuing_inst->isFloating()) {
                fpAluAccesses++;
            } else if (issuing_inst->isVector()) {
                vecAluAccesses++;
            } else {
                intAluAccesses++;
            }
            if (idx > FUPool::NoFreeFU) {
                op_latency = fuPool->getOpLatency(op_class);
            }
        }

        // If we have an instruction that doesn't require a FU, or a
        // valid FU, then schedule for execution.
        if (idx != FUPool::NoFreeFU) {
            if (op_latency == Cycles(1)) {
                // Single-cycle op: forward to execute immediately.
                i2e_info->size++;
                instsToExecute.push_back(issuing_inst);

                // Add the FU onto the list of FU's to be freed next
                // cycle if we used one.
                if (idx >= 0)
                    fuPool->freeUnitNextCycle(idx);
            } else {
                bool pipelined = fuPool->isPipelined(op_class);
                // Generate completion event for the FU
                ++wbOutstanding;
                FUCompletion *execution = new FUCompletion(issuing_inst,
                                                           idx, this);

                cpu->schedule(execution,
                              cpu->clockEdge(Cycles(op_latency - 1)));

                if (!pipelined) {
                    // If FU isn't pipelined, then it must be freed
                    // upon the execution completing.
                    execution->setFreeFU();
                } else {
                    // Add the FU onto the list of FU's to be freed next cycle.
                    fuPool->freeUnitNextCycle(idx);
                }
            }

            DPRINTF(IQ, "Thread %i: Issuing instruction PC %s "
                    "[sn:%lli]\n",
                    tid, issuing_inst->pcState(),
                    issuing_inst->seqNum);

            readyInsts[op_class].pop();

            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            issuing_inst->setIssued();
            ++total_issued;

#if TRACING_ON
            issuing_inst->issueTick = curTick() - issuing_inst->fetchTick;
#endif

            if (!issuing_inst->isMemRef()) {
                // Memory instructions can not be freed from the IQ until they
                // complete.
                ++freeEntries;
                count[tid]--;
                issuing_inst->clearInIQ();
            } else {
                memDepUnit[tid].issue(issuing_inst);
            }

            listOrder.erase(order_it++);
            statIssuedInstType[tid][op_class]++;
        } else {
            // No free FU for this class: record the structural hazard and
            // try the next op class on the list.
            statFuBusy[op_class]++;
            fuBusy[tid]++;
            ++order_it;
        }
    }

    numIssuedDist.sample(total_issued);
    iqInstsIssued+= total_issued;

    // If we issued any instructions, tell the CPU we had activity.
    // @todo If the way deferred memory instructions are handeled due to
    // translation changes then the deferredMemInsts condition should be removed
    // from the code below.
    if (total_issued || !retryMemInsts.empty() || !deferredMemInsts.empty()) {
        cpu->activityThisCycle();
    } else {
        DPRINTF(IQ, "Not able to schedule any instructions.\n");
    }
}

// Commit has signalled that the non-speculative instruction with this
// sequence number may now issue.  Marks it schedulable and removes it from
// the nonSpecInsts map.
template <class Impl>
void
InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
{
    DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
            "to execute.\n", inst);

    NonSpecMapIt inst_it = nonSpecInsts.find(inst);

    assert(inst_it != nonSpecInsts.end());

    ThreadID tid = (*inst_it).second->threadNumber;

    (*inst_it).second->setAtCommit();

    (*inst_it).second->setCanIssue();

    // Memory references go through the mem-dep unit; everything else may
    // be placed on a ready list directly.
    if (!(*inst_it).second->isMemRef()) {
        addIfReady((*inst_it).second);
    } else {
        memDepUnit[tid].nonSpecInstReady((*inst_it).second);
    }

    // Drop the map's reference before erasing the entry.
    (*inst_it).second = NULL;

    nonSpecInsts.erase(inst_it);
}

// Retire from instList[tid] all instructions at or below the committed
// sequence number.  Note this only trims the age-ordered list; entry
// accounting was already done at issue/complete time.
template <class Impl>
void
InstructionQueue<Impl>::commit(const InstSeqNum &inst, ThreadID tid)
{
    DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n",
            tid,inst);

    ListIt iq_it = instList[tid].begin();

    while (iq_it != instList[tid].end() &&
           (*iq_it)->seqNum <= inst) {
        ++iq_it;
        instList[tid].pop_front();
    }

    assert(freeEntries == (numEntries - countInsts()));
}

// An instruction finished executing: wake every instruction waiting on one
// of its destination registers, mark those registers ready in the
// scoreboard, and return how many dependents were woken.
template <class Impl>
int
InstructionQueue<Impl>::wakeDependents(const DynInstPtr &completed_inst)
{
    int dependents = 0;

    // The instruction queue here takes care of both floating and int ops
    if (completed_inst->isFloating()) {
        fpInstQueueWakeupAccesses++;
    } else if (completed_inst->isVector()) {
        vecInstQueueWakeupAccesses++;
    } else {
        intInstQueueWakeupAccesses++;
    }

    DPRINTF(IQ, "Waking dependents of completed instruction.\n");

    assert(!completed_inst->isSquashed());

    // Tell the memory dependence unit to wake any dependents on this
    // instruction if it is a memory instruction.  Also complete the memory
    // instruction at this point since we know it executed without issues.
    // @todo: Might want to rename "completeMemInst" to something that
    // indicates that it won't need to be replayed, and call this
    // earlier.  Might not be a big deal.
    if (completed_inst->isMemRef()) {
        memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
        completeMemInst(completed_inst);
    } else if (completed_inst->isMemBarrier() ||
               completed_inst->isWriteBarrier()) {
        memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
    }

    for (int dest_reg_idx = 0;
         dest_reg_idx < completed_inst->numDestRegs();
         dest_reg_idx++)
    {
        PhysRegIdPtr dest_reg =
            completed_inst->renamedDestRegIdx(dest_reg_idx);

        // Special case of uniq or control registers.  They are not
        // handled by the IQ and thus have no dependency graph entry.
        if (dest_reg->isFixedMapping()) {
            DPRINTF(IQ, "Reg %d [%s] is part of a fix mapping, skipping\n",
                    dest_reg->index(), dest_reg->className());
            continue;
        }

        DPRINTF(IQ, "Waking any dependents on register %i (%s).\n",
                dest_reg->index(),
                dest_reg->className());

        //Go through the dependency chain, marking the registers as
        //ready within the waiting instructions.
        DynInstPtr dep_inst = dependGraph.pop(dest_reg->flatIndex());

        while (dep_inst) {
            DPRINTF(IQ, "Waking up a dependent instruction, [sn:%lli] "
                    "PC %s.\n", dep_inst->seqNum, dep_inst->pcState());

            // Might want to give more information to the instruction
            // so that it knows which of its source registers is
            // ready.  However that would mean that the dependency
            // graph entries would need to hold the src_reg_idx.
            dep_inst->markSrcRegReady();

            addIfReady(dep_inst);

            dep_inst = dependGraph.pop(dest_reg->flatIndex());

            ++dependents;
        }

        // Reset the head node now that all of its dependents have
        // been woken up.
        assert(dependGraph.empty(dest_reg->flatIndex()));
        dependGraph.clearInst(dest_reg->flatIndex());

        // Mark the scoreboard as having that register ready.
        regScoreboard[dest_reg->flatIndex()] = true;
    }
    return dependents;
}

// Put a now-ready memory instruction on its op class's ready list, fixing
// up the listOrder entry if this instruction is older than the previous
// oldest for that class.
template <class Impl>
void
InstructionQueue<Impl>::addReadyMemInst(const DynInstPtr &ready_inst)
{
    OpClass op_class = ready_inst->opClass();

    readyInsts[op_class].push(ready_inst);

    // Will need to reorder the list if either a queue is not on the list,
    // or it has an older instruction than last time.
    if (!queueOnList[op_class]) {
        addToOrderList(op_class);
    } else if (readyInsts[op_class].top()->seqNum <
               (*readyIt[op_class]).oldestInst) {
        listOrder.erase(readyIt[op_class]);
        addToOrderList(op_class);
    }

    DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
            "the ready list, PC %s opclass:%i [sn:%lli].\n",
            ready_inst->pcState(), op_class, ready_inst->seqNum);
}

// Send a memory instruction back to the memory dependence unit to be
// rescheduled (e.g. after a replay condition), clearing its translation
// and can-issue state first.
template <class Impl>
void
InstructionQueue<Impl>::rescheduleMemInst(const DynInstPtr &resched_inst)
{
    DPRINTF(IQ, "Rescheduling mem inst [sn:%lli]\n", resched_inst->seqNum);

    // Reset DTB translation state
    resched_inst->translationStarted(false);
    resched_inst->translationCompleted(false);

    resched_inst->clearCanIssue();
    memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
}

// Ask the memory dependence unit to replay stalled memory instructions for
// this instruction's thread.
template <class Impl>
void
InstructionQueue<Impl>::replayMemInst(const DynInstPtr &replay_inst)
{
    memDepUnit[replay_inst->threadNumber].replay();
}

// A memory instruction's access is fully done: free its IQ entry (deferred
// until now — see scheduleReadyInsts), mark the op done, and notify the
// memory dependence unit.
template <class Impl>
void
InstructionQueue<Impl>::completeMemInst(const DynInstPtr &completed_inst)
{
    ThreadID tid = completed_inst->threadNumber;

    DPRINTF(IQ, "Completing mem instruction PC: %s [sn:%lli]\n",
            completed_inst->pcState(), completed_inst->seqNum);

    ++freeEntries;

    completed_inst->memOpDone(true);

    memDepUnit[tid].completed(completed_inst);
    count[tid]--;
}

// Park a memory instruction whose translation is still outstanding; it is
// re-examined by getDeferredMemInstToExecute().
template <class Impl>
void
InstructionQueue<Impl>::deferMemInst(const DynInstPtr &deferred_inst)
{
    deferredMemInsts.push_back(deferred_inst);
}

// Park a memory instruction that hit a blocked cache, resetting its
// translation/issue state so it can go through issue again when the cache
// unblocks.
template <class Impl>
void
InstructionQueue<Impl>::blockMemInst(const DynInstPtr &blocked_inst)
{
    blocked_inst->translationStarted(false);
    blocked_inst->translationCompleted(false);

    blocked_inst->clearIssued();
    blocked_inst->clearCanIssue();
    blockedMemInsts.push_back(blocked_inst);
}

// The cache is no longer blocked: move all blocked instructions onto the
// retry list (consumed by scheduleReadyInsts) and wake the CPU.
template <class Impl>
void
InstructionQueue<Impl>::cacheUnblocked()
{
    retryMemInsts.splice(retryMemInsts.end(), blockedMemInsts);
    // Get the CPU ticking again
    cpu->wakeCPU();
}

// Return (and remove) the first deferred memory instruction whose
// translation has completed — or which was squashed, so it can drain —
// or nullptr if none qualifies.
template <class Impl>
typename Impl::DynInstPtr
InstructionQueue<Impl>::getDeferredMemInstToExecute()
{
    for (ListIt it = deferredMemInsts.begin(); it != deferredMemInsts.end();
         ++it) {
        if ((*it)->translationCompleted() || (*it)->isSquashed()) {
            DynInstPtr mem_inst = std::move(*it);
            deferredMemInsts.erase(it);
            return mem_inst;
        }
    }
    return nullptr;
}

// Pop the next cache-retry memory instruction, or nullptr if none.
template <class Impl>
typename Impl::DynInstPtr
InstructionQueue<Impl>::getBlockedMemInstToExecute()
{
    if (retryMemInsts.empty()) {
        return nullptr;
    } else {
        DynInstPtr mem_inst = std::move(retryMemInsts.front());
        retryMemInsts.pop_front();
        return mem_inst;
    }
}

// Report a memory-ordering violation (a load that executed before an
// older, conflicting store) to the memory dependence unit.
template <class Impl>
void
InstructionQueue<Impl>::violation(const DynInstPtr &store,
                                  const DynInstPtr &faulting_load)
{
    intInstQueueWrites++;
    memDepUnit[store->threadNumber].violation(store, faulting_load);
}

// Entry point for a squash signal from commit: record the squash sequence
// number from the time buffer, squash the IQ's own state, then the memory
// dependence unit's.
template <class Impl>
void
InstructionQueue<Impl>::squash(ThreadID tid)
{
    DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
            "the IQ.\n", tid);

    // Read instruction sequence number of last instruction out of the
    // time buffer.
    squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;

    doSquash(tid);

    // Also tell the memory dependence unit to squash.
    memDepUnit[tid].squash(squashedSeqNum[tid], tid);
}

// Walk instList[tid] from youngest to oldest, squashing every instruction
// younger than squashedSeqNum[tid]: remove it from the dependency graph or
// the non-speculative map as appropriate, release its IQ entry, and mark
// it so it flows through the rest of the pipeline as a squashed inst.
template <class Impl>
void
InstructionQueue<Impl>::doSquash(ThreadID tid)
{
    // Start at the tail.
    ListIt squash_it = instList[tid].end();
    --squash_it;

    DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
            tid, squashedSeqNum[tid]);

    // Squash any instructions younger than the squashed sequence number
    // given.
    while (squash_it != instList[tid].end() &&
           (*squash_it)->seqNum > squashedSeqNum[tid]) {

        DynInstPtr squashed_inst = (*squash_it);
        if (squashed_inst->isFloating()) {
            fpInstQueueWrites++;
        } else if (squashed_inst->isVector()) {
            vecInstQueueWrites++;
        } else {
            intInstQueueWrites++;
        }

        // Only handle the instruction if it actually is in the IQ and
        // hasn't already been squashed in the IQ.
        if (squashed_inst->threadNumber != tid ||
            squashed_inst->isSquashedInIQ()) {
            --squash_it;
            continue;
        }

        if (!squashed_inst->isIssued() ||
            (squashed_inst->isMemRef() &&
             !squashed_inst->memOpDone())) {

            DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %s squashed.\n",
                    tid, squashed_inst->seqNum, squashed_inst->pcState());

            // Acquire/release accesses look like barriers but track their
            // dependencies through the dependency graph, so treat them as
            // ordinary (speculative) instructions below.
            bool is_acq_rel = squashed_inst->isMemBarrier() &&
                         (squashed_inst->isLoad() ||
                           (squashed_inst->isStore() &&
                             !squashed_inst->isStoreConditional()));

            // Remove the instruction from the dependency list.
            if (is_acq_rel ||
                (!squashed_inst->isNonSpeculative() &&
                 !squashed_inst->isStoreConditional() &&
                 !squashed_inst->isMemBarrier() &&
                 !squashed_inst->isWriteBarrier())) {

                for (int src_reg_idx = 0;
                     src_reg_idx < squashed_inst->numSrcRegs();
                     src_reg_idx++)
                {
                    PhysRegIdPtr src_reg =
                        squashed_inst->renamedSrcRegIdx(src_reg_idx);

                    // Only remove it from the dependency graph if it
                    // was placed there in the first place.

                    // Instead of doing a linked list traversal, we
                    // can just remove these squashed instructions
                    // either at issue time, or when the register is
                    // overwritten.  The only downside to this is it
                    // leaves more room for error.

                    if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
                        !src_reg->isFixedMapping()) {
                        dependGraph.remove(src_reg->flatIndex(),
                                           squashed_inst);
                    }


                    ++iqSquashedOperandsExamined;
                }
            } else if (!squashed_inst->isStoreConditional() ||
                       !squashed_inst->isCompleted()) {
                NonSpecMapIt ns_inst_it =
                    nonSpecInsts.find(squashed_inst->seqNum);

                // we remove non-speculative instructions from
                // nonSpecInsts already when they are ready, and so we
                // cannot always expect to find them
                if (ns_inst_it == nonSpecInsts.end()) {
                    // loads that became ready but stalled on a
                    // blocked cache are alreayd removed from
                    // nonSpecInsts, and have not faulted
                    assert(squashed_inst->getFault() != NoFault ||
                           squashed_inst->isMemRef());
                } else {

                    (*ns_inst_it).second = NULL;

                    nonSpecInsts.erase(ns_inst_it);

                    ++iqSquashedNonSpecRemoved;
                }
            }

            // Might want to also clear out the head of the dependency graph.

            // Mark it as squashed within the IQ.
            squashed_inst->setSquashedInIQ();

            // @todo: Remove this hack where several statuses are set so the
            // inst will flow through the rest of the pipeline.
            squashed_inst->setIssued();
            squashed_inst->setCanCommit();
            squashed_inst->clearInIQ();

            //Update Thread IQ Count
            count[squashed_inst->threadNumber]--;

            ++freeEntries;
        }

        // IQ clears out the heads of the dependency graph only when
        // instructions reach writeback stage. If an instruction is squashed
        // before writeback stage, its head of dependency graph would not be
        // cleared out; it holds the instruction's DynInstPtr. This prevents
        // freeing the squashed instruction's DynInst.
        // Thus, we need to manually clear out the squashed instructions' heads
        // of dependency graph.
        for (int dest_reg_idx = 0;
             dest_reg_idx < squashed_inst->numDestRegs();
             dest_reg_idx++)
        {
            PhysRegIdPtr dest_reg =
                squashed_inst->renamedDestRegIdx(dest_reg_idx);
            if (dest_reg->isFixedMapping()){
                continue;
            }
            assert(dependGraph.empty(dest_reg->flatIndex()));
            dependGraph.clearInst(dest_reg->flatIndex());
        }
        instList[tid].erase(squash_it--);
        ++iqSquashedInstsExamined;
    }
}

// For each not-yet-ready source register of new_inst, add the instruction
// to that register's dependency chain (unless the IQ scoreboard shows the
// register became ready in flight, in which case mark the source ready).
// Returns true if anything was added to the dependency graph.
template <class Impl>
bool
InstructionQueue<Impl>::addToDependents(const DynInstPtr &new_inst)
{
    // Loop through the instruction's source registers, adding
    // them to the dependency list if they are not ready.
    int8_t total_src_regs = new_inst->numSrcRegs();
    bool return_val = false;

    for (int src_reg_idx = 0;
         src_reg_idx < total_src_regs;
         src_reg_idx++)
    {
        // Only add it to the dependency graph if it's not ready.
        if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
            PhysRegIdPtr src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);

            // Check the IQ's scoreboard to make sure the register
            // hasn't become ready while the instruction was in flight
            // between stages.  Only if it really isn't ready should
            // it be added to the dependency graph.
            if (src_reg->isFixedMapping()) {
                continue;
            } else if (!regScoreboard[src_reg->flatIndex()]) {
                DPRINTF(IQ, "Instruction PC %s has src reg %i (%s) that "
                        "is being added to the dependency chain.\n",
                        new_inst->pcState(), src_reg->index(),
                        src_reg->className());

                dependGraph.insert(src_reg->flatIndex(), new_inst);

                // Change the return value to indicate that something
                // was added to the dependency graph.
                return_val = true;
            } else {
                DPRINTF(IQ, "Instruction PC %s has src reg %i (%s) that "
                        "became ready before it reached the IQ.\n",
                        new_inst->pcState(), src_reg->index(),
                        src_reg->className());
                // Mark a register ready within the instruction.
                new_inst->markSrcRegReady(src_reg_idx);
            }
        }
    }

    return return_val;
}

// Record new_inst as the producer of each of its (non-fixed-mapping)
// destination registers: place it at the head of the dependency graph
// entry and mark the register not-ready in the scoreboard.
template <class Impl>
void
InstructionQueue<Impl>::addToProducers(const DynInstPtr &new_inst)
{
    // Nothing really needs to be marked when an instruction becomes
    // the producer of a register's value, but for convenience a ptr
    // to the producing instruction will be placed in the head node of
    // the dependency links.
    int8_t total_dest_regs = new_inst->numDestRegs();

    for (int dest_reg_idx = 0;
         dest_reg_idx < total_dest_regs;
         dest_reg_idx++)
    {
        PhysRegIdPtr dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);

        // Some registers have fixed mapping, and there is no need to track
        // dependencies as these instructions must be executed at commit.
        if (dest_reg->isFixedMapping()) {
            continue;
        }

        // A non-empty head here means rename gave two in-flight producers
        // the same physical register — a rename bug, so fail loudly.
        if (!dependGraph.empty(dest_reg->flatIndex())) {
            dependGraph.dump();
            panic("Dependency graph %i (%s) (flat: %i) not empty!",
                  dest_reg->index(), dest_reg->className(),
                  dest_reg->flatIndex());
        }

        dependGraph.setInst(dest_reg->flatIndex(), new_inst);

        // Mark the scoreboard to say it's not yet ready.
        regScoreboard[dest_reg->flatIndex()] = false;
    }
}

// If inst now has all source registers available, route it to the ready
// list for its op class (or to the memory dependence unit for memory
// references), keeping the listOrder structure consistent.
template <class Impl>
void
InstructionQueue<Impl>::addIfReady(const DynInstPtr &inst)
{
    // If the instruction now has all of its source registers
    // available, then add it to the list of ready instructions.
    if (inst->readyToIssue()) {

        //Add the instruction to the proper ready list.
        if (inst->isMemRef()) {

            DPRINTF(IQ, "Checking if memory instruction can issue.\n");

            // Message to the mem dependence unit that this instruction has
            // its registers ready.
            memDepUnit[inst->threadNumber].regsReady(inst);

            return;
        }

        OpClass op_class = inst->opClass();

        DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
                "the ready list, PC %s opclass:%i [sn:%lli].\n",
                inst->pcState(), op_class, inst->seqNum);

        readyInsts[op_class].push(inst);

        // Will need to reorder the list if either a queue is not on the list,
        // or it has an older instruction than last time.
        if (!queueOnList[op_class]) {
            addToOrderList(op_class);
        } else if (readyInsts[op_class].top()->seqNum <
                   (*readyIt[op_class]).oldestInst) {
            listOrder.erase(readyIt[op_class]);
            addToOrderList(op_class);
        }
    }
}

// Number of entries currently occupied.  The brute-force recount below is
// compiled out; the live version derives it from the free-entry counter.
template <class Impl>
int
InstructionQueue<Impl>::countInsts()
{
#if 0
    //ksewell:This works but definitely could use a cleaner write
    //with a more intuitive way of counting. Right now it's
    //just brute force ....
    // Change the #if if you want to use this method.
    int total_insts = 0;

    for (ThreadID tid = 0; tid < numThreads; ++tid) {
        ListIt count_it = instList[tid].begin();

        while (count_it != instList[tid].end()) {
            if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
                if (!(*count_it)->isIssued()) {
                    ++total_insts;
                } else if ((*count_it)->isMemRef() &&
                           !(*count_it)->memOpDone) {
                    // Loads that have not been marked as executed still count
                    // towards the total instructions.
                    ++total_insts;
                }
            }

            ++count_it;
        }
    }

    return total_insts;
#else
    return numEntries - freeEntries;
#endif
}

// Debug helper: print the sizes of the ready lists, the non-speculative
// map contents, and the listOrder sequence.
template <class Impl>
void
InstructionQueue<Impl>::dumpLists()
{
    for (int i = 0; i < Num_OpClasses; ++i) {
        cprintf("Ready list %i size: %i\n", i, readyInsts[i].size());

        cprintf("\n");
    }

    cprintf("Non speculative list size: %i\n", nonSpecInsts.size());

    NonSpecMapIt non_spec_it = nonSpecInsts.begin();
    NonSpecMapIt non_spec_end_it = nonSpecInsts.end();

    cprintf("Non speculative list: ");

    while (non_spec_it != non_spec_end_it) {
        cprintf("%s [sn:%lli]", (*non_spec_it).second->pcState(),
                (*non_spec_it).second->seqNum);
        ++non_spec_it;
    }

    cprintf("\n");

    ListOrderIt list_order_it = listOrder.begin();
    ListOrderIt list_order_end_it = listOrder.end();
    int i = 1;

    cprintf("List order: ");

    while (list_order_it != list_order_end_it) {
        cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
                (*list_order_it).oldestInst);

        ++list_order_it;
        ++i;
    }

    cprintf("\n");
}


// Debug helper: print every instruction in each thread's IQ list and then
// the insts-to-execute list, with per-inst status flags.
template <class Impl>
void
InstructionQueue<Impl>::dumpInsts()
{
    for (ThreadID tid = 0; tid < numThreads; ++tid) {
        int num = 0;
        int valid_num = 0;
        ListIt inst_list_it = instList[tid].begin();

        while (inst_list_it != instList[tid].end()) {
            cprintf("Instruction:%i\n", num);
            if (!(*inst_list_it)->isSquashed()) {
                if (!(*inst_list_it)->isIssued()) {
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                } else if ((*inst_list_it)->isMemRef() &&
                           !(*inst_list_it)->memOpDone()) {
                    // Loads that have not been marked as executed
                    // still count towards the total instructions.
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                }
            }

            cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n"
                    "Issued:%i\nSquashed:%i\n",
                    (*inst_list_it)->pcState(),
                    (*inst_list_it)->seqNum,
                    (*inst_list_it)->threadNumber,
                    (*inst_list_it)->isIssued(),
                    (*inst_list_it)->isSquashed());

            if ((*inst_list_it)->isMemRef()) {
                cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone());
            }

            cprintf("\n");

            inst_list_it++;
            ++num;
        }
    }

    cprintf("Insts to Execute list:\n");

    int num = 0;
    int valid_num = 0;
    ListIt inst_list_it = instsToExecute.begin();

    while (inst_list_it != instsToExecute.end())
    {
        cprintf("Instruction:%i\n",
                num);
        if (!(*inst_list_it)->isSquashed()) {
            if (!(*inst_list_it)->isIssued()) {
                ++valid_num;
                cprintf("Count:%i\n", valid_num);
            } else if ((*inst_list_it)->isMemRef() &&
                       !(*inst_list_it)->memOpDone()) {
                // Loads that have not been marked as executed
                // still count towards the total instructions.
                ++valid_num;
                cprintf("Count:%i\n", valid_num);
            }
        }

        cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n"
                "Issued:%i\nSquashed:%i\n",
                (*inst_list_it)->pcState(),
                (*inst_list_it)->seqNum,
                (*inst_list_it)->threadNumber,
                (*inst_list_it)->isIssued(),
                (*inst_list_it)->isSquashed());

        if ((*inst_list_it)->isMemRef()) {
            cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone());
        }

        cprintf("\n");

        inst_list_it++;
        ++num;
    }
}

#endif//__CPU_O3_INST_QUEUE_IMPL_HH__