/*
 * inst_queue_impl.hh revision 14025:3a133070aa2e
 *
 * Copyright (c) 2011-2014, 2017-2019 ARM Limited
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#ifndef __CPU_O3_INST_QUEUE_IMPL_HH__
#define __CPU_O3_INST_QUEUE_IMPL_HH__

#include <limits>
#include <vector>

#include "base/logging.hh"
#include "cpu/o3/fu_pool.hh"
#include "cpu/o3/inst_queue.hh"
#include "debug/IQ.hh"
#include "enums/OpClass.hh"
#include "params/DerivO3CPU.hh"
#include "sim/core.hh"

// clang complains about std::set being overloaded with Packet::set if
// we open up the entire namespace std
using std::list;

// FUCompletion: event scheduled when a functional unit finishes with an
// instruction.  Created with AutoDelete, so the event destroys itself
// after process() runs; Stat_Event_Pri orders it relative to other
// same-tick events.
template <class Impl>
InstructionQueue<Impl>::FUCompletion::FUCompletion(const DynInstPtr &_inst,
    int fu_idx, InstructionQueue<Impl> *iq_ptr)
    : Event(Stat_Event_Pri, AutoDelete),
      inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false)
{
}

// Hands the completed instruction back to the IQ.  The FU index is only
// forwarded (so the IQ can free the unit) when freeFU was set; otherwise
// -1 is passed.  Dropping the inst reference here releases our hold on it.
template <class Impl>
void
InstructionQueue<Impl>::FUCompletion::process()
{
    iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1);
    inst = NULL;
}


template <class Impl>
const char *
InstructionQueue<Impl>::FUCompletion::description() const
{
    return "Functional unit completion";
}

// Constructs the IQ: sizes the dependency graph and scoreboard to the
// total physical register count, initializes the per-thread memory
// dependence units, and applies the SMT entry-sharing policy.
template <class Impl>
InstructionQueue<Impl>::InstructionQueue(O3CPU *cpu_ptr, IEW *iew_ptr,
                                         DerivO3CPUParams *params)
    : cpu(cpu_ptr),
      iewStage(iew_ptr),
      fuPool(params->fuPool),
      iqPolicy(params->smtIQPolicy),
      numEntries(params->numIQEntries),
      totalWidth(params->issueWidth),
      commitToIEWDelay(params->commitToIEWDelay)
{
    assert(fuPool);

    numThreads = params->numThreads;

    // Set the number of total physical registers
    // As the vector registers have two addressing modes, they are added twice
    numPhysRegs = params->numPhysIntRegs + params->numPhysFloatRegs +
                    params->numPhysVecRegs +
                    params->numPhysVecRegs * TheISA::NumVecElemPerVecReg +
                    params->numPhysVecPredRegs +
                    params->numPhysCCRegs;

    //Create an entry for each physical register within the
    //dependency graph.
    dependGraph.resize(numPhysRegs);

    // Resize the register scoreboard.
    regScoreboard.resize(numPhysRegs);

    //Initialize Mem Dependence Units
    for (ThreadID tid = 0; tid < Impl::MaxThreads; tid++) {
        memDepUnit[tid].init(params, tid);
        memDepUnit[tid].setIQ(this);
    }

    resetState();

    //Figure out resource sharing policy
    if (iqPolicy == SMTQueuePolicy::Dynamic) {
        //Set Max Entries to Total ROB Capacity
        for (ThreadID tid = 0; tid < numThreads; tid++) {
            maxEntries[tid] = numEntries;
        }

    } else if (iqPolicy == SMTQueuePolicy::Partitioned) {
        //@todo:make work if part_amt doesnt divide evenly.
        int part_amt = numEntries / numThreads;

        //Divide ROB up evenly
        for (ThreadID tid = 0; tid < numThreads; tid++) {
            maxEntries[tid] = part_amt;
        }

        DPRINTF(IQ, "IQ sharing policy set to Partitioned:"
                "%i entries per thread.\n",part_amt);
    } else if (iqPolicy == SMTQueuePolicy::Threshold) {
        double threshold =  (double)params->smtIQThreshold / 100;

        int thresholdIQ = (int)((double)threshold * numEntries);

        //Divide up by threshold amount
        for (ThreadID tid = 0; tid < numThreads; tid++) {
            maxEntries[tid] = thresholdIQ;
        }

        DPRINTF(IQ, "IQ sharing policy set to Threshold:"
                "%i entries per thread.\n",thresholdIQ);
    }
    // Threads beyond the configured count get no entries at all.
    for (ThreadID tid = numThreads; tid < Impl::MaxThreads; tid++) {
        maxEntries[tid] = 0;
    }
}

template <class Impl>
InstructionQueue<Impl>::~InstructionQueue()
{
    dependGraph.reset();
#ifdef DEBUG
    cprintf("Nodes traversed: %i, removed: %i\n",
            dependGraph.nodesTraversed, dependGraph.nodesRemoved);
#endif
}

// Stat/debug name of this unit: "<cpu name>.iq".
template <class Impl>
std::string
InstructionQueue<Impl>::name() const
{
    return cpu->name() + ".iq";
}

// Registers all IQ statistics.  Several historical distributions are kept
// here commented out (queueResDist, dist_unissued, issueDelayDist).
template <class Impl>
void
InstructionQueue<Impl>::regStats()
{
    using namespace Stats;
    iqInstsAdded
        .name(name() + ".iqInstsAdded")
        .desc("Number of instructions added to the IQ (excludes non-spec)")
        .prereq(iqInstsAdded);

    iqNonSpecInstsAdded
        .name(name() + ".iqNonSpecInstsAdded")
        .desc("Number of non-speculative instructions added to the IQ")
        .prereq(iqNonSpecInstsAdded);

    iqInstsIssued
        .name(name() + ".iqInstsIssued")
        .desc("Number of instructions issued")
        .prereq(iqInstsIssued);

    iqIntInstsIssued
        .name(name() + ".iqIntInstsIssued")
        .desc("Number of integer instructions issued")
        .prereq(iqIntInstsIssued);

    iqFloatInstsIssued
        .name(name() + ".iqFloatInstsIssued")
        .desc("Number of float instructions issued")
        .prereq(iqFloatInstsIssued);

    iqBranchInstsIssued
        .name(name() + ".iqBranchInstsIssued")
        .desc("Number of branch instructions issued")
        .prereq(iqBranchInstsIssued);

    iqMemInstsIssued
        .name(name() + ".iqMemInstsIssued")
        .desc("Number of memory instructions issued")
        .prereq(iqMemInstsIssued);

    iqMiscInstsIssued
        .name(name() + ".iqMiscInstsIssued")
        .desc("Number of miscellaneous instructions issued")
        .prereq(iqMiscInstsIssued);

    iqSquashedInstsIssued
        .name(name() + ".iqSquashedInstsIssued")
        .desc("Number of squashed instructions issued")
        .prereq(iqSquashedInstsIssued);

    iqSquashedInstsExamined
        .name(name() + ".iqSquashedInstsExamined")
        .desc("Number of squashed instructions iterated over during squash;"
              " mainly for profiling")
        .prereq(iqSquashedInstsExamined);

    iqSquashedOperandsExamined
        .name(name() + ".iqSquashedOperandsExamined")
        .desc("Number of squashed operands that are examined and possibly "
              "removed from graph")
        .prereq(iqSquashedOperandsExamined);

    iqSquashedNonSpecRemoved
        .name(name() + ".iqSquashedNonSpecRemoved")
        .desc("Number of squashed non-spec instructions that were removed")
        .prereq(iqSquashedNonSpecRemoved);
/*
    queueResDist
        .init(Num_OpClasses, 0, 99, 2)
        .name(name() + ".IQ:residence:")
        .desc("cycles from dispatch to issue")
        .flags(total | pdf | cdf )
        ;
    for (int i = 0; i < Num_OpClasses; ++i) {
        queueResDist.subname(i, opClassStrings[i]);
    }
*/
    numIssuedDist
        .init(0,totalWidth,1)
        .name(name() + ".issued_per_cycle")
        .desc("Number of insts issued each cycle")
        .flags(pdf)
        ;
/*
    dist_unissued
        .init(Num_OpClasses+2)
        .name(name() + ".unissued_cause")
        .desc("Reason ready instruction not issued")
        .flags(pdf | dist)
        ;
    for (int i=0; i < (Num_OpClasses + 2); ++i) {
        dist_unissued.subname(i, unissued_names[i]);
    }
*/
    statIssuedInstType
        .init(numThreads,Enums::Num_OpClass)
        .name(name() + ".FU_type")
        .desc("Type of FU issued")
        .flags(total | pdf | dist)
        ;
    statIssuedInstType.ysubnames(Enums::OpClassStrings);

    //
    // How long did instructions for a particular FU type wait prior to issue
    //
/*
    issueDelayDist
        .init(Num_OpClasses,0,99,2)
        .name(name() + ".")
        .desc("cycles from operands ready to issue")
        .flags(pdf | cdf)
        ;


    for (int i=0; i<Num_OpClasses; ++i) {
        std::stringstream subname;
        subname << opClassStrings[i] << "_delay";
        issueDelayDist.subname(i, subname.str());
    }
*/
    issueRate
        .name(name() + ".rate")
        .desc("Inst issue rate")
        .flags(total)
        ;
    issueRate = iqInstsIssued / cpu->numCycles;

    statFuBusy
        .init(Num_OpClasses)
        .name(name() + ".fu_full")
        .desc("attempts to use FU when none available")
        .flags(pdf | dist)
        ;
    for (int i=0; i < Num_OpClasses; ++i) {
        statFuBusy.subname(i, Enums::OpClassStrings[i]);
    }

    fuBusy
        .init(numThreads)
        .name(name() + ".fu_busy_cnt")
        .desc("FU busy when requested")
        .flags(total)
        ;

    fuBusyRate
        .name(name() + ".fu_busy_rate")
        .desc("FU busy rate (busy events/executed inst)")
        .flags(total)
        ;
    fuBusyRate = fuBusy / iqInstsIssued;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        // Tell mem dependence unit to reg stats as well.
        memDepUnit[tid].regStats();
    }

    intInstQueueReads
        .name(name() + ".int_inst_queue_reads")
        .desc("Number of integer instruction queue reads")
        .flags(total);

    intInstQueueWrites
        .name(name() + ".int_inst_queue_writes")
        .desc("Number of integer instruction queue writes")
        .flags(total);

    intInstQueueWakeupAccesses
        .name(name() + ".int_inst_queue_wakeup_accesses")
        .desc("Number of integer instruction queue wakeup accesses")
        .flags(total);

    fpInstQueueReads
        .name(name() + ".fp_inst_queue_reads")
        .desc("Number of floating instruction queue reads")
        .flags(total);

    fpInstQueueWrites
        .name(name() + ".fp_inst_queue_writes")
        .desc("Number of floating instruction queue writes")
        .flags(total);

    fpInstQueueWakeupAccesses
        .name(name() + ".fp_inst_queue_wakeup_accesses")
        .desc("Number of floating instruction queue wakeup accesses")
        .flags(total);

    vecInstQueueReads
        .name(name() + ".vec_inst_queue_reads")
        .desc("Number of vector instruction queue reads")
        .flags(total);

    vecInstQueueWrites
        .name(name() + ".vec_inst_queue_writes")
        .desc("Number of vector instruction queue writes")
        .flags(total);

    vecInstQueueWakeupAccesses
        .name(name() + ".vec_inst_queue_wakeup_accesses")
        .desc("Number of vector instruction queue wakeup accesses")
        .flags(total);

    intAluAccesses
        .name(name() + ".int_alu_accesses")
        .desc("Number of integer alu accesses")
        .flags(total);

    fpAluAccesses
        .name(name() + ".fp_alu_accesses")
        .desc("Number of floating point alu accesses")
        .flags(total);

    vecAluAccesses
        .name(name() + ".vec_alu_accesses")
        .desc("Number of vector alu accesses")
        .flags(total);

}

// Clears all per-thread and global IQ state; used at construction and
// on takeOverFrom().
template <class Impl>
void
InstructionQueue<Impl>::resetState()
{
    //Initialize thread IQ counts
    for (ThreadID tid = 0; tid < Impl::MaxThreads; tid++) {
        count[tid] = 0;
        instList[tid].clear();
    }

    // Initialize the number of free IQ entries.
    freeEntries = numEntries;

    // Note that in actuality, the registers corresponding to the logical
    // registers start off as ready. However this doesn't matter for the
    // IQ as the instruction should have been correctly told if those
    // registers are ready in rename. Thus it can all be initialized as
    // unready.
    for (int i = 0; i < numPhysRegs; ++i) {
        regScoreboard[i] = false;
    }

    for (ThreadID tid = 0; tid < Impl::MaxThreads; ++tid) {
        squashedSeqNum[tid] = 0;
    }

    // Drain every per-op-class ready queue and detach it from the
    // issue-order list.
    for (int i = 0; i < Num_OpClasses; ++i) {
        while (!readyInsts[i].empty())
            readyInsts[i].pop();
        queueOnList[i] = false;
        readyIt[i] = listOrder.end();
    }
    nonSpecInsts.clear();
    listOrder.clear();
    deferredMemInsts.clear();
    blockedMemInsts.clear();
    retryMemInsts.clear();
    wbOutstanding = 0;
}

// Stores the pointer to the list of active threads (owned by the CPU).
template <class Impl>
void
InstructionQueue<Impl>::setActiveThreads(list<ThreadID> *at_ptr)
{
    activeThreads = at_ptr;
}

// Stores the queue used to forward issued instructions to execute.
template <class Impl>
void
InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
{
    issueToExecuteQueue = i2e_ptr;
}

// Stores the main backwards time buffer and grabs the read wire that
// carries commit's information back to the IQ, delayed by
// commitToIEWDelay cycles.
template <class Impl>
void
InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
{
    timeBuffer = tb_ptr;

    fromCommit = timeBuffer->getWire(-commitToIEWDelay);
}

// Drained when the dependency graph and execute list are empty, no
// writebacks are outstanding, and all mem-dep units are drained.
template <class Impl>
bool
InstructionQueue<Impl>::isDrained() const
{
    bool drained = dependGraph.empty() &&
                   instsToExecute.empty() &&
                   wbOutstanding == 0;
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        drained = drained && memDepUnit[tid].isDrained();

    return drained;
}

template <class Impl>
void
InstructionQueue<Impl>::drainSanityCheck() const
{
    assert(dependGraph.empty());
    assert(instsToExecute.empty());
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        memDepUnit[tid].drainSanityCheck();
}

template <class Impl>
void
InstructionQueue<Impl>::takeOverFrom()
{
    resetState();
}

// Per-thread entry share for the Partitioned policy; 0 means "no fixed
// share" under the other policies.
template <class Impl>
int
InstructionQueue<Impl>::entryAmount(ThreadID num_threads)
{
    if (iqPolicy == SMTQueuePolicy::Partitioned) {
        return numEntries / num_threads;
    } else {
        return 0;
    }
}


// Re-balances maxEntries across the currently active threads when the
// active-thread set changes (no-op for single-threaded Dynamic policy).
template <class Impl>
void
InstructionQueue<Impl>::resetEntries()
{
    if (iqPolicy != SMTQueuePolicy::Dynamic || numThreads > 1) {
        int active_threads = activeThreads->size();

        list<ThreadID>::iterator threads = activeThreads->begin();
        list<ThreadID>::iterator end = activeThreads->end();

        while (threads != end) {
            ThreadID tid = *threads++;

            if (iqPolicy == SMTQueuePolicy::Partitioned) {
                maxEntries[tid] = numEntries / active_threads;
            } else if (iqPolicy == SMTQueuePolicy::Threshold &&
                       active_threads == 1) {
                // A lone thread under Threshold gets the whole IQ.
                maxEntries[tid] = numEntries;
            }
        }
    }
}

template <class Impl>
unsigned
InstructionQueue<Impl>::numFreeEntries()
{
    return freeEntries;
}

// Free entries within thread tid's share (maxEntries minus its usage).
template <class Impl>
unsigned
InstructionQueue<Impl>::numFreeEntries(ThreadID tid)
{
    return maxEntries[tid] - count[tid];
}
5368086SBrad.Beckmann@amd.com 5378086SBrad.Beckmann@amd.com// Might want to do something more complex if it knows how many instructions 5388086SBrad.Beckmann@amd.com// will be issued this cycle. 5398086SBrad.Beckmann@amd.comtemplate <class Impl> 5406657Snate@binkert.orgbool 5416657Snate@binkert.orgInstructionQueue<Impl>::isFull() 5427002Snate@binkert.org{ 5436657Snate@binkert.org if (freeEntries == 0) { 5447007Snate@binkert.org return(true); 5456657Snate@binkert.org } else { 5466657Snate@binkert.org return(false); 5476657Snate@binkert.org } 5486657Snate@binkert.org} 5496657Snate@binkert.org 5506999Snate@binkert.orgtemplate <class Impl> 5516657Snate@binkert.orgbool 5526657Snate@binkert.orgInstructionQueue<Impl>::isFull(ThreadID tid) 5536657Snate@binkert.org{ 5546657Snate@binkert.org if (numFreeEntries(tid) == 0) { 5556657Snate@binkert.org return(true); 5566657Snate@binkert.org } else { 5577832Snate@binkert.org return(false); 5587002Snate@binkert.org } 5597002Snate@binkert.org} 5607002Snate@binkert.org 56112334Sgabeblack@google.comtemplate <class Impl> 56214184Sgabeblack@google.combool 5636657Snate@binkert.orgInstructionQueue<Impl>::hasReadyInsts() 5647002Snate@binkert.org{ 5657002Snate@binkert.org if (!listOrder.empty()) { 5666657Snate@binkert.org return true; 5676657Snate@binkert.org } 5688086SBrad.Beckmann@amd.com 5698086SBrad.Beckmann@amd.com for (int i = 0; i < Num_OpClasses; ++i) { 5708086SBrad.Beckmann@amd.com if (!readyInsts[i].empty()) { 5718086SBrad.Beckmann@amd.com return true; 5728086SBrad.Beckmann@amd.com } 5738086SBrad.Beckmann@amd.com } 5748086SBrad.Beckmann@amd.com 5758086SBrad.Beckmann@amd.com return false; 5768086SBrad.Beckmann@amd.com} 5778086SBrad.Beckmann@amd.com 5788086SBrad.Beckmann@amd.comtemplate <class Impl> 5798086SBrad.Beckmann@amd.comvoid 5808086SBrad.Beckmann@amd.comInstructionQueue<Impl>::insert(const DynInstPtr &new_inst) 5818086SBrad.Beckmann@amd.com{ 5828086SBrad.Beckmann@amd.com if (new_inst->isFloating()) { 
5838086SBrad.Beckmann@amd.com fpInstQueueWrites++; 5848086SBrad.Beckmann@amd.com } else if (new_inst->isVector()) { 5858086SBrad.Beckmann@amd.com vecInstQueueWrites++; 5868086SBrad.Beckmann@amd.com } else { 5878086SBrad.Beckmann@amd.com intInstQueueWrites++; 5888086SBrad.Beckmann@amd.com } 5896657Snate@binkert.org // Make sure the instruction is valid 5906657Snate@binkert.org assert(new_inst); 59111283Santhony.gutierrez@amd.com 59214184Sgabeblack@google.com DPRINTF(IQ, "Adding instruction [sn:%llu] PC %s to the IQ.\n", 59314184Sgabeblack@google.com new_inst->seqNum, new_inst->pcState()); 59410301Snilay@cs.wisc.edu 5956657Snate@binkert.org assert(freeEntries != 0); 5966657Snate@binkert.org 5977007Snate@binkert.org instList[new_inst->threadNumber].push_back(new_inst); 5987007Snate@binkert.org 5997007Snate@binkert.org --freeEntries; 6006657Snate@binkert.org 6016657Snate@binkert.org new_inst->setInIQ(); 6026657Snate@binkert.org 6036657Snate@binkert.org // Look through its source registers (physical regs), and mark any 6046657Snate@binkert.org // dependencies. 6056657Snate@binkert.org addToDependents(new_inst); 6067007Snate@binkert.org 6077007Snate@binkert.org // Have this instruction set itself as the producer of its destination 6087007Snate@binkert.org // register(s). 
6096657Snate@binkert.org addToProducers(new_inst); 6106657Snate@binkert.org 6116657Snate@binkert.org if (new_inst->isMemRef()) { 6126657Snate@binkert.org memDepUnit[new_inst->threadNumber].insert(new_inst); 6136657Snate@binkert.org } else { 6146657Snate@binkert.org addIfReady(new_inst); 6156657Snate@binkert.org } 6166657Snate@binkert.org 6176657Snate@binkert.org ++iqInstsAdded; 6186657Snate@binkert.org 6196657Snate@binkert.org count[new_inst->threadNumber]++; 6206657Snate@binkert.org 6216657Snate@binkert.org assert(freeEntries == (numEntries - countInsts())); 6226657Snate@binkert.org} 6237805Snilay@cs.wisc.edu 6246657Snate@binkert.orgtemplate <class Impl> 6256657Snate@binkert.orgvoid 6266657Snate@binkert.orgInstructionQueue<Impl>::insertNonSpec(const DynInstPtr &new_inst) 6277007Snate@binkert.org{ 6287007Snate@binkert.org // @todo: Clean up this code; can do it by setting inst as unable 6297007Snate@binkert.org // to issue, then calling normal insert on the inst. 6306657Snate@binkert.org if (new_inst->isFloating()) { 6316657Snate@binkert.org fpInstQueueWrites++; 6326657Snate@binkert.org } else if (new_inst->isVector()) { 6336657Snate@binkert.org vecInstQueueWrites++; 6347007Snate@binkert.org } else { 6356657Snate@binkert.org intInstQueueWrites++; 6366657Snate@binkert.org } 6376657Snate@binkert.org 6386657Snate@binkert.org assert(new_inst); 6397007Snate@binkert.org 6406657Snate@binkert.org nonSpecInsts[new_inst->seqNum] = new_inst; 6416657Snate@binkert.org 6426657Snate@binkert.org DPRINTF(IQ, "Adding non-speculative instruction [sn:%llu] PC %s " 6436657Snate@binkert.org "to the IQ.\n", 6447805Snilay@cs.wisc.edu new_inst->seqNum, new_inst->pcState()); 6456657Snate@binkert.org 6466657Snate@binkert.org assert(freeEntries != 0); 6476657Snate@binkert.org 6487007Snate@binkert.org instList[new_inst->threadNumber].push_back(new_inst); 6497007Snate@binkert.org 6507007Snate@binkert.org --freeEntries; 6517007Snate@binkert.org 6526657Snate@binkert.org new_inst->setInIQ(); 
6536657Snate@binkert.org 6546657Snate@binkert.org // Have this instruction set itself as the producer of its destination 6556657Snate@binkert.org // register(s). 6566657Snate@binkert.org addToProducers(new_inst); 6576657Snate@binkert.org 6586657Snate@binkert.org // If it's a memory instruction, add it to the memory dependency 6596657Snate@binkert.org // unit. 6606657Snate@binkert.org if (new_inst->isMemRef()) { 6617007Snate@binkert.org memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst); 6627007Snate@binkert.org } 6636657Snate@binkert.org 6646657Snate@binkert.org ++iqNonSpecInstsAdded; 6656657Snate@binkert.org 6666657Snate@binkert.org count[new_inst->threadNumber]++; 6677007Snate@binkert.org 6687007Snate@binkert.org assert(freeEntries == (numEntries - countInsts())); 6696657Snate@binkert.org} 6706657Snate@binkert.org 6716657Snate@binkert.orgtemplate <class Impl> 6726657Snate@binkert.orgvoid 6736657Snate@binkert.orgInstructionQueue<Impl>::insertBarrier(const DynInstPtr &barr_inst) 6746657Snate@binkert.org{ 6756657Snate@binkert.org memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst); 6766657Snate@binkert.org 6776657Snate@binkert.org insertNonSpec(barr_inst); 6786657Snate@binkert.org} 6796657Snate@binkert.org 6806657Snate@binkert.orgtemplate <class Impl> 6816657Snate@binkert.orgtypename Impl::DynInstPtr 6826657Snate@binkert.orgInstructionQueue<Impl>::getInstToExecute() 6836657Snate@binkert.org{ 6846657Snate@binkert.org assert(!instsToExecute.empty()); 6856657Snate@binkert.org DynInstPtr inst = std::move(instsToExecute.front()); 6867805Snilay@cs.wisc.edu instsToExecute.pop_front(); 6876657Snate@binkert.org if (inst->isFloating()) { 6886657Snate@binkert.org fpInstQueueReads++; 6896657Snate@binkert.org } else if (inst->isVector()) { 6906657Snate@binkert.org vecInstQueueReads++; 6916657Snate@binkert.org } else { 6927007Snate@binkert.org intInstQueueReads++; 6936657Snate@binkert.org } 6947007Snate@binkert.org return inst; 6957007Snate@binkert.org} 
6966657Snate@binkert.org 6976657Snate@binkert.orgtemplate <class Impl> 6986657Snate@binkert.orgvoid 6996657Snate@binkert.orgInstructionQueue<Impl>::addToOrderList(OpClass op_class) 7006657Snate@binkert.org{ 7016657Snate@binkert.org assert(!readyInsts[op_class].empty()); 7026657Snate@binkert.org 7036657Snate@binkert.org ListOrderEntry queue_entry; 7046657Snate@binkert.org 7056657Snate@binkert.org queue_entry.queueType = op_class; 7066657Snate@binkert.org 7076657Snate@binkert.org queue_entry.oldestInst = readyInsts[op_class].top()->seqNum; 7086657Snate@binkert.org 7096657Snate@binkert.org ListOrderIt list_it = listOrder.begin(); 7107805Snilay@cs.wisc.edu ListOrderIt list_end_it = listOrder.end(); 7116657Snate@binkert.org 7126657Snate@binkert.org while (list_it != list_end_it) { 7136657Snate@binkert.org if ((*list_it).oldestInst > queue_entry.oldestInst) { 7146657Snate@binkert.org break; 7156657Snate@binkert.org } 7166657Snate@binkert.org 7176657Snate@binkert.org list_it++; 7186657Snate@binkert.org } 7197007Snate@binkert.org 7207007Snate@binkert.org readyIt[op_class] = listOrder.insert(list_it, queue_entry); 7216657Snate@binkert.org queueOnList[op_class] = true; 7226657Snate@binkert.org} 7236657Snate@binkert.org 7246657Snate@binkert.orgtemplate <class Impl> 7256657Snate@binkert.orgvoid 7266657Snate@binkert.orgInstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it) 7276657Snate@binkert.org{ 7286657Snate@binkert.org // Get iterator of next item on the list 72913709Sandreas.sandberg@arm.com // Delete the original iterator 7309773Snilay@cs.wisc.edu // Determine if the next item is either the end of the list or younger 73111283Santhony.gutierrez@amd.com // than the new instruction. If so, then add in a new iterator right here. 7329773Snilay@cs.wisc.edu // If not, then move along. 
7339773Snilay@cs.wisc.edu ListOrderEntry queue_entry; 7349773Snilay@cs.wisc.edu OpClass op_class = (*list_order_it).queueType; 73512392Sjason@lowepower.com ListOrderIt next_it = list_order_it; 7366657Snate@binkert.org 7376657Snate@binkert.org ++next_it; 7386657Snate@binkert.org 7396657Snate@binkert.org queue_entry.queueType = op_class; 7406657Snate@binkert.org queue_entry.oldestInst = readyInsts[op_class].top()->seqNum; 7416657Snate@binkert.org 7427805Snilay@cs.wisc.edu while (next_it != listOrder.end() && 7436657Snate@binkert.org (*next_it).oldestInst < queue_entry.oldestInst) { 7446657Snate@binkert.org ++next_it; 7456657Snate@binkert.org } 7466657Snate@binkert.org 7476657Snate@binkert.org readyIt[op_class] = listOrder.insert(next_it, queue_entry); 7486657Snate@binkert.org} 7496657Snate@binkert.org 7506657Snate@binkert.orgtemplate <class Impl> 7517007Snate@binkert.orgvoid 7527007Snate@binkert.orgInstructionQueue<Impl>::processFUCompletion(const DynInstPtr &inst, int fu_idx) 7536657Snate@binkert.org{ 7546657Snate@binkert.org DPRINTF(IQ, "Processing FU completion [sn:%llu]\n", inst->seqNum); 7556657Snate@binkert.org assert(!cpu->switchedOut()); 7566657Snate@binkert.org // The CPU could have been sleeping until this op completed (*extremely* 7576657Snate@binkert.org // long latency op). Wake it if it was. This may be overkill. 7586657Snate@binkert.org --wbOutstanding; 7599773Snilay@cs.wisc.edu iewStage->wakeCPU(); 76011283Santhony.gutierrez@amd.com 7619773Snilay@cs.wisc.edu if (fu_idx > -1) 7629773Snilay@cs.wisc.edu fuPool->freeUnitNextCycle(fu_idx); 7639773Snilay@cs.wisc.edu 7646657Snate@binkert.org // @todo: Ensure that these FU Completions happen at the beginning 7656657Snate@binkert.org // of a cycle, otherwise they could add too many instructions to 7666657Snate@binkert.org // the queue. 
7676657Snate@binkert.org issueToExecuteQueue->access(-1)->size++; 7686657Snate@binkert.org instsToExecute.push_back(inst); 7697805Snilay@cs.wisc.edu} 7706657Snate@binkert.org 7716657Snate@binkert.org// @todo: Figure out a better way to remove the squashed items from the 7726657Snate@binkert.org// lists. Checking the top item of each list to see if it's squashed 7736657Snate@binkert.org// wastes time and forces jumps. 7748602Snilay@cs.wisc.edutemplate <class Impl> 7758602Snilay@cs.wisc.eduvoid 7768602Snilay@cs.wisc.eduInstructionQueue<Impl>::scheduleReadyInsts() 7778602Snilay@cs.wisc.edu{ 7788602Snilay@cs.wisc.edu DPRINTF(IQ, "Attempting to schedule ready instructions from " 7798602Snilay@cs.wisc.edu "the IQ.\n"); 7808602Snilay@cs.wisc.edu 7818602Snilay@cs.wisc.edu IssueStruct *i2e_info = issueToExecuteQueue->access(0); 7828602Snilay@cs.wisc.edu 7838602Snilay@cs.wisc.edu DynInstPtr mem_inst; 7848602Snilay@cs.wisc.edu while (mem_inst = std::move(getDeferredMemInstToExecute())) { 7856657Snate@binkert.org addReadyMemInst(mem_inst); 7866657Snate@binkert.org } 7876657Snate@binkert.org 7886657Snate@binkert.org // See if any cache blocked instructions are able to be executed 789 while (mem_inst = std::move(getBlockedMemInstToExecute())) { 790 addReadyMemInst(mem_inst); 791 } 792 793 // Have iterator to head of the list 794 // While I haven't exceeded bandwidth or reached the end of the list, 795 // Try to get a FU that can do what this op needs. 796 // If successful, change the oldestInst to the new top of the list, put 797 // the queue in the proper place in the list. 798 // Increment the iterator. 799 // This will avoid trying to schedule a certain op class if there are no 800 // FUs that handle it. 
801 int total_issued = 0; 802 ListOrderIt order_it = listOrder.begin(); 803 ListOrderIt order_end_it = listOrder.end(); 804 805 while (total_issued < totalWidth && order_it != order_end_it) { 806 OpClass op_class = (*order_it).queueType; 807 808 assert(!readyInsts[op_class].empty()); 809 810 DynInstPtr issuing_inst = readyInsts[op_class].top(); 811 812 if (issuing_inst->isFloating()) { 813 fpInstQueueReads++; 814 } else if (issuing_inst->isVector()) { 815 vecInstQueueReads++; 816 } else { 817 intInstQueueReads++; 818 } 819 820 assert(issuing_inst->seqNum == (*order_it).oldestInst); 821 822 if (issuing_inst->isSquashed()) { 823 readyInsts[op_class].pop(); 824 825 if (!readyInsts[op_class].empty()) { 826 moveToYoungerInst(order_it); 827 } else { 828 readyIt[op_class] = listOrder.end(); 829 queueOnList[op_class] = false; 830 } 831 832 listOrder.erase(order_it++); 833 834 ++iqSquashedInstsIssued; 835 836 continue; 837 } 838 839 int idx = FUPool::NoCapableFU; 840 Cycles op_latency = Cycles(1); 841 ThreadID tid = issuing_inst->threadNumber; 842 843 if (op_class != No_OpClass) { 844 idx = fuPool->getUnit(op_class); 845 if (issuing_inst->isFloating()) { 846 fpAluAccesses++; 847 } else if (issuing_inst->isVector()) { 848 vecAluAccesses++; 849 } else { 850 intAluAccesses++; 851 } 852 if (idx > FUPool::NoFreeFU) { 853 op_latency = fuPool->getOpLatency(op_class); 854 } 855 } 856 857 // If we have an instruction that doesn't require a FU, or a 858 // valid FU, then schedule for execution. 859 if (idx != FUPool::NoFreeFU) { 860 if (op_latency == Cycles(1)) { 861 i2e_info->size++; 862 instsToExecute.push_back(issuing_inst); 863 864 // Add the FU onto the list of FU's to be freed next 865 // cycle if we used one. 
866 if (idx >= 0) 867 fuPool->freeUnitNextCycle(idx); 868 } else { 869 bool pipelined = fuPool->isPipelined(op_class); 870 // Generate completion event for the FU 871 ++wbOutstanding; 872 FUCompletion *execution = new FUCompletion(issuing_inst, 873 idx, this); 874 875 cpu->schedule(execution, 876 cpu->clockEdge(Cycles(op_latency - 1))); 877 878 if (!pipelined) { 879 // If FU isn't pipelined, then it must be freed 880 // upon the execution completing. 881 execution->setFreeFU(); 882 } else { 883 // Add the FU onto the list of FU's to be freed next cycle. 884 fuPool->freeUnitNextCycle(idx); 885 } 886 } 887 888 DPRINTF(IQ, "Thread %i: Issuing instruction PC %s " 889 "[sn:%llu]\n", 890 tid, issuing_inst->pcState(), 891 issuing_inst->seqNum); 892 893 readyInsts[op_class].pop(); 894 895 if (!readyInsts[op_class].empty()) { 896 moveToYoungerInst(order_it); 897 } else { 898 readyIt[op_class] = listOrder.end(); 899 queueOnList[op_class] = false; 900 } 901 902 issuing_inst->setIssued(); 903 ++total_issued; 904 905#if TRACING_ON 906 issuing_inst->issueTick = curTick() - issuing_inst->fetchTick; 907#endif 908 909 if (!issuing_inst->isMemRef()) { 910 // Memory instructions can not be freed from the IQ until they 911 // complete. 912 ++freeEntries; 913 count[tid]--; 914 issuing_inst->clearInIQ(); 915 } else { 916 memDepUnit[tid].issue(issuing_inst); 917 } 918 919 listOrder.erase(order_it++); 920 statIssuedInstType[tid][op_class]++; 921 } else { 922 statFuBusy[op_class]++; 923 fuBusy[tid]++; 924 ++order_it; 925 } 926 } 927 928 numIssuedDist.sample(total_issued); 929 iqInstsIssued+= total_issued; 930 931 // If we issued any instructions, tell the CPU we had activity. 932 // @todo If the way deferred memory instructions are handeled due to 933 // translation changes then the deferredMemInsts condition should be removed 934 // from the code below. 
935 if (total_issued || !retryMemInsts.empty() || !deferredMemInsts.empty()) { 936 cpu->activityThisCycle(); 937 } else { 938 DPRINTF(IQ, "Not able to schedule any instructions.\n"); 939 } 940} 941 942template <class Impl> 943void 944InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst) 945{ 946 DPRINTF(IQ, "Marking nonspeculative instruction [sn:%llu] as ready " 947 "to execute.\n", inst); 948 949 NonSpecMapIt inst_it = nonSpecInsts.find(inst); 950 951 assert(inst_it != nonSpecInsts.end()); 952 953 ThreadID tid = (*inst_it).second->threadNumber; 954 955 (*inst_it).second->setAtCommit(); 956 957 (*inst_it).second->setCanIssue(); 958 959 if (!(*inst_it).second->isMemRef()) { 960 addIfReady((*inst_it).second); 961 } else { 962 memDepUnit[tid].nonSpecInstReady((*inst_it).second); 963 } 964 965 (*inst_it).second = NULL; 966 967 nonSpecInsts.erase(inst_it); 968} 969 970template <class Impl> 971void 972InstructionQueue<Impl>::commit(const InstSeqNum &inst, ThreadID tid) 973{ 974 DPRINTF(IQ, "[tid:%i] Committing instructions older than [sn:%llu]\n", 975 tid,inst); 976 977 ListIt iq_it = instList[tid].begin(); 978 979 while (iq_it != instList[tid].end() && 980 (*iq_it)->seqNum <= inst) { 981 ++iq_it; 982 instList[tid].pop_front(); 983 } 984 985 assert(freeEntries == (numEntries - countInsts())); 986} 987 988template <class Impl> 989int 990InstructionQueue<Impl>::wakeDependents(const DynInstPtr &completed_inst) 991{ 992 int dependents = 0; 993 994 // The instruction queue here takes care of both floating and int ops 995 if (completed_inst->isFloating()) { 996 fpInstQueueWakeupAccesses++; 997 } else if (completed_inst->isVector()) { 998 vecInstQueueWakeupAccesses++; 999 } else { 1000 intInstQueueWakeupAccesses++; 1001 } 1002 1003 DPRINTF(IQ, "Waking dependents of completed instruction.\n"); 1004 1005 assert(!completed_inst->isSquashed()); 1006 1007 // Tell the memory dependence unit to wake any dependents on this 1008 // instruction if it is a memory instruction. 
Also complete the memory 1009 // instruction at this point since we know it executed without issues. 1010 // @todo: Might want to rename "completeMemInst" to something that 1011 // indicates that it won't need to be replayed, and call this 1012 // earlier. Might not be a big deal. 1013 if (completed_inst->isMemRef()) { 1014 memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst); 1015 completeMemInst(completed_inst); 1016 } else if (completed_inst->isMemBarrier() || 1017 completed_inst->isWriteBarrier()) { 1018 memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst); 1019 } 1020 1021 for (int dest_reg_idx = 0; 1022 dest_reg_idx < completed_inst->numDestRegs(); 1023 dest_reg_idx++) 1024 { 1025 PhysRegIdPtr dest_reg = 1026 completed_inst->renamedDestRegIdx(dest_reg_idx); 1027 1028 // Special case of uniq or control registers. They are not 1029 // handled by the IQ and thus have no dependency graph entry. 1030 if (dest_reg->isFixedMapping()) { 1031 DPRINTF(IQ, "Reg %d [%s] is part of a fix mapping, skipping\n", 1032 dest_reg->index(), dest_reg->className()); 1033 continue; 1034 } 1035 1036 // Avoid waking up dependents if the register is pinned 1037 dest_reg->decrNumPinnedWritesToComplete(); 1038 if (dest_reg->isPinned()) 1039 completed_inst->setPinnedRegsWritten(); 1040 1041 if (dest_reg->getNumPinnedWritesToComplete() != 0) { 1042 DPRINTF(IQ, "Reg %d [%s] is pinned, skipping\n", 1043 dest_reg->index(), dest_reg->className()); 1044 continue; 1045 } 1046 1047 DPRINTF(IQ, "Waking any dependents on register %i (%s).\n", 1048 dest_reg->index(), 1049 dest_reg->className()); 1050 1051 //Go through the dependency chain, marking the registers as 1052 //ready within the waiting instructions. 
1053 DynInstPtr dep_inst = dependGraph.pop(dest_reg->flatIndex()); 1054 1055 while (dep_inst) { 1056 DPRINTF(IQ, "Waking up a dependent instruction, [sn:%llu] " 1057 "PC %s.\n", dep_inst->seqNum, dep_inst->pcState()); 1058 1059 // Might want to give more information to the instruction 1060 // so that it knows which of its source registers is 1061 // ready. However that would mean that the dependency 1062 // graph entries would need to hold the src_reg_idx. 1063 dep_inst->markSrcRegReady(); 1064 1065 addIfReady(dep_inst); 1066 1067 dep_inst = dependGraph.pop(dest_reg->flatIndex()); 1068 1069 ++dependents; 1070 } 1071 1072 // Reset the head node now that all of its dependents have 1073 // been woken up. 1074 assert(dependGraph.empty(dest_reg->flatIndex())); 1075 dependGraph.clearInst(dest_reg->flatIndex()); 1076 1077 // Mark the scoreboard as having that register ready. 1078 regScoreboard[dest_reg->flatIndex()] = true; 1079 } 1080 return dependents; 1081} 1082 1083template <class Impl> 1084void 1085InstructionQueue<Impl>::addReadyMemInst(const DynInstPtr &ready_inst) 1086{ 1087 OpClass op_class = ready_inst->opClass(); 1088 1089 readyInsts[op_class].push(ready_inst); 1090 1091 // Will need to reorder the list if either a queue is not on the list, 1092 // or it has an older instruction than last time. 
1093 if (!queueOnList[op_class]) { 1094 addToOrderList(op_class); 1095 } else if (readyInsts[op_class].top()->seqNum < 1096 (*readyIt[op_class]).oldestInst) { 1097 listOrder.erase(readyIt[op_class]); 1098 addToOrderList(op_class); 1099 } 1100 1101 DPRINTF(IQ, "Instruction is ready to issue, putting it onto " 1102 "the ready list, PC %s opclass:%i [sn:%llu].\n", 1103 ready_inst->pcState(), op_class, ready_inst->seqNum); 1104} 1105 1106template <class Impl> 1107void 1108InstructionQueue<Impl>::rescheduleMemInst(const DynInstPtr &resched_inst) 1109{ 1110 DPRINTF(IQ, "Rescheduling mem inst [sn:%llu]\n", resched_inst->seqNum); 1111 1112 // Reset DTB translation state 1113 resched_inst->translationStarted(false); 1114 resched_inst->translationCompleted(false); 1115 1116 resched_inst->clearCanIssue(); 1117 memDepUnit[resched_inst->threadNumber].reschedule(resched_inst); 1118} 1119 1120template <class Impl> 1121void 1122InstructionQueue<Impl>::replayMemInst(const DynInstPtr &replay_inst) 1123{ 1124 memDepUnit[replay_inst->threadNumber].replay(); 1125} 1126 1127template <class Impl> 1128void 1129InstructionQueue<Impl>::completeMemInst(const DynInstPtr &completed_inst) 1130{ 1131 ThreadID tid = completed_inst->threadNumber; 1132 1133 DPRINTF(IQ, "Completing mem instruction PC: %s [sn:%llu]\n", 1134 completed_inst->pcState(), completed_inst->seqNum); 1135 1136 ++freeEntries; 1137 1138 completed_inst->memOpDone(true); 1139 1140 memDepUnit[tid].completed(completed_inst); 1141 count[tid]--; 1142} 1143 1144template <class Impl> 1145void 1146InstructionQueue<Impl>::deferMemInst(const DynInstPtr &deferred_inst) 1147{ 1148 deferredMemInsts.push_back(deferred_inst); 1149} 1150 1151template <class Impl> 1152void 1153InstructionQueue<Impl>::blockMemInst(const DynInstPtr &blocked_inst) 1154{ 1155 blocked_inst->clearIssued(); 1156 blocked_inst->clearCanIssue(); 1157 blockedMemInsts.push_back(blocked_inst); 1158} 1159 1160template <class Impl> 1161void 
1162InstructionQueue<Impl>::cacheUnblocked() 1163{ 1164 retryMemInsts.splice(retryMemInsts.end(), blockedMemInsts); 1165 // Get the CPU ticking again 1166 cpu->wakeCPU(); 1167} 1168 1169template <class Impl> 1170typename Impl::DynInstPtr 1171InstructionQueue<Impl>::getDeferredMemInstToExecute() 1172{ 1173 for (ListIt it = deferredMemInsts.begin(); it != deferredMemInsts.end(); 1174 ++it) { 1175 if ((*it)->translationCompleted() || (*it)->isSquashed()) { 1176 DynInstPtr mem_inst = std::move(*it); 1177 deferredMemInsts.erase(it); 1178 return mem_inst; 1179 } 1180 } 1181 return nullptr; 1182} 1183 1184template <class Impl> 1185typename Impl::DynInstPtr 1186InstructionQueue<Impl>::getBlockedMemInstToExecute() 1187{ 1188 if (retryMemInsts.empty()) { 1189 return nullptr; 1190 } else { 1191 DynInstPtr mem_inst = std::move(retryMemInsts.front()); 1192 retryMemInsts.pop_front(); 1193 return mem_inst; 1194 } 1195} 1196 1197template <class Impl> 1198void 1199InstructionQueue<Impl>::violation(const DynInstPtr &store, 1200 const DynInstPtr &faulting_load) 1201{ 1202 intInstQueueWrites++; 1203 memDepUnit[store->threadNumber].violation(store, faulting_load); 1204} 1205 1206template <class Impl> 1207void 1208InstructionQueue<Impl>::squash(ThreadID tid) 1209{ 1210 DPRINTF(IQ, "[tid:%i] Starting to squash instructions in " 1211 "the IQ.\n", tid); 1212 1213 // Read instruction sequence number of last instruction out of the 1214 // time buffer. 1215 squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum; 1216 1217 doSquash(tid); 1218 1219 // Also tell the memory dependence unit to squash. 1220 memDepUnit[tid].squash(squashedSeqNum[tid], tid); 1221} 1222 1223template <class Impl> 1224void 1225InstructionQueue<Impl>::doSquash(ThreadID tid) 1226{ 1227 // Start at the tail. 
1228 ListIt squash_it = instList[tid].end(); 1229 --squash_it; 1230 1231 DPRINTF(IQ, "[tid:%i] Squashing until sequence number %i!\n", 1232 tid, squashedSeqNum[tid]); 1233 1234 // Squash any instructions younger than the squashed sequence number 1235 // given. 1236 while (squash_it != instList[tid].end() && 1237 (*squash_it)->seqNum > squashedSeqNum[tid]) { 1238 1239 DynInstPtr squashed_inst = (*squash_it); 1240 if (squashed_inst->isFloating()) { 1241 fpInstQueueWrites++; 1242 } else if (squashed_inst->isVector()) { 1243 vecInstQueueWrites++; 1244 } else { 1245 intInstQueueWrites++; 1246 } 1247 1248 // Only handle the instruction if it actually is in the IQ and 1249 // hasn't already been squashed in the IQ. 1250 if (squashed_inst->threadNumber != tid || 1251 squashed_inst->isSquashedInIQ()) { 1252 --squash_it; 1253 continue; 1254 } 1255 1256 if (!squashed_inst->isIssued() || 1257 (squashed_inst->isMemRef() && 1258 !squashed_inst->memOpDone())) { 1259 1260 DPRINTF(IQ, "[tid:%i] Instruction [sn:%llu] PC %s squashed.\n", 1261 tid, squashed_inst->seqNum, squashed_inst->pcState()); 1262 1263 bool is_acq_rel = squashed_inst->isMemBarrier() && 1264 (squashed_inst->isLoad() || 1265 squashed_inst->isAtomic() || 1266 (squashed_inst->isStore() && 1267 !squashed_inst->isStoreConditional())); 1268 1269 // Remove the instruction from the dependency list. 1270 if (is_acq_rel || 1271 (!squashed_inst->isNonSpeculative() && 1272 !squashed_inst->isStoreConditional() && 1273 !squashed_inst->isAtomic() && 1274 !squashed_inst->isMemBarrier() && 1275 !squashed_inst->isWriteBarrier())) { 1276 1277 for (int src_reg_idx = 0; 1278 src_reg_idx < squashed_inst->numSrcRegs(); 1279 src_reg_idx++) 1280 { 1281 PhysRegIdPtr src_reg = 1282 squashed_inst->renamedSrcRegIdx(src_reg_idx); 1283 1284 // Only remove it from the dependency graph if it 1285 // was placed there in the first place. 
1286 1287 // Instead of doing a linked list traversal, we 1288 // can just remove these squashed instructions 1289 // either at issue time, or when the register is 1290 // overwritten. The only downside to this is it 1291 // leaves more room for error. 1292 1293 if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) && 1294 !src_reg->isFixedMapping()) { 1295 dependGraph.remove(src_reg->flatIndex(), 1296 squashed_inst); 1297 } 1298 1299 ++iqSquashedOperandsExamined; 1300 } 1301 1302 } else if (!squashed_inst->isStoreConditional() || 1303 !squashed_inst->isCompleted()) { 1304 NonSpecMapIt ns_inst_it = 1305 nonSpecInsts.find(squashed_inst->seqNum); 1306 1307 // we remove non-speculative instructions from 1308 // nonSpecInsts already when they are ready, and so we 1309 // cannot always expect to find them 1310 if (ns_inst_it == nonSpecInsts.end()) { 1311 // loads that became ready but stalled on a 1312 // blocked cache are alreayd removed from 1313 // nonSpecInsts, and have not faulted 1314 assert(squashed_inst->getFault() != NoFault || 1315 squashed_inst->isMemRef()); 1316 } else { 1317 1318 (*ns_inst_it).second = NULL; 1319 1320 nonSpecInsts.erase(ns_inst_it); 1321 1322 ++iqSquashedNonSpecRemoved; 1323 } 1324 } 1325 1326 // Might want to also clear out the head of the dependency graph. 1327 1328 // Mark it as squashed within the IQ. 1329 squashed_inst->setSquashedInIQ(); 1330 1331 // @todo: Remove this hack where several statuses are set so the 1332 // inst will flow through the rest of the pipeline. 1333 squashed_inst->setIssued(); 1334 squashed_inst->setCanCommit(); 1335 squashed_inst->clearInIQ(); 1336 1337 //Update Thread IQ Count 1338 count[squashed_inst->threadNumber]--; 1339 1340 ++freeEntries; 1341 } 1342 1343 // IQ clears out the heads of the dependency graph only when 1344 // instructions reach writeback stage. 
If an instruction is squashed 1345 // before writeback stage, its head of dependency graph would not be 1346 // cleared out; it holds the instruction's DynInstPtr. This prevents 1347 // freeing the squashed instruction's DynInst. 1348 // Thus, we need to manually clear out the squashed instructions' heads 1349 // of dependency graph. 1350 for (int dest_reg_idx = 0; 1351 dest_reg_idx < squashed_inst->numDestRegs(); 1352 dest_reg_idx++) 1353 { 1354 PhysRegIdPtr dest_reg = 1355 squashed_inst->renamedDestRegIdx(dest_reg_idx); 1356 if (dest_reg->isFixedMapping()){ 1357 continue; 1358 } 1359 assert(dependGraph.empty(dest_reg->flatIndex())); 1360 dependGraph.clearInst(dest_reg->flatIndex()); 1361 } 1362 instList[tid].erase(squash_it--); 1363 ++iqSquashedInstsExamined; 1364 } 1365} 1366 1367template <class Impl> 1368bool 1369InstructionQueue<Impl>::addToDependents(const DynInstPtr &new_inst) 1370{ 1371 // Loop through the instruction's source registers, adding 1372 // them to the dependency list if they are not ready. 1373 int8_t total_src_regs = new_inst->numSrcRegs(); 1374 bool return_val = false; 1375 1376 for (int src_reg_idx = 0; 1377 src_reg_idx < total_src_regs; 1378 src_reg_idx++) 1379 { 1380 // Only add it to the dependency graph if it's not ready. 1381 if (!new_inst->isReadySrcRegIdx(src_reg_idx)) { 1382 PhysRegIdPtr src_reg = new_inst->renamedSrcRegIdx(src_reg_idx); 1383 1384 // Check the IQ's scoreboard to make sure the register 1385 // hasn't become ready while the instruction was in flight 1386 // between stages. Only if it really isn't ready should 1387 // it be added to the dependency graph. 
1388 if (src_reg->isFixedMapping()) { 1389 continue; 1390 } else if (!regScoreboard[src_reg->flatIndex()]) { 1391 DPRINTF(IQ, "Instruction PC %s has src reg %i (%s) that " 1392 "is being added to the dependency chain.\n", 1393 new_inst->pcState(), src_reg->index(), 1394 src_reg->className()); 1395 1396 dependGraph.insert(src_reg->flatIndex(), new_inst); 1397 1398 // Change the return value to indicate that something 1399 // was added to the dependency graph. 1400 return_val = true; 1401 } else { 1402 DPRINTF(IQ, "Instruction PC %s has src reg %i (%s) that " 1403 "became ready before it reached the IQ.\n", 1404 new_inst->pcState(), src_reg->index(), 1405 src_reg->className()); 1406 // Mark a register ready within the instruction. 1407 new_inst->markSrcRegReady(src_reg_idx); 1408 } 1409 } 1410 } 1411 1412 return return_val; 1413} 1414 1415template <class Impl> 1416void 1417InstructionQueue<Impl>::addToProducers(const DynInstPtr &new_inst) 1418{ 1419 // Nothing really needs to be marked when an instruction becomes 1420 // the producer of a register's value, but for convenience a ptr 1421 // to the producing instruction will be placed in the head node of 1422 // the dependency links. 1423 int8_t total_dest_regs = new_inst->numDestRegs(); 1424 1425 for (int dest_reg_idx = 0; 1426 dest_reg_idx < total_dest_regs; 1427 dest_reg_idx++) 1428 { 1429 PhysRegIdPtr dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx); 1430 1431 // Some registers have fixed mapping, and there is no need to track 1432 // dependencies as these instructions must be executed at commit. 1433 if (dest_reg->isFixedMapping()) { 1434 continue; 1435 } 1436 1437 if (!dependGraph.empty(dest_reg->flatIndex())) { 1438 dependGraph.dump(); 1439 panic("Dependency graph %i (%s) (flat: %i) not empty!", 1440 dest_reg->index(), dest_reg->className(), 1441 dest_reg->flatIndex()); 1442 } 1443 1444 dependGraph.setInst(dest_reg->flatIndex(), new_inst); 1445 1446 // Mark the scoreboard to say it's not yet ready. 
1447 regScoreboard[dest_reg->flatIndex()] = false; 1448 } 1449} 1450 1451template <class Impl> 1452void 1453InstructionQueue<Impl>::addIfReady(const DynInstPtr &inst) 1454{ 1455 // If the instruction now has all of its source registers 1456 // available, then add it to the list of ready instructions. 1457 if (inst->readyToIssue()) { 1458 1459 //Add the instruction to the proper ready list. 1460 if (inst->isMemRef()) { 1461 1462 DPRINTF(IQ, "Checking if memory instruction can issue.\n"); 1463 1464 // Message to the mem dependence unit that this instruction has 1465 // its registers ready. 1466 memDepUnit[inst->threadNumber].regsReady(inst); 1467 1468 return; 1469 } 1470 1471 OpClass op_class = inst->opClass(); 1472 1473 DPRINTF(IQ, "Instruction is ready to issue, putting it onto " 1474 "the ready list, PC %s opclass:%i [sn:%llu].\n", 1475 inst->pcState(), op_class, inst->seqNum); 1476 1477 readyInsts[op_class].push(inst); 1478 1479 // Will need to reorder the list if either a queue is not on the list, 1480 // or it has an older instruction than last time. 
1481 if (!queueOnList[op_class]) { 1482 addToOrderList(op_class); 1483 } else if (readyInsts[op_class].top()->seqNum < 1484 (*readyIt[op_class]).oldestInst) { 1485 listOrder.erase(readyIt[op_class]); 1486 addToOrderList(op_class); 1487 } 1488 } 1489} 1490 1491template <class Impl> 1492int 1493InstructionQueue<Impl>::countInsts() 1494{ 1495 return numEntries - freeEntries; 1496} 1497 1498template <class Impl> 1499void 1500InstructionQueue<Impl>::dumpLists() 1501{ 1502 for (int i = 0; i < Num_OpClasses; ++i) { 1503 cprintf("Ready list %i size: %i\n", i, readyInsts[i].size()); 1504 1505 cprintf("\n"); 1506 } 1507 1508 cprintf("Non speculative list size: %i\n", nonSpecInsts.size()); 1509 1510 NonSpecMapIt non_spec_it = nonSpecInsts.begin(); 1511 NonSpecMapIt non_spec_end_it = nonSpecInsts.end(); 1512 1513 cprintf("Non speculative list: "); 1514 1515 while (non_spec_it != non_spec_end_it) { 1516 cprintf("%s [sn:%llu]", (*non_spec_it).second->pcState(), 1517 (*non_spec_it).second->seqNum); 1518 ++non_spec_it; 1519 } 1520 1521 cprintf("\n"); 1522 1523 ListOrderIt list_order_it = listOrder.begin(); 1524 ListOrderIt list_order_end_it = listOrder.end(); 1525 int i = 1; 1526 1527 cprintf("List order: "); 1528 1529 while (list_order_it != list_order_end_it) { 1530 cprintf("%i OpClass:%i [sn:%llu] ", i, (*list_order_it).queueType, 1531 (*list_order_it).oldestInst); 1532 1533 ++list_order_it; 1534 ++i; 1535 } 1536 1537 cprintf("\n"); 1538} 1539 1540 1541template <class Impl> 1542void 1543InstructionQueue<Impl>::dumpInsts() 1544{ 1545 for (ThreadID tid = 0; tid < numThreads; ++tid) { 1546 int num = 0; 1547 int valid_num = 0; 1548 ListIt inst_list_it = instList[tid].begin(); 1549 1550 while (inst_list_it != instList[tid].end()) { 1551 cprintf("Instruction:%i\n", num); 1552 if (!(*inst_list_it)->isSquashed()) { 1553 if (!(*inst_list_it)->isIssued()) { 1554 ++valid_num; 1555 cprintf("Count:%i\n", valid_num); 1556 } else if ((*inst_list_it)->isMemRef() && 1557 
!(*inst_list_it)->memOpDone()) { 1558 // Loads that have not been marked as executed 1559 // still count towards the total instructions. 1560 ++valid_num; 1561 cprintf("Count:%i\n", valid_num); 1562 } 1563 } 1564 1565 cprintf("PC: %s\n[sn:%llu]\n[tid:%i]\n" 1566 "Issued:%i\nSquashed:%i\n", 1567 (*inst_list_it)->pcState(), 1568 (*inst_list_it)->seqNum, 1569 (*inst_list_it)->threadNumber, 1570 (*inst_list_it)->isIssued(), 1571 (*inst_list_it)->isSquashed()); 1572 1573 if ((*inst_list_it)->isMemRef()) { 1574 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone()); 1575 } 1576 1577 cprintf("\n"); 1578 1579 inst_list_it++; 1580 ++num; 1581 } 1582 } 1583 1584 cprintf("Insts to Execute list:\n"); 1585 1586 int num = 0; 1587 int valid_num = 0; 1588 ListIt inst_list_it = instsToExecute.begin(); 1589 1590 while (inst_list_it != instsToExecute.end()) 1591 { 1592 cprintf("Instruction:%i\n", 1593 num); 1594 if (!(*inst_list_it)->isSquashed()) { 1595 if (!(*inst_list_it)->isIssued()) { 1596 ++valid_num; 1597 cprintf("Count:%i\n", valid_num); 1598 } else if ((*inst_list_it)->isMemRef() && 1599 !(*inst_list_it)->memOpDone()) { 1600 // Loads that have not been marked as executed 1601 // still count towards the total instructions. 1602 ++valid_num; 1603 cprintf("Count:%i\n", valid_num); 1604 } 1605 } 1606 1607 cprintf("PC: %s\n[sn:%llu]\n[tid:%i]\n" 1608 "Issued:%i\nSquashed:%i\n", 1609 (*inst_list_it)->pcState(), 1610 (*inst_list_it)->seqNum, 1611 (*inst_list_it)->threadNumber, 1612 (*inst_list_it)->isIssued(), 1613 (*inst_list_it)->isSquashed()); 1614 1615 if ((*inst_list_it)->isMemRef()) { 1616 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone()); 1617 } 1618 1619 cprintf("\n"); 1620 1621 inst_list_it++; 1622 ++num; 1623 } 1624} 1625 1626#endif//__CPU_O3_INST_QUEUE_IMPL_HH__ 1627