inst_queue_impl.hh revision 13652
/*
 * Copyright (c) 2011-2014, 2017-2018 ARM Limited
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#ifndef __CPU_O3_INST_QUEUE_IMPL_HH__
#define __CPU_O3_INST_QUEUE_IMPL_HH__

#include <limits>
#include <vector>

#include "base/logging.hh"
#include "cpu/o3/fu_pool.hh"
#include "cpu/o3/inst_queue.hh"
#include "debug/IQ.hh"
#include "enums/OpClass.hh"
#include "params/DerivO3CPU.hh"
#include "sim/core.hh"

// clang complains about std::set being overloaded with Packet::set if
// we open up the entire namespace std
using std::list;

template <class Impl>
InstructionQueue<Impl>::FUCompletion::FUCompletion(const DynInstPtr &_inst,
    int fu_idx, InstructionQueue<Impl> *iq_ptr)
    : Event(Stat_Event_Pri, AutoDelete),
      inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false)
{
}

template <class Impl>
void
InstructionQueue<Impl>::FUCompletion::process()
{
    iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1);
    inst = NULL;
}


template <class Impl>
const char *
InstructionQueue<Impl>::FUCompletion::description() const
{
    return "Functional unit completion";
}

template <class Impl>
InstructionQueue<Impl>::InstructionQueue(O3CPU *cpu_ptr, IEW *iew_ptr,
                                         DerivO3CPUParams *params)
    : cpu(cpu_ptr),
      iewStage(iew_ptr),
      fuPool(params->fuPool),
      iqPolicy(params->smtIQPolicy),
      numEntries(params->numIQEntries),
      totalWidth(params->issueWidth),
      commitToIEWDelay(params->commitToIEWDelay)
{
    assert(fuPool);

    numThreads = params->numThreads;

    // Set the number of total physical registers
    // As the vector registers have two addressing modes, they are added twice
    numPhysRegs = params->numPhysIntRegs + params->numPhysFloatRegs +
                  params->numPhysVecRegs +
                  params->numPhysVecRegs * TheISA::NumVecElemPerVecReg +
                  params->numPhysVecPredRegs +
                  params->numPhysCCRegs;

    //Create an entry for each physical register within the
    //dependency graph.
    dependGraph.resize(numPhysRegs);

    // Resize the register scoreboard.
    regScoreboard.resize(numPhysRegs);

    //Initialize Mem Dependence Units
    for (ThreadID tid = 0; tid < Impl::MaxThreads; tid++) {
        memDepUnit[tid].init(params, tid);
        memDepUnit[tid].setIQ(this);
    }

    resetState();

    //Figure out resource sharing policy
    if (iqPolicy == SMTQueuePolicy::Dynamic) {
        //Set Max Entries to Total ROB Capacity
        for (ThreadID tid = 0; tid < numThreads; tid++) {
            maxEntries[tid] = numEntries;
        }

    } else if (iqPolicy == SMTQueuePolicy::Partitioned) {
        //@todo: make work if part_amt doesn't divide evenly.
        int part_amt = numEntries / numThreads;

        //Divide ROB up evenly
        for (ThreadID tid = 0; tid < numThreads; tid++) {
            maxEntries[tid] = part_amt;
        }

        DPRINTF(IQ, "IQ sharing policy set to Partitioned:"
                "%i entries per thread.\n",part_amt);
    } else if (iqPolicy == SMTQueuePolicy::Threshold) {
        double threshold = (double)params->smtIQThreshold / 100;

        int thresholdIQ = (int)((double)threshold * numEntries);

        //Divide up by threshold amount
        for (ThreadID tid = 0; tid < numThreads; tid++) {
            maxEntries[tid] = thresholdIQ;
        }

        DPRINTF(IQ, "IQ sharing policy set to Threshold:"
                "%i entries per thread.\n",thresholdIQ);
    }
    for (ThreadID tid = numThreads; tid < Impl::MaxThreads; tid++) {
        maxEntries[tid] = 0;
    }
}

template <class Impl>
InstructionQueue<Impl>::~InstructionQueue()
{
    dependGraph.reset();
#ifdef DEBUG
    cprintf("Nodes traversed: %i, removed: %i\n",
            dependGraph.nodesTraversed, dependGraph.nodesRemoved);
#endif
}

template <class Impl>
std::string
InstructionQueue<Impl>::name() const
{
    return cpu->name() + ".iq";
}

template <class Impl>
void
InstructionQueue<Impl>::regStats()
{
    using namespace Stats;
    iqInstsAdded
        .name(name() + ".iqInstsAdded")
        .desc("Number of instructions added to the IQ (excludes non-spec)")
        .prereq(iqInstsAdded);

    iqNonSpecInstsAdded
        .name(name() + ".iqNonSpecInstsAdded")
        .desc("Number of non-speculative instructions added to the IQ")
        .prereq(iqNonSpecInstsAdded);

    iqInstsIssued
        .name(name() + ".iqInstsIssued")
        .desc("Number of instructions issued")
        .prereq(iqInstsIssued);

    iqIntInstsIssued
        .name(name() + ".iqIntInstsIssued")
        .desc("Number of integer instructions issued")
        .prereq(iqIntInstsIssued);

    iqFloatInstsIssued
        .name(name() + ".iqFloatInstsIssued")
        .desc("Number of float instructions issued")
        .prereq(iqFloatInstsIssued);

    iqBranchInstsIssued
        .name(name() + ".iqBranchInstsIssued")
        .desc("Number of branch instructions issued")
        .prereq(iqBranchInstsIssued);

    iqMemInstsIssued
        .name(name() + ".iqMemInstsIssued")
        .desc("Number of memory instructions issued")
        .prereq(iqMemInstsIssued);

    iqMiscInstsIssued
        .name(name() + ".iqMiscInstsIssued")
        .desc("Number of miscellaneous instructions issued")
        .prereq(iqMiscInstsIssued);

    iqSquashedInstsIssued
        .name(name() + ".iqSquashedInstsIssued")
        .desc("Number of squashed instructions issued")
        .prereq(iqSquashedInstsIssued);

    iqSquashedInstsExamined
        .name(name() + ".iqSquashedInstsExamined")
        .desc("Number of squashed instructions iterated over during squash;"
              " mainly for profiling")
        .prereq(iqSquashedInstsExamined);

    iqSquashedOperandsExamined
        .name(name() + ".iqSquashedOperandsExamined")
        .desc("Number of squashed operands that are examined and possibly "
              "removed from graph")
        .prereq(iqSquashedOperandsExamined);

    iqSquashedNonSpecRemoved
        .name(name() + ".iqSquashedNonSpecRemoved")
        .desc("Number of squashed non-spec instructions that were removed")
        .prereq(iqSquashedNonSpecRemoved);
/*
    queueResDist
        .init(Num_OpClasses, 0, 99, 2)
        .name(name() + ".IQ:residence:")
        .desc("cycles from dispatch to issue")
        .flags(total | pdf | cdf )
        ;
    for (int i = 0; i < Num_OpClasses; ++i) {
        queueResDist.subname(i, opClassStrings[i]);
    }
*/
    numIssuedDist
        .init(0,totalWidth,1)
        .name(name() + ".issued_per_cycle")
        .desc("Number of insts issued each cycle")
        .flags(pdf)
        ;
/*
    dist_unissued
        .init(Num_OpClasses+2)
        .name(name() + ".unissued_cause")
        .desc("Reason ready instruction not issued")
        .flags(pdf | dist)
        ;
    for (int i=0; i < (Num_OpClasses + 2); ++i) {
        dist_unissued.subname(i, unissued_names[i]);
    }
*/
    statIssuedInstType
        .init(numThreads,Enums::Num_OpClass)
        .name(name() + ".FU_type")
        .desc("Type of FU issued")
        .flags(total | pdf | dist)
        ;
    statIssuedInstType.ysubnames(Enums::OpClassStrings);

    //
    // How long did instructions for a particular FU type wait prior to issue
    //
/*
    issueDelayDist
        .init(Num_OpClasses,0,99,2)
        .name(name() + ".")
        .desc("cycles from operands ready to issue")
        .flags(pdf | cdf)
        ;

    for (int i=0; i<Num_OpClasses; ++i) {
        std::stringstream subname;
        subname << opClassStrings[i] << "_delay";
        issueDelayDist.subname(i, subname.str());
    }
*/
    issueRate
        .name(name() + ".rate")
        .desc("Inst issue rate")
        .flags(total)
        ;
    issueRate = iqInstsIssued / cpu->numCycles;

    statFuBusy
        .init(Num_OpClasses)
        .name(name() + ".fu_full")
        .desc("attempts to use FU when none available")
        .flags(pdf | dist)
        ;
    for (int i=0; i < Num_OpClasses; ++i) {
        statFuBusy.subname(i, Enums::OpClassStrings[i]);
    }

    fuBusy
        .init(numThreads)
        .name(name() + ".fu_busy_cnt")
        .desc("FU busy when requested")
        .flags(total)
        ;

    fuBusyRate
        .name(name() + ".fu_busy_rate")
        .desc("FU busy rate (busy events/executed inst)")
        .flags(total)
        ;
    fuBusyRate = fuBusy / iqInstsIssued;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        // Tell mem dependence unit to reg stats as well.
        memDepUnit[tid].regStats();
    }

    intInstQueueReads
        .name(name() + ".int_inst_queue_reads")
        .desc("Number of integer instruction queue reads")
        .flags(total);

    intInstQueueWrites
        .name(name() + ".int_inst_queue_writes")
        .desc("Number of integer instruction queue writes")
        .flags(total);

    intInstQueueWakeupAccesses
        .name(name() + ".int_inst_queue_wakeup_accesses")
        .desc("Number of integer instruction queue wakeup accesses")
        .flags(total);

    fpInstQueueReads
        .name(name() + ".fp_inst_queue_reads")
        .desc("Number of floating instruction queue reads")
        .flags(total);

    fpInstQueueWrites
        .name(name() + ".fp_inst_queue_writes")
        .desc("Number of floating instruction queue writes")
        .flags(total);

    fpInstQueueWakeupAccesses
        .name(name() + ".fp_inst_queue_wakeup_accesses")
        .desc("Number of floating instruction queue wakeup accesses")
        .flags(total);

    vecInstQueueReads
        .name(name() + ".vec_inst_queue_reads")
        .desc("Number of vector instruction queue reads")
        .flags(total);

    vecInstQueueWrites
        .name(name() + ".vec_inst_queue_writes")
        .desc("Number of vector instruction queue writes")
        .flags(total);

    vecInstQueueWakeupAccesses
        .name(name() + ".vec_inst_queue_wakeup_accesses")
        .desc("Number of vector instruction queue wakeup accesses")
        .flags(total);

    intAluAccesses
        .name(name() + ".int_alu_accesses")
        .desc("Number of integer alu accesses")
        .flags(total);

    fpAluAccesses
        .name(name() + ".fp_alu_accesses")
        .desc("Number of floating point alu accesses")
        .flags(total);

    vecAluAccesses
        .name(name() + ".vec_alu_accesses")
        .desc("Number of vector alu accesses")
        .flags(total);

}

template <class Impl>
void
InstructionQueue<Impl>::resetState()
{
    //Initialize thread IQ counts
    for (ThreadID tid = 0; tid < Impl::MaxThreads; tid++) {
        count[tid] = 0;
        instList[tid].clear();
    }

    // Initialize the number of free IQ entries.
    freeEntries = numEntries;

    // Note that in actuality, the registers corresponding to the logical
    // registers start off as ready. However this doesn't matter for the
    // IQ as the instruction should have been correctly told if those
    // registers are ready in rename. Thus it can all be initialized as
    // unready.
    for (int i = 0; i < numPhysRegs; ++i) {
        regScoreboard[i] = false;
    }

    for (ThreadID tid = 0; tid < Impl::MaxThreads; ++tid) {
        squashedSeqNum[tid] = 0;
    }

    for (int i = 0; i < Num_OpClasses; ++i) {
        while (!readyInsts[i].empty())
            readyInsts[i].pop();
        queueOnList[i] = false;
        readyIt[i] = listOrder.end();
    }
    nonSpecInsts.clear();
    listOrder.clear();
    deferredMemInsts.clear();
    blockedMemInsts.clear();
    retryMemInsts.clear();
    wbOutstanding = 0;
}

template <class Impl>
void
InstructionQueue<Impl>::setActiveThreads(list<ThreadID> *at_ptr)
{
    activeThreads = at_ptr;
}

template <class Impl>
void
InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
{
    issueToExecuteQueue = i2e_ptr;
}

template <class Impl>
void
InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
{
    timeBuffer = tb_ptr;

    fromCommit = timeBuffer->getWire(-commitToIEWDelay);
}

template <class Impl>
bool
InstructionQueue<Impl>::isDrained() const
{
    bool drained = dependGraph.empty() &&
                   instsToExecute.empty() &&
                   wbOutstanding == 0;
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        drained = drained && memDepUnit[tid].isDrained();

    return drained;
}

template <class Impl>
void
InstructionQueue<Impl>::drainSanityCheck() const
{
    assert(dependGraph.empty());
    assert(instsToExecute.empty());
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        memDepUnit[tid].drainSanityCheck();
}

template <class Impl>
void
InstructionQueue<Impl>::takeOverFrom()
{
    resetState();
}

template <class Impl>
int
InstructionQueue<Impl>::entryAmount(ThreadID num_threads)
{
    if (iqPolicy == SMTQueuePolicy::Partitioned) {
        return numEntries / num_threads;
    } else {
        return 0;
    }
}


template <class Impl>
void
InstructionQueue<Impl>::resetEntries()
{
    if (iqPolicy != SMTQueuePolicy::Dynamic || numThreads > 1) {
        int active_threads = activeThreads->size();

        list<ThreadID>::iterator threads = activeThreads->begin();
        list<ThreadID>::iterator end = activeThreads->end();

        while (threads != end) {
            ThreadID tid = *threads++;

            if (iqPolicy == SMTQueuePolicy::Partitioned) {
                maxEntries[tid] = numEntries / active_threads;
            } else if (iqPolicy == SMTQueuePolicy::Threshold &&
                       active_threads == 1) {
                maxEntries[tid] = numEntries;
            }
        }
    }
}

template <class Impl>
unsigned
InstructionQueue<Impl>::numFreeEntries()
{
    return freeEntries;
}

template <class Impl>
unsigned
InstructionQueue<Impl>::numFreeEntries(ThreadID tid)
{
    return maxEntries[tid] - count[tid];
}

// Might want to do something more complex if it knows how many instructions
// will be issued this cycle.
template <class Impl>
bool
InstructionQueue<Impl>::isFull()
{
    if (freeEntries == 0) {
        return(true);
    } else {
        return(false);
    }
}

template <class Impl>
bool
InstructionQueue<Impl>::isFull(ThreadID tid)
{
    if (numFreeEntries(tid) == 0) {
        return(true);
    } else {
        return(false);
    }
}

template <class Impl>
bool
InstructionQueue<Impl>::hasReadyInsts()
{
    if (!listOrder.empty()) {
        return true;
    }

    for (int i = 0; i < Num_OpClasses; ++i) {
        if (!readyInsts[i].empty()) {
            return true;
        }
    }

    return false;
}

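// Insert a speculative instruction into the IQ: claim a free entry, link any
// not-yet-ready source registers into the dependency graph, record the
// instruction as the producer of its destination registers, and either hand
// it to the memory dependence unit (memory refs) or mark it ready to issue.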
template <class Impl>
void
InstructionQueue<Impl>::insert(const DynInstPtr &new_inst)
{
    if (new_inst->isFloating()) {
        fpInstQueueWrites++;
    } else if (new_inst->isVector()) {
        vecInstQueueWrites++;
    } else {
        intInstQueueWrites++;
    }
    // Make sure the instruction is valid
    assert(new_inst);

    DPRINTF(IQ, "Adding instruction [sn:%lli] PC %s to the IQ.\n",
            new_inst->seqNum, new_inst->pcState());

    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Look through its source registers (physical regs), and mark any
    // dependencies.
    addToDependents(new_inst);

    // Have this instruction set itself as the producer of its destination
    // register(s).
    addToProducers(new_inst);

    if (new_inst->isMemRef()) {
        memDepUnit[new_inst->threadNumber].insert(new_inst);
    } else {
        addIfReady(new_inst);
    }

    ++iqInstsAdded;

    count[new_inst->threadNumber]++;

    assert(freeEntries == (numEntries - countInsts()));
}

template <class Impl>
void
InstructionQueue<Impl>::insertNonSpec(const DynInstPtr &new_inst)
{
    // @todo: Clean up this code; can do it by setting inst as unable
    // to issue, then calling normal insert on the inst.
    if (new_inst->isFloating()) {
        fpInstQueueWrites++;
    } else if (new_inst->isVector()) {
        vecInstQueueWrites++;
    } else {
        intInstQueueWrites++;
    }

    assert(new_inst);

    nonSpecInsts[new_inst->seqNum] = new_inst;

    DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %s "
            "to the IQ.\n",
            new_inst->seqNum, new_inst->pcState());

    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Have this instruction set itself as the producer of its destination
    // register(s).
    addToProducers(new_inst);

    // If it's a memory instruction, add it to the memory dependency
    // unit.
    if (new_inst->isMemRef()) {
        memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
    }

    ++iqNonSpecInstsAdded;

    count[new_inst->threadNumber]++;

    assert(freeEntries == (numEntries - countInsts()));
}

template <class Impl>
void
InstructionQueue<Impl>::insertBarrier(const DynInstPtr &barr_inst)
{
    memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);

    insertNonSpec(barr_inst);
}

template <class Impl>
typename Impl::DynInstPtr
InstructionQueue<Impl>::getInstToExecute()
{
    assert(!instsToExecute.empty());
    DynInstPtr inst = std::move(instsToExecute.front());
    instsToExecute.pop_front();
    if (inst->isFloating()) {
        fpInstQueueReads++;
    } else if (inst->isVector()) {
        vecInstQueueReads++;
    } else {
        intInstQueueReads++;
    }
    return inst;
}

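// addToOrderList / moveToYoungerInst maintain listOrder, which keeps an entry
// per op class that has ready instructions, sorted by the sequence number of
// the oldest ready instruction of that class. scheduleReadyInsts() walks this
// list in order, so issue selection approximates oldest-first across classes.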
template <class Impl>
void
InstructionQueue<Impl>::addToOrderList(OpClass op_class)
{
    assert(!readyInsts[op_class].empty());

    ListOrderEntry queue_entry;

    queue_entry.queueType = op_class;

    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    ListOrderIt list_it = listOrder.begin();
    ListOrderIt list_end_it = listOrder.end();

    while (list_it != list_end_it) {
        if ((*list_it).oldestInst > queue_entry.oldestInst) {
            break;
        }

        list_it++;
    }

    readyIt[op_class] = listOrder.insert(list_it, queue_entry);
    queueOnList[op_class] = true;
}

template <class Impl>
void
InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
{
    // Get iterator of next item on the list
    // Delete the original iterator
    // Determine if the next item is either the end of the list or younger
    // than the new instruction. If so, then add in a new iterator right here.
    // If not, then move along.
    ListOrderEntry queue_entry;
    OpClass op_class = (*list_order_it).queueType;
    ListOrderIt next_it = list_order_it;

    ++next_it;

    queue_entry.queueType = op_class;
    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    while (next_it != listOrder.end() &&
           (*next_it).oldestInst < queue_entry.oldestInst) {
        ++next_it;
    }

    readyIt[op_class] = listOrder.insert(next_it, queue_entry);
}

template <class Impl>
void
InstructionQueue<Impl>::processFUCompletion(const DynInstPtr &inst, int fu_idx)
{
    DPRINTF(IQ, "Processing FU completion [sn:%lli]\n", inst->seqNum);
    assert(!cpu->switchedOut());
    // The CPU could have been sleeping until this op completed (*extremely*
    // long latency op). Wake it if it was. This may be overkill.
    --wbOutstanding;
    iewStage->wakeCPU();

    if (fu_idx > -1)
        fuPool->freeUnitNextCycle(fu_idx);

    // @todo: Ensure that these FU Completions happen at the beginning
    // of a cycle, otherwise they could add too many instructions to
    // the queue.
    issueToExecuteQueue->access(-1)->size++;
    instsToExecute.push_back(inst);
}

// @todo: Figure out a better way to remove the squashed items from the
// lists. Checking the top item of each list to see if it's squashed
// wastes time and forces jumps.
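// scheduleReadyInsts: first re-inject deferred and cache-blocked memory
// instructions that have become ready again, then walk listOrder oldest-first,
// acquiring a functional unit for each candidate and pushing up to totalWidth
// instructions into the issue-to-execute queue per cycle.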
template <class Impl>
void
InstructionQueue<Impl>::scheduleReadyInsts()
{
    DPRINTF(IQ, "Attempting to schedule ready instructions from "
            "the IQ.\n");

    IssueStruct *i2e_info = issueToExecuteQueue->access(0);

    DynInstPtr mem_inst;
    while (mem_inst = std::move(getDeferredMemInstToExecute())) {
        addReadyMemInst(mem_inst);
    }

    // See if any cache blocked instructions are able to be executed
    while (mem_inst = std::move(getBlockedMemInstToExecute())) {
        addReadyMemInst(mem_inst);
    }

    // Have iterator to head of the list
    // While I haven't exceeded bandwidth or reached the end of the list,
    // Try to get a FU that can do what this op needs.
    // If successful, change the oldestInst to the new top of the list, put
    // the queue in the proper place in the list.
    // Increment the iterator.
    // This will avoid trying to schedule a certain op class if there are no
    // FUs that handle it.
    int total_issued = 0;
    ListOrderIt order_it = listOrder.begin();
    ListOrderIt order_end_it = listOrder.end();

    while (total_issued < totalWidth && order_it != order_end_it) {
        OpClass op_class = (*order_it).queueType;

        assert(!readyInsts[op_class].empty());

        DynInstPtr issuing_inst = readyInsts[op_class].top();

        if (issuing_inst->isFloating()) {
            fpInstQueueReads++;
        } else if (issuing_inst->isVector()) {
            vecInstQueueReads++;
        } else {
            intInstQueueReads++;
        }

        assert(issuing_inst->seqNum == (*order_it).oldestInst);

        if (issuing_inst->isSquashed()) {
            readyInsts[op_class].pop();

            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            listOrder.erase(order_it++);

            ++iqSquashedInstsIssued;

            continue;
        }

        int idx = FUPool::NoCapableFU;
        Cycles op_latency = Cycles(1);
        ThreadID tid = issuing_inst->threadNumber;

        if (op_class != No_OpClass) {
            idx = fuPool->getUnit(op_class);
            if (issuing_inst->isFloating()) {
                fpAluAccesses++;
            } else if (issuing_inst->isVector()) {
                vecAluAccesses++;
            } else {
                intAluAccesses++;
            }
            if (idx > FUPool::NoFreeFU) {
                op_latency = fuPool->getOpLatency(op_class);
            }
        }

        // If we have an instruction that doesn't require a FU, or a
        // valid FU, then schedule for execution.
        if (idx != FUPool::NoFreeFU) {
            if (op_latency == Cycles(1)) {
                i2e_info->size++;
                instsToExecute.push_back(issuing_inst);

                // Add the FU onto the list of FU's to be freed next
                // cycle if we used one.
                if (idx >= 0)
                    fuPool->freeUnitNextCycle(idx);
            } else {
                bool pipelined = fuPool->isPipelined(op_class);
                // Generate completion event for the FU
                ++wbOutstanding;
                FUCompletion *execution = new FUCompletion(issuing_inst,
                                                           idx, this);

                cpu->schedule(execution,
                              cpu->clockEdge(Cycles(op_latency - 1)));

                if (!pipelined) {
                    // If FU isn't pipelined, then it must be freed
                    // upon the execution completing.
                    execution->setFreeFU();
                } else {
                    // Add the FU onto the list of FU's to be freed next cycle.
                    fuPool->freeUnitNextCycle(idx);
                }
            }

            DPRINTF(IQ, "Thread %i: Issuing instruction PC %s "
                    "[sn:%lli]\n",
                    tid, issuing_inst->pcState(),
                    issuing_inst->seqNum);

            readyInsts[op_class].pop();

            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            issuing_inst->setIssued();
            ++total_issued;

#if TRACING_ON
            issuing_inst->issueTick = curTick() - issuing_inst->fetchTick;
#endif

            if (!issuing_inst->isMemRef()) {
                // Memory instructions can not be freed from the IQ until they
                // complete.
                ++freeEntries;
                count[tid]--;
                issuing_inst->clearInIQ();
            } else {
                memDepUnit[tid].issue(issuing_inst);
            }

            listOrder.erase(order_it++);
            statIssuedInstType[tid][op_class]++;
        } else {
            statFuBusy[op_class]++;
            fuBusy[tid]++;
            ++order_it;
        }
    }

    numIssuedDist.sample(total_issued);
    iqInstsIssued += total_issued;

    // If we issued any instructions, tell the CPU we had activity.
    // @todo If the way deferred memory instructions are handled due to
    // translation changes then the deferredMemInsts condition should be
    // removed from the code below.
    if (total_issued || !retryMemInsts.empty() || !deferredMemInsts.empty()) {
        cpu->activityThisCycle();
    } else {
        DPRINTF(IQ, "Not able to schedule any instructions.\n");
    }
}

template <class Impl>
void
InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
{
    DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
            "to execute.\n", inst);

    NonSpecMapIt inst_it = nonSpecInsts.find(inst);

    assert(inst_it != nonSpecInsts.end());

    ThreadID tid = (*inst_it).second->threadNumber;

    (*inst_it).second->setAtCommit();

    (*inst_it).second->setCanIssue();

    if (!(*inst_it).second->isMemRef()) {
        addIfReady((*inst_it).second);
    } else {
        memDepUnit[tid].nonSpecInstReady((*inst_it).second);
    }

    (*inst_it).second = NULL;

    nonSpecInsts.erase(inst_it);
}

template <class Impl>
void
InstructionQueue<Impl>::commit(const InstSeqNum &inst, ThreadID tid)
{
    DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n",
            tid,inst);

    ListIt iq_it = instList[tid].begin();

    while (iq_it != instList[tid].end() &&
           (*iq_it)->seqNum <= inst) {
        ++iq_it;
        instList[tid].pop_front();
    }

    assert(freeEntries == (numEntries - countInsts()));
}

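// wakeDependents: called once an instruction has safely completed. Pops every
// waiting consumer off the completed instruction's destination-register
// dependency chains, marks the corresponding source registers ready, and
// updates the register scoreboard. Returns the number of dependents woken.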
template <class Impl>
int
InstructionQueue<Impl>::wakeDependents(const DynInstPtr &completed_inst)
{
    int dependents = 0;

    // The instruction queue here takes care of both floating and int ops
    if (completed_inst->isFloating()) {
        fpInstQueueWakeupAccesses++;
    } else if (completed_inst->isVector()) {
        vecInstQueueWakeupAccesses++;
    } else {
        intInstQueueWakeupAccesses++;
    }

    DPRINTF(IQ, "Waking dependents of completed instruction.\n");

    assert(!completed_inst->isSquashed());

    // Tell the memory dependence unit to wake any dependents on this
    // instruction if it is a memory instruction. Also complete the memory
    // instruction at this point since we know it executed without issues.
    // @todo: Might want to rename "completeMemInst" to something that
    // indicates that it won't need to be replayed, and call this
    // earlier. Might not be a big deal.
    if (completed_inst->isMemRef()) {
        memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
        completeMemInst(completed_inst);
    } else if (completed_inst->isMemBarrier() ||
               completed_inst->isWriteBarrier()) {
        memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
    }

    for (int dest_reg_idx = 0;
         dest_reg_idx < completed_inst->numDestRegs();
         dest_reg_idx++)
    {
        PhysRegIdPtr dest_reg =
            completed_inst->renamedDestRegIdx(dest_reg_idx);

        // Special case of unique or control registers. They are not
        // handled by the IQ and thus have no dependency graph entry.
        if (dest_reg->isFixedMapping()) {
            DPRINTF(IQ, "Reg %d [%s] is part of a fixed mapping, skipping\n",
                    dest_reg->index(), dest_reg->className());
            continue;
        }

        DPRINTF(IQ, "Waking any dependents on register %i (%s).\n",
                dest_reg->index(),
                dest_reg->className());

        //Go through the dependency chain, marking the registers as
        //ready within the waiting instructions.
        DynInstPtr dep_inst = dependGraph.pop(dest_reg->flatIndex());

        while (dep_inst) {
            DPRINTF(IQ, "Waking up a dependent instruction, [sn:%lli] "
                    "PC %s.\n", dep_inst->seqNum, dep_inst->pcState());

            // Might want to give more information to the instruction
            // so that it knows which of its source registers is
            // ready. However that would mean that the dependency
            // graph entries would need to hold the src_reg_idx.
            dep_inst->markSrcRegReady();

            addIfReady(dep_inst);

            dep_inst = dependGraph.pop(dest_reg->flatIndex());

            ++dependents;
        }

        // Reset the head node now that all of its dependents have
        // been woken up.
        assert(dependGraph.empty(dest_reg->flatIndex()));
        dependGraph.clearInst(dest_reg->flatIndex());

        // Mark the scoreboard as having that register ready.
        regScoreboard[dest_reg->flatIndex()] = true;
    }
    return dependents;
}

template <class Impl>
void
InstructionQueue<Impl>::addReadyMemInst(const DynInstPtr &ready_inst)
{
    OpClass op_class = ready_inst->opClass();

    readyInsts[op_class].push(ready_inst);

    // Will need to reorder the list if either a queue is not on the list,
    // or it has an older instruction than last time.
    if (!queueOnList[op_class]) {
        addToOrderList(op_class);
    } else if (readyInsts[op_class].top()->seqNum <
               (*readyIt[op_class]).oldestInst) {
        listOrder.erase(readyIt[op_class]);
        addToOrderList(op_class);
    }

    DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
            "the ready list, PC %s opclass:%i [sn:%lli].\n",
            ready_inst->pcState(), op_class, ready_inst->seqNum);
}

template <class Impl>
void
InstructionQueue<Impl>::rescheduleMemInst(const DynInstPtr &resched_inst)
{
    DPRINTF(IQ, "Rescheduling mem inst [sn:%lli]\n", resched_inst->seqNum);

    // Reset DTB translation state
    resched_inst->translationStarted(false);
    resched_inst->translationCompleted(false);

    resched_inst->clearCanIssue();
    memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
}

template <class Impl>
void
InstructionQueue<Impl>::replayMemInst(const DynInstPtr &replay_inst)
{
    memDepUnit[replay_inst->threadNumber].replay();
}

template <class Impl>
void
InstructionQueue<Impl>::completeMemInst(const DynInstPtr &completed_inst)
{
    ThreadID tid = completed_inst->threadNumber;

    DPRINTF(IQ, "Completing mem instruction PC: %s [sn:%lli]\n",
            completed_inst->pcState(), completed_inst->seqNum);

    ++freeEntries;

    completed_inst->memOpDone(true);

    memDepUnit[tid].completed(completed_inst);
    count[tid]--;
}

template <class Impl>
void
InstructionQueue<Impl>::deferMemInst(const DynInstPtr &deferred_inst)
{
    deferredMemInsts.push_back(deferred_inst);
}

template <class Impl>
void
InstructionQueue<Impl>::blockMemInst(const DynInstPtr &blocked_inst)
{
    blocked_inst->clearIssued();
    blocked_inst->clearCanIssue();
    blockedMemInsts.push_back(blocked_inst);
}

template <class Impl>
void
InstructionQueue<Impl>::cacheUnblocked()
{
    retryMemInsts.splice(retryMemInsts.end(), blockedMemInsts);
    // Get the CPU ticking again
    cpu->wakeCPU();
}

template <class Impl>
typename Impl::DynInstPtr
InstructionQueue<Impl>::getDeferredMemInstToExecute()
{
    for (ListIt it = deferredMemInsts.begin(); it != deferredMemInsts.end();
         ++it) {
        if ((*it)->translationCompleted() || (*it)->isSquashed()) {
            DynInstPtr mem_inst = std::move(*it);
            deferredMemInsts.erase(it);
            return mem_inst;
        }
    }
    return nullptr;
}

template <class Impl>
typename Impl::DynInstPtr
InstructionQueue<Impl>::getBlockedMemInstToExecute()
{
    if (retryMemInsts.empty()) {
        return nullptr;
    } else {
        DynInstPtr mem_inst = std::move(retryMemInsts.front());
        retryMemInsts.pop_front();
        return mem_inst;
    }
}

template <class Impl>
void
InstructionQueue<Impl>::violation(const DynInstPtr &store,
                                  const DynInstPtr &faulting_load)
{
    intInstQueueWrites++;
    memDepUnit[store->threadNumber].violation(store, faulting_load);
}

template <class Impl>
void
InstructionQueue<Impl>::squash(ThreadID tid)
{
    DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
            "the IQ.\n", tid);

    // Read instruction sequence number of last instruction out of the
    // time buffer.
    squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;

    doSquash(tid);

    // Also tell the memory dependence unit to squash.
    memDepUnit[tid].squash(squashedSeqNum[tid], tid);
}

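// doSquash: walk a thread's instruction list from the tail and squash every
// instruction younger than squashedSeqNum: unlink un-issued sources from the
// dependency graph (or drop the entry from nonSpecInsts), reclaim the IQ
// entry if it was still held, clear the dependency-graph head nodes for the
// instruction's destinations, and erase it from the thread's list.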
template <class Impl>
void
InstructionQueue<Impl>::doSquash(ThreadID tid)
{
    // Start at the tail.
    ListIt squash_it = instList[tid].end();
    --squash_it;

    DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
            tid, squashedSeqNum[tid]);

    // Squash any instructions younger than the squashed sequence number
    // given.
    while (squash_it != instList[tid].end() &&
           (*squash_it)->seqNum > squashedSeqNum[tid]) {

        DynInstPtr squashed_inst = (*squash_it);
        if (squashed_inst->isFloating()) {
            fpInstQueueWrites++;
        } else if (squashed_inst->isVector()) {
            vecInstQueueWrites++;
        } else {
            intInstQueueWrites++;
        }

        // Only handle the instruction if it actually is in the IQ and
        // hasn't already been squashed in the IQ.
        if (squashed_inst->threadNumber != tid ||
            squashed_inst->isSquashedInIQ()) {
            --squash_it;
            continue;
        }

        if (!squashed_inst->isIssued() ||
            (squashed_inst->isMemRef() &&
             !squashed_inst->memOpDone())) {

            DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %s squashed.\n",
                    tid, squashed_inst->seqNum, squashed_inst->pcState());

            bool is_acq_rel = squashed_inst->isMemBarrier() &&
                        (squashed_inst->isLoad() ||
                         squashed_inst->isAtomic() ||
                         (squashed_inst->isStore() &&
                          !squashed_inst->isStoreConditional()));

            // Remove the instruction from the dependency list.
            if (is_acq_rel ||
                (!squashed_inst->isNonSpeculative() &&
                 !squashed_inst->isStoreConditional() &&
                 !squashed_inst->isAtomic() &&
                 !squashed_inst->isMemBarrier() &&
                 !squashed_inst->isWriteBarrier())) {

                for (int src_reg_idx = 0;
                     src_reg_idx < squashed_inst->numSrcRegs();
                     src_reg_idx++)
                {
                    PhysRegIdPtr src_reg =
                        squashed_inst->renamedSrcRegIdx(src_reg_idx);

                    // Only remove it from the dependency graph if it
                    // was placed there in the first place.

                    // Instead of doing a linked list traversal, we
                    // can just remove these squashed instructions
                    // either at issue time, or when the register is
                    // overwritten. The only downside to this is it
                    // leaves more room for error.

                    if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
                        !src_reg->isFixedMapping()) {
                        dependGraph.remove(src_reg->flatIndex(),
                                           squashed_inst);
                    }

                    ++iqSquashedOperandsExamined;
                }

            } else if (!squashed_inst->isStoreConditional() ||
                       !squashed_inst->isCompleted()) {
                NonSpecMapIt ns_inst_it =
                    nonSpecInsts.find(squashed_inst->seqNum);

                // we remove non-speculative instructions from
                // nonSpecInsts already when they are ready, and so we
                // cannot always expect to find them
                if (ns_inst_it == nonSpecInsts.end()) {
                    // loads that became ready but stalled on a
                    // blocked cache are already removed from
                    // nonSpecInsts, and have not faulted
                    assert(squashed_inst->getFault() != NoFault ||
                           squashed_inst->isMemRef());
                } else {

                    (*ns_inst_it).second = NULL;

                    nonSpecInsts.erase(ns_inst_it);

                    ++iqSquashedNonSpecRemoved;
                }
            }

            // Might want to also clear out the head of the dependency graph.

            // Mark it as squashed within the IQ.
            squashed_inst->setSquashedInIQ();

            // @todo: Remove this hack where several statuses are set so the
            // inst will flow through the rest of the pipeline.
            squashed_inst->setIssued();
            squashed_inst->setCanCommit();
            squashed_inst->clearInIQ();

            //Update Thread IQ Count
            count[squashed_inst->threadNumber]--;

            ++freeEntries;
        }

        // IQ clears out the heads of the dependency graph only when
        // instructions reach writeback stage. If an instruction is squashed
        // before writeback stage, its head of dependency graph would not be
        // cleared out; it holds the instruction's DynInstPtr. This prevents
        // freeing the squashed instruction's DynInst.
        // Thus, we need to manually clear out the squashed instructions'
        // heads of dependency graph.
        for (int dest_reg_idx = 0;
             dest_reg_idx < squashed_inst->numDestRegs();
             dest_reg_idx++)
        {
            PhysRegIdPtr dest_reg =
                squashed_inst->renamedDestRegIdx(dest_reg_idx);
            if (dest_reg->isFixedMapping()){
                continue;
            }
            assert(dependGraph.empty(dest_reg->flatIndex()));
            dependGraph.clearInst(dest_reg->flatIndex());
        }
        instList[tid].erase(squash_it--);
        ++iqSquashedInstsExamined;
    }
}

template <class Impl>
bool
InstructionQueue<Impl>::addToDependents(const DynInstPtr &new_inst)
{
    // Loop through the instruction's source registers, adding
    // them to the dependency list if they are not ready.
    int8_t total_src_regs = new_inst->numSrcRegs();
    bool return_val = false;

    for (int src_reg_idx = 0;
         src_reg_idx < total_src_regs;
         src_reg_idx++)
    {
        // Only add it to the dependency graph if it's not ready.
        if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
            PhysRegIdPtr src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);

            // Check the IQ's scoreboard to make sure the register
            // hasn't become ready while the instruction was in flight
            // between stages. Only if it really isn't ready should
            // it be added to the dependency graph.
            if (src_reg->isFixedMapping()) {
                continue;
            } else if (!regScoreboard[src_reg->flatIndex()]) {
                DPRINTF(IQ, "Instruction PC %s has src reg %i (%s) that "
                        "is being added to the dependency chain.\n",
                        new_inst->pcState(), src_reg->index(),
                        src_reg->className());

                dependGraph.insert(src_reg->flatIndex(), new_inst);

                // Change the return value to indicate that something
                // was added to the dependency graph.
                return_val = true;
            } else {
                DPRINTF(IQ, "Instruction PC %s has src reg %i (%s) that "
                        "became ready before it reached the IQ.\n",
                        new_inst->pcState(), src_reg->index(),
                        src_reg->className());
                // Mark a register ready within the instruction.
                new_inst->markSrcRegReady(src_reg_idx);
            }
        }
    }

    return return_val;
}

template <class Impl>
void
InstructionQueue<Impl>::addToProducers(const DynInstPtr &new_inst)
{
    // Nothing really needs to be marked when an instruction becomes
    // the producer of a register's value, but for convenience a ptr
    // to the producing instruction will be placed in the head node of
    // the dependency links.
    int8_t total_dest_regs = new_inst->numDestRegs();

    for (int dest_reg_idx = 0;
         dest_reg_idx < total_dest_regs;
         dest_reg_idx++)
    {
        PhysRegIdPtr dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);

        // Some registers have fixed mapping, and there is no need to track
        // dependencies as these instructions must be executed at commit.
        if (dest_reg->isFixedMapping()) {
            continue;
        }

        if (!dependGraph.empty(dest_reg->flatIndex())) {
            dependGraph.dump();
            panic("Dependency graph %i (%s) (flat: %i) not empty!",
                  dest_reg->index(), dest_reg->className(),
                  dest_reg->flatIndex());
        }

        dependGraph.setInst(dest_reg->flatIndex(), new_inst);

        // Mark the scoreboard to say it's not yet ready.
        regScoreboard[dest_reg->flatIndex()] = false;
    }
}

template <class Impl>
void
InstructionQueue<Impl>::addIfReady(const DynInstPtr &inst)
{
    // If the instruction now has all of its source registers
    // available, then add it to the list of ready instructions.
    if (inst->readyToIssue()) {

        //Add the instruction to the proper ready list.
        if (inst->isMemRef()) {

            DPRINTF(IQ, "Checking if memory instruction can issue.\n");

            // Message to the mem dependence unit that this instruction has
            // its registers ready.
            memDepUnit[inst->threadNumber].regsReady(inst);

            return;
        }

        OpClass op_class = inst->opClass();

        DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
                "the ready list, PC %s opclass:%i [sn:%lli].\n",
                inst->pcState(), op_class, inst->seqNum);

        readyInsts[op_class].push(inst);

        // Will need to reorder the list if either a queue is not on the list,
        // or it has an older instruction than last time.
        if (!queueOnList[op_class]) {
            addToOrderList(op_class);
        } else if (readyInsts[op_class].top()->seqNum <
                   (*readyIt[op_class]).oldestInst) {
            listOrder.erase(readyIt[op_class]);
            addToOrderList(op_class);
        }
    }
}

template <class Impl>
int
InstructionQueue<Impl>::countInsts()
{
#if 0
    //ksewell:This works but definitely could use a cleaner write
    //with a more intuitive way of counting. Right now it's
    //just brute force ....
    // Change the #if if you want to use this method.
    int total_insts = 0;

    for (ThreadID tid = 0; tid < numThreads; ++tid) {
        ListIt count_it = instList[tid].begin();

        while (count_it != instList[tid].end()) {
            if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
                if (!(*count_it)->isIssued()) {
                    ++total_insts;
                } else if ((*count_it)->isMemRef() &&
                           !(*count_it)->memOpDone) {
                    // Loads that have not been marked as executed still count
                    // towards the total instructions.
                    ++total_insts;
                }
            }

            ++count_it;
        }
    }

    return total_insts;
#else
    return numEntries - freeEntries;
#endif
}

template <class Impl>
void
InstructionQueue<Impl>::dumpLists()
{
    for (int i = 0; i < Num_OpClasses; ++i) {
        cprintf("Ready list %i size: %i\n", i, readyInsts[i].size());

        cprintf("\n");
    }

    cprintf("Non speculative list size: %i\n", nonSpecInsts.size());

    NonSpecMapIt non_spec_it = nonSpecInsts.begin();
    NonSpecMapIt non_spec_end_it = nonSpecInsts.end();

    cprintf("Non speculative list: ");

    while (non_spec_it != non_spec_end_it) {
        cprintf("%s [sn:%lli]", (*non_spec_it).second->pcState(),
                (*non_spec_it).second->seqNum);
        ++non_spec_it;
    }

    cprintf("\n");

    ListOrderIt list_order_it = listOrder.begin();
    ListOrderIt list_order_end_it = listOrder.end();
    int i = 1;

    cprintf("List order: ");

    while (list_order_it != list_order_end_it) {
        cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
                (*list_order_it).oldestInst);

        ++list_order_it;
        ++i;
    }

    cprintf("\n");
}


template <class Impl>
void
InstructionQueue<Impl>::dumpInsts()
{
    for (ThreadID tid = 0; tid < numThreads; ++tid) {
        int num = 0;
        int valid_num = 0;
        ListIt inst_list_it = instList[tid].begin();

        while (inst_list_it != instList[tid].end()) {
            cprintf("Instruction:%i\n", num);
            if (!(*inst_list_it)->isSquashed()) {
                if (!(*inst_list_it)->isIssued()) {
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                } else if ((*inst_list_it)->isMemRef() &&
                           !(*inst_list_it)->memOpDone()) {
                    // Loads that have not been marked as executed
                    // still count towards the total instructions.
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                }
            }

            cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n"
                    "Issued:%i\nSquashed:%i\n",
                    (*inst_list_it)->pcState(),
                    (*inst_list_it)->seqNum,
                    (*inst_list_it)->threadNumber,
                    (*inst_list_it)->isIssued(),
                    (*inst_list_it)->isSquashed());

            if ((*inst_list_it)->isMemRef()) {
                cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone());
            }

            cprintf("\n");

            inst_list_it++;
            ++num;
        }
    }

    cprintf("Insts to Execute list:\n");

    int num = 0;
    int valid_num = 0;
    ListIt inst_list_it = instsToExecute.begin();

    while (inst_list_it != instsToExecute.end())
    {
        cprintf("Instruction:%i\n",
                num);
        if (!(*inst_list_it)->isSquashed()) {
            if (!(*inst_list_it)->isIssued()) {
                ++valid_num;
                cprintf("Count:%i\n", valid_num);
            } else if ((*inst_list_it)->isMemRef() &&
                       !(*inst_list_it)->memOpDone()) {
                // Loads that have not been marked as executed
                // still count towards the total instructions.
                ++valid_num;
                cprintf("Count:%i\n", valid_num);
            }
        }

        cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n"
                "Issued:%i\nSquashed:%i\n",
                (*inst_list_it)->pcState(),
                (*inst_list_it)->seqNum,
                (*inst_list_it)->threadNumber,
                (*inst_list_it)->isIssued(),
                (*inst_list_it)->isSquashed());

        if ((*inst_list_it)->isMemRef()) {
            cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone());
        }

        cprintf("\n");

        inst_list_it++;
        ++num;
    }
}

#endif//__CPU_O3_INST_QUEUE_IMPL_HH__