inst_queue_impl.hh revision 13449
/*
 * Copyright (c) 2011-2014 ARM Limited
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#ifndef __CPU_O3_INST_QUEUE_IMPL_HH__
#define __CPU_O3_INST_QUEUE_IMPL_HH__

#include <limits>
#include <vector>

#include "base/logging.hh"
#include "cpu/o3/fu_pool.hh"
#include "cpu/o3/inst_queue.hh"
#include "debug/IQ.hh"
#include "enums/OpClass.hh"
#include "params/DerivO3CPU.hh"
#include "sim/core.hh"

// clang complains about std::set being overloaded with Packet::set if
// we open up the entire namespace std
using std::list;

template <class Impl>
InstructionQueue<Impl>::FUCompletion::FUCompletion(const DynInstPtr &_inst,
    int fu_idx, InstructionQueue<Impl> *iq_ptr)
    : Event(Stat_Event_Pri, AutoDelete),
      inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false)
{
}

template <class Impl>
void
InstructionQueue<Impl>::FUCompletion::process()
{
    iqPtr->processFUCompletion(inst, freeFU ?
                                     fuIdx : -1);
    inst = NULL;
}


template <class Impl>
const char *
InstructionQueue<Impl>::FUCompletion::description() const
{
    return "Functional unit completion";
}

template <class Impl>
InstructionQueue<Impl>::InstructionQueue(O3CPU *cpu_ptr, IEW *iew_ptr,
                                         DerivO3CPUParams *params)
    : cpu(cpu_ptr),
      iewStage(iew_ptr),
      fuPool(params->fuPool),
      numEntries(params->numIQEntries),
      totalWidth(params->issueWidth),
      commitToIEWDelay(params->commitToIEWDelay)
{
    assert(fuPool);

    numThreads = params->numThreads;

    // Set the number of total physical registers
    // As the vector registers have two addressing modes, they are added twice
    numPhysRegs = params->numPhysIntRegs + params->numPhysFloatRegs +
                  params->numPhysVecRegs +
                  params->numPhysVecRegs * TheISA::NumVecElemPerVecReg +
                  params->numPhysCCRegs;

    //Create an entry for each physical register within the
    //dependency graph.
    dependGraph.resize(numPhysRegs);

    // Resize the register scoreboard.
    regScoreboard.resize(numPhysRegs);

    //Initialize Mem Dependence Units
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        memDepUnit[tid].init(params, tid);
        memDepUnit[tid].setIQ(this);
    }

    resetState();

    std::string policy = params->smtIQPolicy;

    //Convert string to lowercase
    std::transform(policy.begin(), policy.end(), policy.begin(),
                   (int(*)(int)) tolower);

    //Figure out resource sharing policy
    if (policy == "dynamic") {
        iqPolicy = Dynamic;

        //Set Max Entries to Total ROB Capacity
        for (ThreadID tid = 0; tid < numThreads; tid++) {
            maxEntries[tid] = numEntries;
        }

    } else if (policy == "partitioned") {
        iqPolicy = Partitioned;

        //@todo: make work if part_amt doesn't divide evenly.
        int part_amt = numEntries / numThreads;

        //Divide ROB up evenly
        for (ThreadID tid = 0; tid < numThreads; tid++) {
            maxEntries[tid] = part_amt;
        }

        DPRINTF(IQ, "IQ sharing policy set to Partitioned:"
                "%i entries per thread.\n", part_amt);
    } else if (policy == "threshold") {
        iqPolicy = Threshold;

        double threshold = (double)params->smtIQThreshold / 100;

        int thresholdIQ = (int)((double)threshold * numEntries);

        //Divide up by threshold amount
        for (ThreadID tid = 0; tid < numThreads; tid++) {
            maxEntries[tid] = thresholdIQ;
        }

        DPRINTF(IQ, "IQ sharing policy set to Threshold:"
                "%i entries per thread.\n", thresholdIQ);
    } else {
        panic("Invalid IQ sharing policy. Options are: Dynamic, "
              "Partitioned, Threshold");
    }
}

template <class Impl>
InstructionQueue<Impl>::~InstructionQueue()
{
    dependGraph.reset();
#ifdef DEBUG
    cprintf("Nodes traversed: %i, removed: %i\n",
            dependGraph.nodesTraversed, dependGraph.nodesRemoved);
#endif
}

template <class Impl>
std::string
InstructionQueue<Impl>::name() const
{
    return cpu->name() + ".iq";
}

template <class Impl>
void
InstructionQueue<Impl>::regStats()
{
    using namespace Stats;
    iqInstsAdded
        .name(name() + ".iqInstsAdded")
        .desc("Number of instructions added to the IQ (excludes non-spec)")
        .prereq(iqInstsAdded);

    iqNonSpecInstsAdded
        .name(name() + ".iqNonSpecInstsAdded")
        .desc("Number of non-speculative instructions added to the IQ")
        .prereq(iqNonSpecInstsAdded);

    iqInstsIssued
        .name(name() + ".iqInstsIssued")
        .desc("Number of instructions issued")
        .prereq(iqInstsIssued);

    iqIntInstsIssued
        .name(name() + ".iqIntInstsIssued")
        .desc("Number of integer instructions issued")
        .prereq(iqIntInstsIssued);

    iqFloatInstsIssued
        .name(name() + ".iqFloatInstsIssued")
        .desc("Number of float instructions issued")
        .prereq(iqFloatInstsIssued);

    iqBranchInstsIssued
        .name(name() + ".iqBranchInstsIssued")
        .desc("Number of branch instructions issued")
        .prereq(iqBranchInstsIssued);

    iqMemInstsIssued
        .name(name() + ".iqMemInstsIssued")
        .desc("Number of memory instructions issued")
        .prereq(iqMemInstsIssued);

    iqMiscInstsIssued
        .name(name() + ".iqMiscInstsIssued")
        .desc("Number of miscellaneous instructions issued")
        .prereq(iqMiscInstsIssued);

    iqSquashedInstsIssued
        .name(name() + ".iqSquashedInstsIssued")
        .desc("Number of squashed instructions issued")
        .prereq(iqSquashedInstsIssued);

    iqSquashedInstsExamined
        .name(name() + ".iqSquashedInstsExamined")
        .desc("Number of squashed instructions iterated over during squash;"
              " mainly for profiling")
        .prereq(iqSquashedInstsExamined);

    iqSquashedOperandsExamined
        .name(name() + ".iqSquashedOperandsExamined")
        .desc("Number of squashed operands that are examined and possibly "
              "removed from graph")
        .prereq(iqSquashedOperandsExamined);

    iqSquashedNonSpecRemoved
        .name(name() + ".iqSquashedNonSpecRemoved")
        .desc("Number of squashed non-spec instructions that were removed")
        .prereq(iqSquashedNonSpecRemoved);
/*
    queueResDist
        .init(Num_OpClasses, 0, 99, 2)
        .name(name() + ".IQ:residence:")
        .desc("cycles from dispatch to issue")
        .flags(total | pdf | cdf )
        ;
    for (int i = 0; i < Num_OpClasses; ++i) {
        queueResDist.subname(i, opClassStrings[i]);
    }
*/
    numIssuedDist
        .init(0,totalWidth,1)
        .name(name() + ".issued_per_cycle")
        .desc("Number of insts issued each cycle")
        .flags(pdf)
        ;
/*
    dist_unissued
        .init(Num_OpClasses+2)
        .name(name() + ".unissued_cause")
        .desc("Reason ready instruction not issued")
        .flags(pdf | dist)
        ;
    for (int i=0; i < (Num_OpClasses + 2); ++i) {
        dist_unissued.subname(i, unissued_names[i]);
    }
*/
    statIssuedInstType
        .init(numThreads,Enums::Num_OpClass)
        .name(name() + ".FU_type")
        .desc("Type of FU issued")
        .flags(total | pdf | dist)
        ;
    statIssuedInstType.ysubnames(Enums::OpClassStrings);

    //
    // How long did instructions for a particular FU type wait prior to issue
    //
/*
    issueDelayDist
        .init(Num_OpClasses,0,99,2)
        .name(name() + ".")
        .desc("cycles from operands ready to issue")
        .flags(pdf | cdf)
        ;

    for (int i=0; i<Num_OpClasses; ++i) {
        std::stringstream subname;
        subname << opClassStrings[i] << "_delay";
        issueDelayDist.subname(i, subname.str());
    }
*/
    issueRate
        .name(name() + ".rate")
        .desc("Inst issue rate")
        .flags(total)
        ;
    issueRate = iqInstsIssued / cpu->numCycles;

    statFuBusy
        .init(Num_OpClasses)
        .name(name() + ".fu_full")
        .desc("attempts to use FU when none available")
        .flags(pdf | dist)
        ;
    for (int i=0; i < Num_OpClasses; ++i) {
        statFuBusy.subname(i, Enums::OpClassStrings[i]);
    }

    fuBusy
        .init(numThreads)
        .name(name() + ".fu_busy_cnt")
        .desc("FU busy when requested")
        .flags(total)
        ;

    fuBusyRate
        .name(name() + ".fu_busy_rate")
        .desc("FU busy rate (busy events/executed inst)")
        .flags(total)
        ;
    fuBusyRate = fuBusy / iqInstsIssued;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        // Tell mem dependence unit to reg stats as well.
        memDepUnit[tid].regStats();
    }

    intInstQueueReads
        .name(name() + ".int_inst_queue_reads")
        .desc("Number of integer instruction queue reads")
        .flags(total);

    intInstQueueWrites
        .name(name() + ".int_inst_queue_writes")
        .desc("Number of integer instruction queue writes")
        .flags(total);

    intInstQueueWakeupAccesses
        .name(name() + ".int_inst_queue_wakeup_accesses")
        .desc("Number of integer instruction queue wakeup accesses")
        .flags(total);

    fpInstQueueReads
        .name(name() + ".fp_inst_queue_reads")
        .desc("Number of floating instruction queue reads")
        .flags(total);

    fpInstQueueWrites
        .name(name() + ".fp_inst_queue_writes")
        .desc("Number of floating instruction queue writes")
        .flags(total);

    fpInstQueueWakeupAccesses
        .name(name() + ".fp_inst_queue_wakeup_accesses")
        .desc("Number of floating instruction queue wakeup accesses")
        .flags(total);

    vecInstQueueReads
        .name(name() + ".vec_inst_queue_reads")
        .desc("Number of vector instruction queue reads")
        .flags(total);

    vecInstQueueWrites
        .name(name() + ".vec_inst_queue_writes")
        .desc("Number of vector instruction queue writes")
        .flags(total);

    vecInstQueueWakeupAccesses
        .name(name() + ".vec_inst_queue_wakeup_accesses")
        .desc("Number of vector instruction queue wakeup accesses")
        .flags(total);

    intAluAccesses
        .name(name() + ".int_alu_accesses")
        .desc("Number of integer alu accesses")
        .flags(total);

    fpAluAccesses
        .name(name() + ".fp_alu_accesses")
        .desc("Number of floating point alu accesses")
        .flags(total);

    vecAluAccesses
        .name(name() + ".vec_alu_accesses")
        .desc("Number of vector alu accesses")
        .flags(total);

}

template <class Impl>
void
InstructionQueue<Impl>::resetState()
{
    //Initialize thread IQ counts
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        count[tid] = 0;
        instList[tid].clear();
    }

    // Initialize the number of free IQ entries.
    freeEntries = numEntries;

    // Note that in actuality, the registers corresponding to the logical
    // registers start off as ready.  However this doesn't matter for the
    // IQ as the instruction should have been correctly told if those
    // registers are ready in rename.  Thus it can all be initialized as
    // unready.
    for (int i = 0; i < numPhysRegs; ++i) {
        regScoreboard[i] = false;
    }

    for (ThreadID tid = 0; tid < numThreads; ++tid) {
        squashedSeqNum[tid] = 0;
    }

    for (int i = 0; i < Num_OpClasses; ++i) {
        while (!readyInsts[i].empty())
            readyInsts[i].pop();
        queueOnList[i] = false;
        readyIt[i] = listOrder.end();
    }
    nonSpecInsts.clear();
    listOrder.clear();
    deferredMemInsts.clear();
    blockedMemInsts.clear();
    retryMemInsts.clear();
    wbOutstanding = 0;
}

template <class Impl>
void
InstructionQueue<Impl>::setActiveThreads(list<ThreadID> *at_ptr)
{
    activeThreads = at_ptr;
}

template <class Impl>
void
InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
{
    issueToExecuteQueue = i2e_ptr;
}

template <class Impl>
void
InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
{
    timeBuffer = tb_ptr;

    fromCommit = timeBuffer->getWire(-commitToIEWDelay);
}

template <class Impl>
bool
InstructionQueue<Impl>::isDrained() const
{
    bool drained = dependGraph.empty() &&
                   instsToExecute.empty() &&
                   wbOutstanding == 0;
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        drained = drained && memDepUnit[tid].isDrained();

    return drained;
}

template <class Impl>
void
InstructionQueue<Impl>::drainSanityCheck() const
{
    assert(dependGraph.empty());
    assert(instsToExecute.empty());
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        memDepUnit[tid].drainSanityCheck();
}

template <class Impl>
void
InstructionQueue<Impl>::takeOverFrom()
{
    resetState();
}

template <class Impl>
int
InstructionQueue<Impl>::entryAmount(ThreadID num_threads)
{
    if (iqPolicy == Partitioned) {
        return numEntries / num_threads;
    } else {
        return 0;
    }
}


template <class Impl>
void
InstructionQueue<Impl>::resetEntries()
{
    if (iqPolicy != Dynamic || numThreads > 1) {
        int active_threads = activeThreads->size();

        list<ThreadID>::iterator threads = activeThreads->begin();
        list<ThreadID>::iterator end = activeThreads->end();

        while (threads != end) {
            ThreadID tid = *threads++;

            if (iqPolicy == Partitioned) {
                maxEntries[tid] = numEntries / active_threads;
            } else if (iqPolicy == Threshold && active_threads == 1) {
                maxEntries[tid] = numEntries;
            }
        }
    }
}

template <class Impl>
unsigned
InstructionQueue<Impl>::numFreeEntries()
{
    return freeEntries;
}

template <class Impl>
unsigned
InstructionQueue<Impl>::numFreeEntries(ThreadID tid)
{
    return maxEntries[tid] - count[tid];
}

// Might want to do something more complex if it knows how many instructions
// will be issued this cycle.
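// Note that isFull() below only checks the global free-entry count; the
// per-thread overload, isFull(tid), instead respects the limit imposed by
// the SMT sharing policy (maxEntries[tid] - count[tid]).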
template <class Impl>
bool
InstructionQueue<Impl>::isFull()
{
    if (freeEntries == 0) {
        return(true);
    } else {
        return(false);
    }
}

template <class Impl>
bool
InstructionQueue<Impl>::isFull(ThreadID tid)
{
    if (numFreeEntries(tid) == 0) {
        return(true);
    } else {
        return(false);
    }
}

template <class Impl>
bool
InstructionQueue<Impl>::hasReadyInsts()
{
    if (!listOrder.empty()) {
        return true;
    }

    for (int i = 0; i < Num_OpClasses; ++i) {
        if (!readyInsts[i].empty()) {
            return true;
        }
    }

    return false;
}

template <class Impl>
void
InstructionQueue<Impl>::insert(const DynInstPtr &new_inst)
{
    if (new_inst->isFloating()) {
        fpInstQueueWrites++;
    } else if (new_inst->isVector()) {
        vecInstQueueWrites++;
    } else {
        intInstQueueWrites++;
    }
    // Make sure the instruction is valid
    assert(new_inst);

    DPRINTF(IQ, "Adding instruction [sn:%lli] PC %s to the IQ.\n",
            new_inst->seqNum, new_inst->pcState());

    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Look through its source registers (physical regs), and mark any
    // dependencies.
    addToDependents(new_inst);

    // Have this instruction set itself as the producer of its destination
    // register(s).
    addToProducers(new_inst);

    if (new_inst->isMemRef()) {
        memDepUnit[new_inst->threadNumber].insert(new_inst);
    } else {
        addIfReady(new_inst);
    }

    ++iqInstsAdded;

    count[new_inst->threadNumber]++;

    assert(freeEntries == (numEntries - countInsts()));
}

template <class Impl>
void
InstructionQueue<Impl>::insertNonSpec(const DynInstPtr &new_inst)
{
    // @todo: Clean up this code; can do it by setting inst as unable
    // to issue, then calling normal insert on the inst.
    if (new_inst->isFloating()) {
        fpInstQueueWrites++;
    } else if (new_inst->isVector()) {
        vecInstQueueWrites++;
    } else {
        intInstQueueWrites++;
    }

    assert(new_inst);

    nonSpecInsts[new_inst->seqNum] = new_inst;

    DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %s "
            "to the IQ.\n",
            new_inst->seqNum, new_inst->pcState());

    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Have this instruction set itself as the producer of its destination
    // register(s).
    addToProducers(new_inst);

    // If it's a memory instruction, add it to the memory dependency
    // unit.
    if (new_inst->isMemRef()) {
        memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
    }

    ++iqNonSpecInstsAdded;

    count[new_inst->threadNumber]++;

    assert(freeEntries == (numEntries - countInsts()));
}

template <class Impl>
void
InstructionQueue<Impl>::insertBarrier(const DynInstPtr &barr_inst)
{
    memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);

    insertNonSpec(barr_inst);
}

template <class Impl>
typename Impl::DynInstPtr
InstructionQueue<Impl>::getInstToExecute()
{
    assert(!instsToExecute.empty());
    DynInstPtr inst = std::move(instsToExecute.front());
    instsToExecute.pop_front();
    if (inst->isFloating()) {
        fpInstQueueReads++;
    } else if (inst->isVector()) {
        vecInstQueueReads++;
    } else {
        intInstQueueReads++;
    }
    return inst;
}

template <class Impl>
void
InstructionQueue<Impl>::addToOrderList(OpClass op_class)
{
    assert(!readyInsts[op_class].empty());

    ListOrderEntry queue_entry;

    queue_entry.queueType = op_class;

    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    ListOrderIt list_it = listOrder.begin();
    ListOrderIt list_end_it = listOrder.end();

    while (list_it != list_end_it) {
        if ((*list_it).oldestInst > queue_entry.oldestInst) {
            break;
        }

        list_it++;
    }

    readyIt[op_class] = listOrder.insert(list_it, queue_entry);
    queueOnList[op_class] = true;
}

template <class Impl>
void
InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
{
    // Get iterator of next item on the list
    // Delete the original iterator
    // Determine if the next item is either the end of the list or younger
    // than the new instruction.  If so, then add in a new iterator right here.
    // If not, then move along.
    ListOrderEntry queue_entry;
    OpClass op_class = (*list_order_it).queueType;
    ListOrderIt next_it = list_order_it;

    ++next_it;

    queue_entry.queueType = op_class;
    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    while (next_it != listOrder.end() &&
           (*next_it).oldestInst < queue_entry.oldestInst) {
        ++next_it;
    }

    readyIt[op_class] = listOrder.insert(next_it, queue_entry);
}

template <class Impl>
void
InstructionQueue<Impl>::processFUCompletion(const DynInstPtr &inst, int fu_idx)
{
    DPRINTF(IQ, "Processing FU completion [sn:%lli]\n", inst->seqNum);
    assert(!cpu->switchedOut());
    // The CPU could have been sleeping until this op completed (*extremely*
    // long latency op).  Wake it if it was.  This may be overkill.
    --wbOutstanding;
    iewStage->wakeCPU();

    if (fu_idx > -1)
        fuPool->freeUnitNextCycle(fu_idx);

    // @todo: Ensure that these FU Completions happen at the beginning
    // of a cycle, otherwise they could add too many instructions to
    // the queue.
    issueToExecuteQueue->access(-1)->size++;
    instsToExecute.push_back(inst);
}

// @todo: Figure out a better way to remove the squashed items from the
// lists.  Checking the top item of each list to see if it's squashed
// wastes time and forces jumps.
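// scheduleReadyInsts() first moves any deferred or formerly cache-blocked
// memory instructions back onto the ready lists, then walks listOrder
// oldest-first, acquiring a functional unit for each ready instruction
// until the issue width is exhausted.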
template <class Impl>
void
InstructionQueue<Impl>::scheduleReadyInsts()
{
    DPRINTF(IQ, "Attempting to schedule ready instructions from "
            "the IQ.\n");

    IssueStruct *i2e_info = issueToExecuteQueue->access(0);

    DynInstPtr mem_inst;
    while (mem_inst = std::move(getDeferredMemInstToExecute())) {
        addReadyMemInst(mem_inst);
    }

    // See if any cache blocked instructions are able to be executed
    while (mem_inst = std::move(getBlockedMemInstToExecute())) {
        addReadyMemInst(mem_inst);
    }

    // Have iterator to head of the list
    // While I haven't exceeded bandwidth or reached the end of the list,
    // Try to get a FU that can do what this op needs.
    // If successful, change the oldestInst to the new top of the list, put
    // the queue in the proper place in the list.
    // Increment the iterator.
    // This will avoid trying to schedule a certain op class if there are no
    // FUs that handle it.
    int total_issued = 0;
    ListOrderIt order_it = listOrder.begin();
    ListOrderIt order_end_it = listOrder.end();

    while (total_issued < totalWidth && order_it != order_end_it) {
        OpClass op_class = (*order_it).queueType;

        assert(!readyInsts[op_class].empty());

        DynInstPtr issuing_inst = readyInsts[op_class].top();

        if (issuing_inst->isFloating()) {
            fpInstQueueReads++;
        } else if (issuing_inst->isVector()) {
            vecInstQueueReads++;
        } else {
            intInstQueueReads++;
        }

        assert(issuing_inst->seqNum == (*order_it).oldestInst);

        if (issuing_inst->isSquashed()) {
            readyInsts[op_class].pop();

            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            listOrder.erase(order_it++);

            ++iqSquashedInstsIssued;

            continue;
        }

        int idx = FUPool::NoCapableFU;
        Cycles op_latency = Cycles(1);
        ThreadID tid = issuing_inst->threadNumber;

        if (op_class != No_OpClass) {
            idx = fuPool->getUnit(op_class);
            if (issuing_inst->isFloating()) {
                fpAluAccesses++;
            } else if (issuing_inst->isVector()) {
                vecAluAccesses++;
            } else {
                intAluAccesses++;
            }
            if (idx > FUPool::NoFreeFU) {
                op_latency = fuPool->getOpLatency(op_class);
            }
        }

        // If we have an instruction that doesn't require a FU, or a
        // valid FU, then schedule for execution.
        if (idx != FUPool::NoFreeFU) {
            if (op_latency == Cycles(1)) {
                i2e_info->size++;
                instsToExecute.push_back(issuing_inst);

                // Add the FU onto the list of FU's to be freed next
                // cycle if we used one.
                if (idx >= 0)
                    fuPool->freeUnitNextCycle(idx);
            } else {
                bool pipelined = fuPool->isPipelined(op_class);
                // Generate completion event for the FU
                ++wbOutstanding;
                FUCompletion *execution = new FUCompletion(issuing_inst,
                                                           idx, this);

                cpu->schedule(execution,
                              cpu->clockEdge(Cycles(op_latency - 1)));

                if (!pipelined) {
                    // If FU isn't pipelined, then it must be freed
                    // upon the execution completing.
                    execution->setFreeFU();
                } else {
                    // Add the FU onto the list of FU's to be freed next cycle.
                    fuPool->freeUnitNextCycle(idx);
                }
            }

            DPRINTF(IQ, "Thread %i: Issuing instruction PC %s "
                    "[sn:%lli]\n",
                    tid, issuing_inst->pcState(),
                    issuing_inst->seqNum);

            readyInsts[op_class].pop();

            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            issuing_inst->setIssued();
            ++total_issued;

#if TRACING_ON
            issuing_inst->issueTick = curTick() - issuing_inst->fetchTick;
#endif

            if (!issuing_inst->isMemRef()) {
                // Memory instructions can not be freed from the IQ until they
                // complete.
                ++freeEntries;
                count[tid]--;
                issuing_inst->clearInIQ();
            } else {
                memDepUnit[tid].issue(issuing_inst);
            }

            listOrder.erase(order_it++);
            statIssuedInstType[tid][op_class]++;
        } else {
            statFuBusy[op_class]++;
            fuBusy[tid]++;
            ++order_it;
        }
    }

    numIssuedDist.sample(total_issued);
    iqInstsIssued += total_issued;

    // If we issued any instructions, tell the CPU we had activity.
    // @todo If the way deferred memory instructions are handled due to
    // translation changes then the deferredMemInsts condition should be
    // removed from the code below.
    if (total_issued || !retryMemInsts.empty() || !deferredMemInsts.empty()) {
        cpu->activityThisCycle();
    } else {
        DPRINTF(IQ, "Not able to schedule any instructions.\n");
    }
}

template <class Impl>
void
InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
{
    DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
            "to execute.\n", inst);

    NonSpecMapIt inst_it = nonSpecInsts.find(inst);

    assert(inst_it != nonSpecInsts.end());

    ThreadID tid = (*inst_it).second->threadNumber;

    (*inst_it).second->setAtCommit();

    (*inst_it).second->setCanIssue();

    if (!(*inst_it).second->isMemRef()) {
        addIfReady((*inst_it).second);
    } else {
        memDepUnit[tid].nonSpecInstReady((*inst_it).second);
    }

    (*inst_it).second = NULL;

    nonSpecInsts.erase(inst_it);
}

template <class Impl>
void
InstructionQueue<Impl>::commit(const InstSeqNum &inst, ThreadID tid)
{
    DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n",
            tid, inst);

    ListIt iq_it = instList[tid].begin();

    while (iq_it != instList[tid].end() &&
           (*iq_it)->seqNum <= inst) {
        ++iq_it;
        instList[tid].pop_front();
    }

    assert(freeEntries == (numEntries - countInsts()));
}

template <class Impl>
int
InstructionQueue<Impl>::wakeDependents(const DynInstPtr &completed_inst)
{
    int dependents = 0;

    // The instruction queue here takes care of both floating and int ops
    if (completed_inst->isFloating()) {
        fpInstQueueWakeupAccesses++;
    } else if (completed_inst->isVector()) {
        vecInstQueueWakeupAccesses++;
    } else {
        intInstQueueWakeupAccesses++;
    }

    DPRINTF(IQ, "Waking dependents of completed instruction.\n");

    assert(!completed_inst->isSquashed());

    // Tell the memory dependence unit to wake any dependents on this
    // instruction if it is a memory instruction.  Also complete the memory
    // instruction at this point since we know it executed without issues.
    // @todo: Might want to rename "completeMemInst" to something that
    // indicates that it won't need to be replayed, and call this
    // earlier.  Might not be a big deal.
    if (completed_inst->isMemRef()) {
        memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
        completeMemInst(completed_inst);
    } else if (completed_inst->isMemBarrier() ||
               completed_inst->isWriteBarrier()) {
        memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
    }

    for (int dest_reg_idx = 0;
         dest_reg_idx < completed_inst->numDestRegs();
         dest_reg_idx++)
    {
        PhysRegIdPtr dest_reg =
            completed_inst->renamedDestRegIdx(dest_reg_idx);

        // Special case of unique or control registers.  They are not
        // handled by the IQ and thus have no dependency graph entry.
        if (dest_reg->isFixedMapping()) {
            DPRINTF(IQ, "Reg %d [%s] is part of a fixed mapping, skipping\n",
                    dest_reg->index(), dest_reg->className());
            continue;
        }

        DPRINTF(IQ, "Waking any dependents on register %i (%s).\n",
                dest_reg->index(),
                dest_reg->className());

        //Go through the dependency chain, marking the registers as
        //ready within the waiting instructions.
        DynInstPtr dep_inst = dependGraph.pop(dest_reg->flatIndex());

        while (dep_inst) {
            DPRINTF(IQ, "Waking up a dependent instruction, [sn:%lli] "
                    "PC %s.\n", dep_inst->seqNum, dep_inst->pcState());

            // Might want to give more information to the instruction
            // so that it knows which of its source registers is
            // ready.  However that would mean that the dependency
            // graph entries would need to hold the src_reg_idx.
            dep_inst->markSrcRegReady();

            addIfReady(dep_inst);

            dep_inst = dependGraph.pop(dest_reg->flatIndex());

            ++dependents;
        }

        // Reset the head node now that all of its dependents have
        // been woken up.
        assert(dependGraph.empty(dest_reg->flatIndex()));
        dependGraph.clearInst(dest_reg->flatIndex());

        // Mark the scoreboard as having that register ready.
        regScoreboard[dest_reg->flatIndex()] = true;
    }
    return dependents;
}

template <class Impl>
void
InstructionQueue<Impl>::addReadyMemInst(const DynInstPtr &ready_inst)
{
    OpClass op_class = ready_inst->opClass();

    readyInsts[op_class].push(ready_inst);

    // Will need to reorder the list if either a queue is not on the list,
    // or it has an older instruction than last time.
    if (!queueOnList[op_class]) {
        addToOrderList(op_class);
    } else if (readyInsts[op_class].top()->seqNum <
               (*readyIt[op_class]).oldestInst) {
        listOrder.erase(readyIt[op_class]);
        addToOrderList(op_class);
    }

    DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
            "the ready list, PC %s opclass:%i [sn:%lli].\n",
            ready_inst->pcState(), op_class, ready_inst->seqNum);
}

template <class Impl>
void
InstructionQueue<Impl>::rescheduleMemInst(const DynInstPtr &resched_inst)
{
    DPRINTF(IQ, "Rescheduling mem inst [sn:%lli]\n", resched_inst->seqNum);

    // Reset DTB translation state
    resched_inst->translationStarted(false);
    resched_inst->translationCompleted(false);

    resched_inst->clearCanIssue();
    memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
}

template <class Impl>
void
InstructionQueue<Impl>::replayMemInst(const DynInstPtr &replay_inst)
{
    memDepUnit[replay_inst->threadNumber].replay();
}

template <class Impl>
void
InstructionQueue<Impl>::completeMemInst(const DynInstPtr &completed_inst)
{
    ThreadID tid = completed_inst->threadNumber;

    DPRINTF(IQ, "Completing mem instruction PC: %s [sn:%lli]\n",
            completed_inst->pcState(), completed_inst->seqNum);

    ++freeEntries;

    completed_inst->memOpDone(true);

    memDepUnit[tid].completed(completed_inst);
    count[tid]--;
}

template <class Impl>
void
InstructionQueue<Impl>::deferMemInst(const DynInstPtr &deferred_inst)
{
    deferredMemInsts.push_back(deferred_inst);
}

template <class Impl>
void
InstructionQueue<Impl>::blockMemInst(const DynInstPtr &blocked_inst)
{
    blocked_inst->translationStarted(false);
    blocked_inst->translationCompleted(false);

    blocked_inst->clearIssued();
    blocked_inst->clearCanIssue();
    blockedMemInsts.push_back(blocked_inst);
}

template <class Impl>
void
InstructionQueue<Impl>::cacheUnblocked()
{
    retryMemInsts.splice(retryMemInsts.end(), blockedMemInsts);
    // Get the CPU ticking again
    cpu->wakeCPU();
}

template <class Impl>
typename Impl::DynInstPtr
InstructionQueue<Impl>::getDeferredMemInstToExecute()
{
    for (ListIt it = deferredMemInsts.begin(); it != deferredMemInsts.end();
         ++it) {
        if ((*it)->translationCompleted() || (*it)->isSquashed()) {
            DynInstPtr mem_inst = std::move(*it);
            deferredMemInsts.erase(it);
            return mem_inst;
        }
    }
    return nullptr;
}

template <class Impl>
typename Impl::DynInstPtr
InstructionQueue<Impl>::getBlockedMemInstToExecute()
{
    if (retryMemInsts.empty()) {
        return nullptr;
    } else {
        DynInstPtr mem_inst = std::move(retryMemInsts.front());
        retryMemInsts.pop_front();
        return mem_inst;
    }
}

template <class Impl>
void
InstructionQueue<Impl>::violation(const DynInstPtr &store,
                                  const DynInstPtr &faulting_load)
{
    intInstQueueWrites++;
    memDepUnit[store->threadNumber].violation(store, faulting_load);
}

template <class Impl>
void
InstructionQueue<Impl>::squash(ThreadID tid)
{
    DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
            "the IQ.\n", tid);

    // Read instruction sequence number of last instruction out of the
    // time buffer.
    squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;

    doSquash(tid);

    // Also tell the memory dependence unit to squash.
    memDepUnit[tid].squash(squashedSeqNum[tid], tid);
}

template <class Impl>
void
InstructionQueue<Impl>::doSquash(ThreadID tid)
{
    // Start at the tail.
    ListIt squash_it = instList[tid].end();
    --squash_it;

    DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
            tid, squashedSeqNum[tid]);

    // Squash any instructions younger than the squashed sequence number
    // given.
    while (squash_it != instList[tid].end() &&
           (*squash_it)->seqNum > squashedSeqNum[tid]) {

        DynInstPtr squashed_inst = (*squash_it);
        if (squashed_inst->isFloating()) {
            fpInstQueueWrites++;
        } else if (squashed_inst->isVector()) {
            vecInstQueueWrites++;
        } else {
            intInstQueueWrites++;
        }

        // Only handle the instruction if it actually is in the IQ and
        // hasn't already been squashed in the IQ.
        if (squashed_inst->threadNumber != tid ||
            squashed_inst->isSquashedInIQ()) {
            --squash_it;
            continue;
        }

        if (!squashed_inst->isIssued() ||
            (squashed_inst->isMemRef() &&
             !squashed_inst->memOpDone())) {

            DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %s squashed.\n",
                    tid, squashed_inst->seqNum, squashed_inst->pcState());

            bool is_acq_rel = squashed_inst->isMemBarrier() &&
                              (squashed_inst->isLoad() ||
                               (squashed_inst->isStore() &&
                                !squashed_inst->isStoreConditional()));

            // Remove the instruction from the dependency list.
            if (is_acq_rel ||
                (!squashed_inst->isNonSpeculative() &&
                 !squashed_inst->isStoreConditional() &&
                 !squashed_inst->isMemBarrier() &&
                 !squashed_inst->isWriteBarrier())) {

                for (int src_reg_idx = 0;
                     src_reg_idx < squashed_inst->numSrcRegs();
                     src_reg_idx++)
                {
                    PhysRegIdPtr src_reg =
                        squashed_inst->renamedSrcRegIdx(src_reg_idx);

                    // Only remove it from the dependency graph if it
                    // was placed there in the first place.

                    // Instead of doing a linked list traversal, we
                    // can just remove these squashed instructions
                    // either at issue time, or when the register is
                    // overwritten.  The only downside to this is it
                    // leaves more room for error.

                    if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
                        !src_reg->isFixedMapping()) {
                        dependGraph.remove(src_reg->flatIndex(),
                                           squashed_inst);
                    }


                    ++iqSquashedOperandsExamined;
                }
            } else if (!squashed_inst->isStoreConditional() ||
                       !squashed_inst->isCompleted()) {
                NonSpecMapIt ns_inst_it =
                    nonSpecInsts.find(squashed_inst->seqNum);

                // we remove non-speculative instructions from
                // nonSpecInsts already when they are ready, and so we
                // cannot always expect to find them
                if (ns_inst_it == nonSpecInsts.end()) {
                    // loads that became ready but stalled on a
                    // blocked cache are already removed from
                    // nonSpecInsts, and have not faulted
                    assert(squashed_inst->getFault() != NoFault ||
                           squashed_inst->isMemRef());
                } else {

                    (*ns_inst_it).second = NULL;

                    nonSpecInsts.erase(ns_inst_it);

                    ++iqSquashedNonSpecRemoved;
                }
            }

            // Might want to also clear out the head of the dependency graph.

            // Mark it as squashed within the IQ.
            squashed_inst->setSquashedInIQ();

            // @todo: Remove this hack where several statuses are set so the
            // inst will flow through the rest of the pipeline.
            squashed_inst->setIssued();
            squashed_inst->setCanCommit();
            squashed_inst->clearInIQ();

            //Update Thread IQ Count
            count[squashed_inst->threadNumber]--;

            ++freeEntries;
        }

        // IQ clears out the heads of the dependency graph only when
        // instructions reach writeback stage. If an instruction is squashed
        // before writeback stage, its head of dependency graph would not be
        // cleared out; it holds the instruction's DynInstPtr. This prevents
        // freeing the squashed instruction's DynInst.
        // Thus, we need to manually clear out the squashed instructions' heads
        // of dependency graph.
        for (int dest_reg_idx = 0;
             dest_reg_idx < squashed_inst->numDestRegs();
             dest_reg_idx++)
        {
            PhysRegIdPtr dest_reg =
                squashed_inst->renamedDestRegIdx(dest_reg_idx);
            if (dest_reg->isFixedMapping()) {
                continue;
            }
            assert(dependGraph.empty(dest_reg->flatIndex()));
            dependGraph.clearInst(dest_reg->flatIndex());
        }
        instList[tid].erase(squash_it--);
        ++iqSquashedInstsExamined;
    }
}

template <class Impl>
bool
InstructionQueue<Impl>::addToDependents(const DynInstPtr &new_inst)
{
    // Loop through the instruction's source registers, adding
    // them to the dependency list if they are not ready.
    int8_t total_src_regs = new_inst->numSrcRegs();
    bool return_val = false;

    for (int src_reg_idx = 0;
         src_reg_idx < total_src_regs;
         src_reg_idx++)
    {
        // Only add it to the dependency graph if it's not ready.
        if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
            PhysRegIdPtr src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);

            // Check the IQ's scoreboard to make sure the register
            // hasn't become ready while the instruction was in flight
            // between stages.  Only if it really isn't ready should
            // it be added to the dependency graph.
            if (src_reg->isFixedMapping()) {
                continue;
            } else if (!regScoreboard[src_reg->flatIndex()]) {
                DPRINTF(IQ, "Instruction PC %s has src reg %i (%s) that "
                        "is being added to the dependency chain.\n",
                        new_inst->pcState(), src_reg->index(),
                        src_reg->className());

                dependGraph.insert(src_reg->flatIndex(), new_inst);

                // Change the return value to indicate that something
                // was added to the dependency graph.
                return_val = true;
            } else {
                DPRINTF(IQ, "Instruction PC %s has src reg %i (%s) that "
                        "became ready before it reached the IQ.\n",
                        new_inst->pcState(), src_reg->index(),
                        src_reg->className());
                // Mark a register ready within the instruction.
                new_inst->markSrcRegReady(src_reg_idx);
            }
        }
    }

    return return_val;
}

template <class Impl>
void
InstructionQueue<Impl>::addToProducers(const DynInstPtr &new_inst)
{
    // Nothing really needs to be marked when an instruction becomes
    // the producer of a register's value, but for convenience a ptr
    // to the producing instruction will be placed in the head node of
    // the dependency links.
    int8_t total_dest_regs = new_inst->numDestRegs();

    for (int dest_reg_idx = 0;
         dest_reg_idx < total_dest_regs;
         dest_reg_idx++)
    {
        PhysRegIdPtr dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);

        // Some registers have fixed mapping, and there is no need to track
        // dependencies as these instructions must be executed at commit.
        if (dest_reg->isFixedMapping()) {
            continue;
        }

        if (!dependGraph.empty(dest_reg->flatIndex())) {
            dependGraph.dump();
            panic("Dependency graph %i (%s) (flat: %i) not empty!",
                  dest_reg->index(), dest_reg->className(),
                  dest_reg->flatIndex());
        }

        dependGraph.setInst(dest_reg->flatIndex(), new_inst);

        // Mark the scoreboard to say it's not yet ready.
        regScoreboard[dest_reg->flatIndex()] = false;
    }
}

template <class Impl>
void
InstructionQueue<Impl>::addIfReady(const DynInstPtr &inst)
{
    // If the instruction now has all of its source registers
    // available, then add it to the list of ready instructions.
    if (inst->readyToIssue()) {

        //Add the instruction to the proper ready list.
        if (inst->isMemRef()) {

            DPRINTF(IQ, "Checking if memory instruction can issue.\n");

            // Message to the mem dependence unit that this instruction has
            // its registers ready.
            memDepUnit[inst->threadNumber].regsReady(inst);

            return;
        }

        OpClass op_class = inst->opClass();

        DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
                "the ready list, PC %s opclass:%i [sn:%lli].\n",
                inst->pcState(), op_class, inst->seqNum);

        readyInsts[op_class].push(inst);

        // Will need to reorder the list if either a queue is not on the list,
        // or it has an older instruction than last time.
        if (!queueOnList[op_class]) {
            addToOrderList(op_class);
        } else if (readyInsts[op_class].top()->seqNum <
                   (*readyIt[op_class]).oldestInst) {
            listOrder.erase(readyIt[op_class]);
            addToOrderList(op_class);
        }
    }
}

template <class Impl>
int
InstructionQueue<Impl>::countInsts()
{
#if 0
    //ksewell: This works but definitely could use a cleaner write
    //with a more intuitive way of counting. Right now it's
    //just brute force ....
    // Change the #if if you want to use this method.
    int total_insts = 0;

    for (ThreadID tid = 0; tid < numThreads; ++tid) {
        ListIt count_it = instList[tid].begin();

        while (count_it != instList[tid].end()) {
            if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
                if (!(*count_it)->isIssued()) {
                    ++total_insts;
                } else if ((*count_it)->isMemRef() &&
                           !(*count_it)->memOpDone) {
                    // Loads that have not been marked as executed still count
                    // towards the total instructions.
                    ++total_insts;
                }
            }

            ++count_it;
        }
    }

    return total_insts;
#else
    return numEntries - freeEntries;
#endif
}

template <class Impl>
void
InstructionQueue<Impl>::dumpLists()
{
    for (int i = 0; i < Num_OpClasses; ++i) {
        cprintf("Ready list %i size: %i\n", i, readyInsts[i].size());

        cprintf("\n");
    }

    cprintf("Non speculative list size: %i\n", nonSpecInsts.size());

    NonSpecMapIt non_spec_it = nonSpecInsts.begin();
    NonSpecMapIt non_spec_end_it = nonSpecInsts.end();

    cprintf("Non speculative list: ");

    while (non_spec_it != non_spec_end_it) {
        cprintf("%s [sn:%lli]", (*non_spec_it).second->pcState(),
                (*non_spec_it).second->seqNum);
        ++non_spec_it;
    }

    cprintf("\n");

    ListOrderIt list_order_it = listOrder.begin();
    ListOrderIt list_order_end_it = listOrder.end();
    int i = 1;

    cprintf("List order: ");

    while (list_order_it != list_order_end_it) {
        cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
                (*list_order_it).oldestInst);

        ++list_order_it;
        ++i;
    }

    cprintf("\n");
}


template <class Impl>
void
InstructionQueue<Impl>::dumpInsts()
{
    for (ThreadID tid = 0; tid < numThreads; ++tid) {
        int num = 0;
        int valid_num = 0;
        ListIt inst_list_it = instList[tid].begin();

        while (inst_list_it != instList[tid].end()) {
            cprintf("Instruction:%i\n", num);
            if (!(*inst_list_it)->isSquashed()) {
                if (!(*inst_list_it)->isIssued()) {
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                } else if ((*inst_list_it)->isMemRef() &&
                           !(*inst_list_it)->memOpDone()) {
                    // Loads that have not been marked as executed
                    // still count towards the total instructions.
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                }
            }

            cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n"
                    "Issued:%i\nSquashed:%i\n",
                    (*inst_list_it)->pcState(),
                    (*inst_list_it)->seqNum,
                    (*inst_list_it)->threadNumber,
                    (*inst_list_it)->isIssued(),
                    (*inst_list_it)->isSquashed());

            if ((*inst_list_it)->isMemRef()) {
                cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone());
            }

            cprintf("\n");

            inst_list_it++;
            ++num;
        }
    }

    cprintf("Insts to Execute list:\n");

    int num = 0;
    int valid_num = 0;
    ListIt inst_list_it = instsToExecute.begin();

    while (inst_list_it != instsToExecute.end())
    {
        cprintf("Instruction:%i\n",
                num);
        if (!(*inst_list_it)->isSquashed()) {
            if (!(*inst_list_it)->isIssued()) {
                ++valid_num;
                cprintf("Count:%i\n", valid_num);
            } else if ((*inst_list_it)->isMemRef() &&
                       !(*inst_list_it)->memOpDone()) {
                // Loads that have not been marked as executed
                // still count towards the total instructions.
                ++valid_num;
                cprintf("Count:%i\n", valid_num);
            }
        }

        cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n"
                "Issued:%i\nSquashed:%i\n",
                (*inst_list_it)->pcState(),
                (*inst_list_it)->seqNum,
                (*inst_list_it)->threadNumber,
                (*inst_list_it)->isIssued(),
                (*inst_list_it)->isSquashed());

        if ((*inst_list_it)->isMemRef()) {
            cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone());
        }

        cprintf("\n");

        inst_list_it++;
        ++num;
    }
}

#endif //__CPU_O3_INST_QUEUE_IMPL_HH__