}

// Event callback fired when a multi-cycle functional unit finishes its
// operation: hands the instruction back to the IQ and optionally frees
// the FU (fuIdx of -1 tells the IQ no unit needs freeing).
template <class Impl>
void
InstructionQueue<Impl>::FUCompletion::process()
{
    iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1);
    // Drop the reference so the DynInst can be reclaimed once the event
    // has been processed.
    inst = NULL;
}


// Human-readable event description, used by event tracing/debug output.
template <class Impl>
const char *
InstructionQueue<Impl>::FUCompletion::description() const
{
    return "Functional unit completion";
}

// Construct the IQ: size the dependency graph and register scoreboard to
// the total physical register count, initialize the per-thread memory
// dependence units, and configure the SMT sharing policy from params.
template <class Impl>
InstructionQueue<Impl>::InstructionQueue(O3CPU *cpu_ptr, IEW *iew_ptr,
                                         DerivO3CPUParams *params)
    : cpu(cpu_ptr),
      iewStage(iew_ptr),
      fuPool(params->fuPool),
      numEntries(params->numIQEntries),
      totalWidth(params->issueWidth),
      numPhysIntRegs(params->numPhysIntRegs),
      numPhysFloatRegs(params->numPhysFloatRegs),
      commitToIEWDelay(params->commitToIEWDelay)
{
    assert(fuPool);

    switchedOut = false;

    numThreads = params->numThreads;

    // Set the number of physical registers as the number of int + float
    numPhysRegs = numPhysIntRegs + numPhysFloatRegs;

    //Create an entry for each physical register within the
    //dependency graph.
    dependGraph.resize(numPhysRegs);

    // Resize the register scoreboard.
    regScoreboard.resize(numPhysRegs);

    //Initialize Mem Dependence Units
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        memDepUnit[tid].init(params, tid);
        memDepUnit[tid].setIQ(this);
    }

    resetState();

    std::string policy = params->smtIQPolicy;

    //Convert string to lowercase so the policy name is case-insensitive.
    std::transform(policy.begin(), policy.end(), policy.begin(),
                   (int(*)(int)) tolower);

    //Figure out resource sharing policy
    if (policy == "dynamic") {
        iqPolicy = Dynamic;

        // Dynamic: no per-thread cap; every thread may use the whole IQ.
        //Set Max Entries to Total ROB Capacity
        for (ThreadID tid = 0; tid < numThreads; tid++) {
            maxEntries[tid] = numEntries;
        }

    } else if (policy == "partitioned") {
        iqPolicy = Partitioned;

        //@todo: make work if part_amt doesn't divide evenly.
        int part_amt = numEntries / numThreads;

        //Divide the IQ up evenly among threads.
        for (ThreadID tid = 0; tid < numThreads; tid++) {
            maxEntries[tid] = part_amt;
        }

        DPRINTF(IQ, "IQ sharing policy set to Partitioned:"
                "%i entries per thread.\n",part_amt);
    } else if (policy == "threshold") {
        iqPolicy = Threshold;

        // smtIQThreshold is a percentage of the total IQ each thread may
        // occupy.
        double threshold =  (double)params->smtIQThreshold / 100;

        int thresholdIQ = (int)((double)threshold * numEntries);

        //Divide up by threshold amount
        for (ThreadID tid = 0; tid < numThreads; tid++) {
            maxEntries[tid] = thresholdIQ;
        }

        DPRINTF(IQ, "IQ sharing policy set to Threshold:"
                "%i entries per thread.\n",thresholdIQ);
    } else {
        assert(0 && "Invalid IQ Sharing Policy.Options Are:{Dynamic,"
               "Partitioned, Threshold}");
    }
}

// Destructor: tear down the dependency graph; in DEBUG builds report its
// traversal/removal counters for profiling.
template <class Impl>
InstructionQueue<Impl>::~InstructionQueue()
{
    dependGraph.reset();
#ifdef DEBUG
    cprintf("Nodes traversed: %i, removed: %i\n",
            dependGraph.nodesTraversed, dependGraph.nodesRemoved);
#endif
}

// Name used as the prefix for all of this IQ's statistics.
template <class Impl>
std::string
InstructionQueue<Impl>::name() const
{
    return cpu->name() + ".iq";
}

// Register all IQ statistics with the stats framework.  Several older
// distributions are kept commented out below for reference.  All stat
// members are declared in the class header.
template <class Impl>
void
InstructionQueue<Impl>::regStats()
{
    using namespace Stats;
    iqInstsAdded
        .name(name() + ".iqInstsAdded")
        .desc("Number of instructions added to the IQ (excludes non-spec)")
        .prereq(iqInstsAdded);

    iqNonSpecInstsAdded
        .name(name() + ".iqNonSpecInstsAdded")
        .desc("Number of non-speculative instructions added to the IQ")
        .prereq(iqNonSpecInstsAdded);

    iqInstsIssued
        .name(name() + ".iqInstsIssued")
        .desc("Number of instructions issued")
        .prereq(iqInstsIssued);

    iqIntInstsIssued
        .name(name() + ".iqIntInstsIssued")
        .desc("Number of integer instructions issued")
        .prereq(iqIntInstsIssued);

    iqFloatInstsIssued
        .name(name() + ".iqFloatInstsIssued")
        .desc("Number of float instructions issued")
        .prereq(iqFloatInstsIssued);

    iqBranchInstsIssued
        .name(name() + ".iqBranchInstsIssued")
        .desc("Number of branch instructions issued")
        .prereq(iqBranchInstsIssued);

    iqMemInstsIssued
        .name(name() + ".iqMemInstsIssued")
        .desc("Number of memory instructions issued")
        .prereq(iqMemInstsIssued);

    iqMiscInstsIssued
        .name(name() + ".iqMiscInstsIssued")
        .desc("Number of miscellaneous instructions issued")
        .prereq(iqMiscInstsIssued);

    iqSquashedInstsIssued
        .name(name() + ".iqSquashedInstsIssued")
        .desc("Number of squashed instructions issued")
        .prereq(iqSquashedInstsIssued);

    iqSquashedInstsExamined
        .name(name() + ".iqSquashedInstsExamined")
        .desc("Number of squashed instructions iterated over during squash;"
              " mainly for profiling")
        .prereq(iqSquashedInstsExamined);

    iqSquashedOperandsExamined
        .name(name() + ".iqSquashedOperandsExamined")
        .desc("Number of squashed operands that are examined and possibly "
              "removed from graph")
        .prereq(iqSquashedOperandsExamined);

    iqSquashedNonSpecRemoved
        .name(name() + ".iqSquashedNonSpecRemoved")
        .desc("Number of squashed non-spec instructions that were removed")
        .prereq(iqSquashedNonSpecRemoved);
/*
    queueResDist
        .init(Num_OpClasses, 0, 99, 2)
        .name(name() + ".IQ:residence:")
        .desc("cycles from dispatch to issue")
        .flags(total | pdf | cdf )
        ;
    for (int i = 0; i < Num_OpClasses; ++i) {
        queueResDist.subname(i, opClassStrings[i]);
    }
*/
    numIssuedDist
        .init(0,totalWidth,1)
        .name(name() + ".issued_per_cycle")
        .desc("Number of insts issued each cycle")
        .flags(pdf)
        ;
/*
    dist_unissued
        .init(Num_OpClasses+2)
        .name(name() + ".unissued_cause")
        .desc("Reason ready instruction not issued")
        .flags(pdf | dist)
        ;
    for (int i=0; i < (Num_OpClasses + 2); ++i) {
        dist_unissued.subname(i, unissued_names[i]);
    }
*/
    statIssuedInstType
        .init(numThreads,Enums::Num_OpClass)
        .name(name() + ".FU_type")
        .desc("Type of FU issued")
        .flags(total | pdf | dist)
        ;
    statIssuedInstType.ysubnames(Enums::OpClassStrings);

    //
    // How long did instructions for a particular FU type wait prior to issue
    //
/*
    issueDelayDist
        .init(Num_OpClasses,0,99,2)
        .name(name() + ".")
        .desc("cycles from operands ready to issue")
        .flags(pdf | cdf)
        ;

    for (int i=0; i<Num_OpClasses; ++i) {
        std::stringstream subname;
        subname << opClassStrings[i] << "_delay";
        issueDelayDist.subname(i, subname.str());
    }
*/
    issueRate
        .name(name() + ".rate")
        .desc("Inst issue rate")
        .flags(total)
        ;
    issueRate = iqInstsIssued / cpu->numCycles;

    statFuBusy
        .init(Num_OpClasses)
        .name(name() + ".fu_full")
        .desc("attempts to use FU when none available")
        .flags(pdf | dist)
        ;
    for (int i=0; i < Num_OpClasses; ++i) {
        statFuBusy.subname(i, Enums::OpClassStrings[i]);
    }

    fuBusy
        .init(numThreads)
        .name(name() + ".fu_busy_cnt")
        .desc("FU busy when requested")
        .flags(total)
        ;

    fuBusyRate
        .name(name() + ".fu_busy_rate")
        .desc("FU busy rate (busy events/executed inst)")
        .flags(total)
        ;
    fuBusyRate = fuBusy / iqInstsIssued;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        // Tell mem dependence unit to reg stats as well.
        memDepUnit[tid].regStats();
    }

    intInstQueueReads
        .name(name() + ".int_inst_queue_reads")
        .desc("Number of integer instruction queue reads")
        .flags(total);

    intInstQueueWrites
        .name(name() + ".int_inst_queue_writes")
        .desc("Number of integer instruction queue writes")
        .flags(total);

    intInstQueueWakeupAccesses
        .name(name() + ".int_inst_queue_wakeup_accesses")
        .desc("Number of integer instruction queue wakeup accesses")
        .flags(total);

    fpInstQueueReads
        .name(name() + ".fp_inst_queue_reads")
        .desc("Number of floating instruction queue reads")
        .flags(total);

    fpInstQueueWrites
        .name(name() + ".fp_inst_queue_writes")
        .desc("Number of floating instruction queue writes")
        .flags(total);

    // NOTE(review): member name is misspelled ("Qccesses"); kept as-is
    // because it is declared that way in the class header.
    fpInstQueueWakeupQccesses
        .name(name() + ".fp_inst_queue_wakeup_accesses")
        .desc("Number of floating instruction queue wakeup accesses")
        .flags(total);

    intAluAccesses
        .name(name() + ".int_alu_accesses")
        .desc("Number of integer alu accesses")
        .flags(total);

    fpAluAccesses
        .name(name() + ".fp_alu_accesses")
        .desc("Number of floating point alu accesses")
        .flags(total);

}

// Return the IQ to its empty initial state: clear per-thread counts and
// instruction lists, mark all registers not-ready on the scoreboard, and
// empty the ready/non-spec/deferred bookkeeping structures.  Called from
// the constructor and from switchOut().
template <class Impl>
void
InstructionQueue<Impl>::resetState()
{
    //Initialize thread IQ counts
    for (ThreadID tid = 0; tid <numThreads; tid++) {
        count[tid] = 0;
        instList[tid].clear();
    }

    // Initialize the number of free IQ entries.
    freeEntries = numEntries;

    // Note that in actuality, the registers corresponding to the logical
    // registers start off as ready.  However this doesn't matter for the
    // IQ as the instruction should have been correctly told if those
    // registers are ready in rename.  Thus it can all be initialized as
    // unready.
    for (int i = 0; i < numPhysRegs; ++i) {
        regScoreboard[i] = false;
    }

    for (ThreadID tid = 0; tid < numThreads; ++tid) {
        squashedSeqNum[tid] = 0;
    }

    for (int i = 0; i < Num_OpClasses; ++i) {
        while (!readyInsts[i].empty())
            readyInsts[i].pop();
        queueOnList[i] = false;
        readyIt[i] = listOrder.end();
    }
    nonSpecInsts.clear();
    listOrder.clear();
    deferredMemInsts.clear();
}

// Record the pointer to the CPU's list of active threads (owned elsewhere).
template <class Impl>
void
InstructionQueue<Impl>::setActiveThreads(list<ThreadID> *at_ptr)
{
    activeThreads = at_ptr;
}

// Wire up the queue used to pass issued instructions to the execute stage.
template <class Impl>
void
InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
{
    issueToExecuteQueue = i2e_ptr;
}

// Wire up the backwards time buffer and grab the wire that carries
// commit's information, delayed by commitToIEWDelay cycles.
template <class Impl>
void
InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
{
    timeBuffer = tb_ptr;

    fromCommit = timeBuffer->getWire(-commitToIEWDelay);
}

// Drain the IQ for a CPU switch: discard all queued state and tell each
// memory dependence unit to switch out as well.
template <class Impl>
void
InstructionQueue<Impl>::switchOut()
{
/*
    if (!instList[0].empty() || (numEntries != freeEntries) ||
        !readyInsts[0].empty() || !nonSpecInsts.empty() || !listOrder.empty()) {
        dumpInsts();
//        assert(0);
    }
*/
    resetState();
    dependGraph.reset();
    instsToExecute.clear();
    switchedOut = true;
    for (ThreadID tid = 0; tid < numThreads; ++tid) {
        memDepUnit[tid].switchOut();
    }
}

// Resume operation after a CPU switch; state was already cleared by
// switchOut(), so only the flag needs flipping.
template <class Impl>
void
InstructionQueue<Impl>::takeOverFrom()
{
    switchedOut = false;
}

// Number of entries a thread would get under the current policy; only the
// Partitioned policy implies a fixed per-thread share (0 means "no fixed
// amount" for the other policies).
template <class Impl>
int
InstructionQueue<Impl>::entryAmount(ThreadID num_threads)
{
    if (iqPolicy == Partitioned) {
        return numEntries / num_threads;
    } else {
        return 0;
    }
}


// Recompute per-thread maxEntries when the set of active threads changes.
// Partitioned re-splits evenly; Threshold gives a lone active thread the
// whole IQ.  Dynamic needs no adjustment (caps already equal numEntries).
template <class Impl>
void
InstructionQueue<Impl>::resetEntries()
{
    if (iqPolicy != Dynamic || numThreads > 1) {
        int active_threads = activeThreads->size();

        list<ThreadID>::iterator threads = activeThreads->begin();
        list<ThreadID>::iterator end = activeThreads->end();

        while (threads != end) {
            ThreadID tid = *threads++;

            if (iqPolicy == Partitioned) {
                maxEntries[tid] = numEntries / active_threads;
            } else if(iqPolicy == Threshold && active_threads == 1) {
                maxEntries[tid] = numEntries;
            }
        }
    }
}

// Total free entries across the whole IQ.
template <class Impl>
unsigned
InstructionQueue<Impl>::numFreeEntries()
{
    return freeEntries;
}

// Free entries remaining within a single thread's share.
template <class Impl>
unsigned
InstructionQueue<Impl>::numFreeEntries(ThreadID tid)
{
    return maxEntries[tid] - count[tid];
}

// Might want to do something more complex if it knows how many instructions
// will be issued this cycle.
template <class Impl>
bool
InstructionQueue<Impl>::isFull()
{
    if (freeEntries == 0) {
        return(true);
    } else {
        return(false);
    }
}

// Whether a specific thread has exhausted its share of the IQ.
template <class Impl>
bool
InstructionQueue<Impl>::isFull(ThreadID tid)
{
    if (numFreeEntries(tid) == 0) {
        return(true);
    } else {
        return(false);
    }
}

// True if any op-class ready queue has an instruction waiting to issue.
template <class Impl>
bool
InstructionQueue<Impl>::hasReadyInsts()
{
    if (!listOrder.empty()) {
        return true;
    }

    for (int i = 0; i < Num_OpClasses; ++i) {
        if (!readyInsts[i].empty()) {
            return true;
        }
    }

    return false;
}

// Insert a (speculative) instruction: record it in the thread's list, hook
// up its source dependencies and destination producers, and either hand it
// to the memory dependence unit (mem refs) or mark it ready if all of its
// sources already are.
template <class Impl>
void
InstructionQueue<Impl>::insert(DynInstPtr &new_inst)
{
    new_inst->isFloating() ? fpInstQueueWrites++ : intInstQueueWrites++;
    // Make sure the instruction is valid
    assert(new_inst);

    DPRINTF(IQ, "Adding instruction [sn:%lli] PC %s to the IQ.\n",
            new_inst->seqNum, new_inst->pcState());

    // Caller must have checked for room (isFull()) before inserting.
    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Look through its source registers (physical regs), and mark any
    // dependencies.
    addToDependents(new_inst);

    // Have this instruction set itself as the producer of its destination
    // register(s).
    addToProducers(new_inst);

    if (new_inst->isMemRef()) {
        memDepUnit[new_inst->threadNumber].insert(new_inst);
    } else {
        addIfReady(new_inst);
    }

    ++iqInstsAdded;

    count[new_inst->threadNumber]++;

    // Invariant: free + occupied must always equal total capacity.
    assert(freeEntries == (numEntries - countInsts()));
}

// Insert a non-speculative instruction.  It is tracked in nonSpecInsts and
// will not be allowed to issue until commit signals it via
// scheduleNonSpec(); hence no addToDependents/addIfReady here.
template <class Impl>
void
InstructionQueue<Impl>::insertNonSpec(DynInstPtr &new_inst)
{
    // @todo: Clean up this code; can do it by setting inst as unable
    // to issue, then calling normal insert on the inst.
    new_inst->isFloating() ? fpInstQueueWrites++ : intInstQueueWrites++;

    assert(new_inst);

    nonSpecInsts[new_inst->seqNum] = new_inst;

    DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %s "
            "to the IQ.\n",
            new_inst->seqNum, new_inst->pcState());

    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Have this instruction set itself as the producer of its destination
    // register(s).
    addToProducers(new_inst);

    // If it's a memory instruction, add it to the memory dependency
    // unit.
    if (new_inst->isMemRef()) {
        memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
    }

    ++iqNonSpecInstsAdded;

    count[new_inst->threadNumber]++;

    assert(freeEntries == (numEntries - countInsts()));
}

// Insert a memory barrier: register it with the memory dependence unit
// first, then treat it like any other non-speculative instruction.
template <class Impl>
void
InstructionQueue<Impl>::insertBarrier(DynInstPtr &barr_inst)
{
    memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);

    insertNonSpec(barr_inst);
}

// Pop the next instruction scheduled for execution (FIFO order), bumping
// the appropriate int/fp queue-read stat.
template <class Impl>
typename Impl::DynInstPtr
InstructionQueue<Impl>::getInstToExecute()
{
    assert(!instsToExecute.empty());
    DynInstPtr inst = instsToExecute.front();
    instsToExecute.pop_front();
    if (inst->isFloating()){
        fpInstQueueReads++;
    } else {
        intInstQueueReads++;
    }
    return inst;
}

// Insert an op class into listOrder, keeping the list sorted by the
// sequence number of each class's oldest ready instruction so issue
// always considers the globally oldest instruction first.
template <class Impl>
void
InstructionQueue<Impl>::addToOrderList(OpClass op_class)
{
    assert(!readyInsts[op_class].empty());

    ListOrderEntry queue_entry;

    queue_entry.queueType = op_class;

    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    ListOrderIt list_it = listOrder.begin();
    ListOrderIt list_end_it = listOrder.end();

    // Walk forward until we find an entry younger than ours; insert before it.
    while (list_it != list_end_it) {
        if ((*list_it).oldestInst > queue_entry.oldestInst) {
            break;
        }

        list_it++;
    }

    readyIt[op_class] = listOrder.insert(list_it, queue_entry);
    queueOnList[op_class] = true;
}

// Get iterator of next item on the list
// Delete the original iterator
// Determine if the next item is either the end of the list or younger
// than the new instruction.  If so, then add in a new iterator right here.
// If not, then move along.
template <class Impl>
void
InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
{
    ListOrderEntry queue_entry;
    OpClass op_class = (*list_order_it).queueType;
    ListOrderIt next_it = list_order_it;

    ++next_it;

    queue_entry.queueType = op_class;
    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    // Scan forward from the old position to find the first entry younger
    // than this class's new oldest instruction.
    while (next_it != listOrder.end() &&
           (*next_it).oldestInst < queue_entry.oldestInst) {
        ++next_it;
    }

    readyIt[op_class] = listOrder.insert(next_it, queue_entry);
}

// Called by the FUCompletion event when a long-latency FU finishes: wake
// the CPU if needed, optionally free the FU (fu_idx >= 0), and queue the
// instruction for execution next cycle.
template <class Impl>
void
InstructionQueue<Impl>::processFUCompletion(DynInstPtr &inst, int fu_idx)
{
    DPRINTF(IQ, "Processing FU completion [sn:%lli]\n", inst->seqNum);
    // The CPU could have been sleeping until this op completed (*extremely*
    // long latency op).  Wake it if it was.  This may be overkill.
    if (isSwitchedOut()) {
        DPRINTF(IQ, "FU completion not processed, IQ is switched out [sn:%lli]\n",
                inst->seqNum);
        return;
    }

    iewStage->wakeCPU();

    if (fu_idx > -1)
        fuPool->freeUnitNextCycle(fu_idx);

    // @todo: Ensure that these FU Completions happen at the beginning
    // of a cycle, otherwise they could add too many instructions to
    // the queue.
    issueToExecuteQueue->access(-1)->size++;
    instsToExecute.push_back(inst);
}

// @todo: Figure out a better way to remove the squashed items from the
// lists.  Checking the top item of each list to see if it's squashed
// wastes time and forces jumps.
// Main issue logic: first drain deferred memory instructions whose
// translations have completed, then walk listOrder (oldest-first across op
// classes) issuing up to issue-width instructions, acquiring FUs and
// scheduling FUCompletion events for multi-cycle ops.
template <class Impl>
void
InstructionQueue<Impl>::scheduleReadyInsts()
{
    DPRINTF(IQ, "Attempting to schedule ready instructions from "
            "the IQ.\n");

    IssueStruct *i2e_info = issueToExecuteQueue->access(0);

    // Deferred mem insts consume issue bandwidth before regular insts.
    DynInstPtr deferred_mem_inst;
    int total_deferred_mem_issued = 0;
    while (total_deferred_mem_issued < totalWidth &&
           (deferred_mem_inst = getDeferredMemInstToExecute()) != 0) {
        issueToExecuteQueue->access(0)->size++;
        instsToExecute.push_back(deferred_mem_inst);
        total_deferred_mem_issued++;
    }

    // Have iterator to head of the list
    // While I haven't exceeded bandwidth or reached the end of the list,
    // Try to get a FU that can do what this op needs.
    // If successful, change the oldestInst to the new top of the list, put
    // the queue in the proper place in the list.
    // Increment the iterator.
    // This will avoid trying to schedule a certain op class if there are no
    // FUs that handle it.
    ListOrderIt order_it = listOrder.begin();
    ListOrderIt order_end_it = listOrder.end();
    int total_issued = 0;

    while (total_issued < (totalWidth - total_deferred_mem_issued) &&
           iewStage->canIssue() &&
           order_it != order_end_it) {
        OpClass op_class = (*order_it).queueType;

        assert(!readyInsts[op_class].empty());

        DynInstPtr issuing_inst = readyInsts[op_class].top();

        issuing_inst->isFloating() ? fpInstQueueReads++ : intInstQueueReads++;

        assert(issuing_inst->seqNum == (*order_it).oldestInst);

        // Squashed instructions are dropped here rather than issued.
        if (issuing_inst->isSquashed()) {
            readyInsts[op_class].pop();

            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            listOrder.erase(order_it++);

            ++iqSquashedInstsIssued;

            continue;
        }

        // idx == -2 means "no FU needed"; -1 means "no FU available".
        int idx = -2;
        int op_latency = 1;
        ThreadID tid = issuing_inst->threadNumber;

        if (op_class != No_OpClass) {
            idx = fuPool->getUnit(op_class);
            issuing_inst->isFloating() ? fpAluAccesses++ : intAluAccesses++;
            if (idx > -1) {
                op_latency = fuPool->getOpLatency(op_class);
            }
        }

        // If we have an instruction that doesn't require a FU, or a
        // valid FU, then schedule for execution.
        if (idx == -2 || idx != -1) {
            if (op_latency == 1) {
                i2e_info->size++;
                instsToExecute.push_back(issuing_inst);

                // Add the FU onto the list of FU's to be freed next
                // cycle if we used one.
                if (idx >= 0)
                    fuPool->freeUnitNextCycle(idx);
            } else {
                int issue_latency = fuPool->getIssueLatency(op_class);
                // Generate completion event for the FU
                FUCompletion *execution = new FUCompletion(issuing_inst,
                                                           idx, this);

                cpu->schedule(execution, curTick() + cpu->ticks(op_latency - 1));

                // @todo: Enforce that issue_latency == 1 or op_latency
                if (issue_latency > 1) {
                    // If FU isn't pipelined, then it must be freed
                    // upon the execution completing.
                    execution->setFreeFU();
                } else {
                    // Add the FU onto the list of FU's to be freed next cycle.
                    fuPool->freeUnitNextCycle(idx);
                }
            }

            DPRINTF(IQ, "Thread %i: Issuing instruction PC %s "
                    "[sn:%lli]\n",
                    tid, issuing_inst->pcState(),
                    issuing_inst->seqNum);

            readyInsts[op_class].pop();

            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            issuing_inst->setIssued();
            ++total_issued;

#if TRACING_ON
            issuing_inst->issueTick = curTick();
#endif

            if (!issuing_inst->isMemRef()) {
                // Memory instructions can not be freed from the IQ until they
                // complete.
                ++freeEntries;
                count[tid]--;
                issuing_inst->clearInIQ();
            } else {
                memDepUnit[tid].issue(issuing_inst);
            }

            listOrder.erase(order_it++);
            statIssuedInstType[tid][op_class]++;
            iewStage->incrWb(issuing_inst->seqNum);
        } else {
            // FU busy: record the stall and try the next op class.
            statFuBusy[op_class]++;
            fuBusy[tid]++;
            ++order_it;
        }
    }

    numIssuedDist.sample(total_issued);
    iqInstsIssued+= total_issued;

    // If we issued any instructions, tell the CPU we had activity.
    // @todo If the way deferred memory instructions are handled due to
    // translation changes then the deferredMemInsts condition should be removed
    // from the code below.
    if (total_issued || total_deferred_mem_issued || deferredMemInsts.size()) {
        cpu->activityThisCycle();
    } else {
        DPRINTF(IQ, "Not able to schedule any instructions.\n");
    }
}

// Commit has reached a non-speculative instruction: mark it able to issue
// (mem refs go through the mem dependence unit) and drop it from the
// nonSpecInsts tracking map.
template <class Impl>
void
InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
{
    DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
            "to execute.\n", inst);

    NonSpecMapIt inst_it = nonSpecInsts.find(inst);

    assert(inst_it != nonSpecInsts.end());

    ThreadID tid = (*inst_it).second->threadNumber;

    (*inst_it).second->setAtCommit();

    (*inst_it).second->setCanIssue();

    if (!(*inst_it).second->isMemRef()) {
        addIfReady((*inst_it).second);
    } else {
        memDepUnit[tid].nonSpecInstReady((*inst_it).second);
    }

    // Clear the smart-pointer slot before erasing so the DynInst ref is
    // released.
    (*inst_it).second = NULL;

    nonSpecInsts.erase(inst_it);
}

// Retire all of a thread's instList entries up to and including sequence
// number 'inst'; the list is ordered oldest-first so we pop from the head.
template <class Impl>
void
InstructionQueue<Impl>::commit(const InstSeqNum &inst, ThreadID tid)
{
    DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n",
            tid,inst);

    ListIt iq_it = instList[tid].begin();

    while (iq_it != instList[tid].end() &&
           (*iq_it)->seqNum <= inst) {
        ++iq_it;
        instList[tid].pop_front();
    }

    assert(freeEntries == (numEntries - countInsts()));
}

// An instruction finished executing: pop its consumers off the dependency
// graph for each destination register, mark their source operands ready,
// and mark the registers ready on the scoreboard.  Returns the number of
// dependents woken.
template <class Impl>
int
InstructionQueue<Impl>::wakeDependents(DynInstPtr &completed_inst)
{
    int dependents = 0;

    // The instruction queue here takes care of both floating and int ops
    if (completed_inst->isFloating()) {
        fpInstQueueWakeupQccesses++;
    } else {
        intInstQueueWakeupAccesses++;
    }

    DPRINTF(IQ, "Waking dependents of completed instruction.\n");

    assert(!completed_inst->isSquashed());

    // Tell the memory dependence unit to wake any dependents on this
    // instruction if it is a memory instruction.  Also complete the memory
    // instruction at this point since we know it executed without issues.
    // @todo: Might want to rename "completeMemInst" to something that
    // indicates that it won't need to be replayed, and call this
    // earlier.  Might not be a big deal.
    if (completed_inst->isMemRef()) {
        memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
        completeMemInst(completed_inst);
    } else if (completed_inst->isMemBarrier() ||
               completed_inst->isWriteBarrier()) {
        memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
    }

    for (int dest_reg_idx = 0;
         dest_reg_idx < completed_inst->numDestRegs();
         dest_reg_idx++)
    {
        PhysRegIndex dest_reg =
            completed_inst->renamedDestRegIdx(dest_reg_idx);

        // Special case of uniq or control registers.  They are not
        // handled by the IQ and thus have no dependency graph entry.
        // @todo Figure out a cleaner way to handle this.
        if (dest_reg >= numPhysRegs) {
            DPRINTF(IQ, "dest_reg :%d, numPhysRegs: %d\n", dest_reg,
                    numPhysRegs);
            continue;
        }

        DPRINTF(IQ, "Waking any dependents on register %i.\n",
                (int) dest_reg);

        //Go through the dependency chain, marking the registers as
        //ready within the waiting instructions.
        DynInstPtr dep_inst = dependGraph.pop(dest_reg);

        while (dep_inst) {
            DPRINTF(IQ, "Waking up a dependent instruction, [sn:%lli] "
                    "PC %s.\n", dep_inst->seqNum, dep_inst->pcState());

            // Might want to give more information to the instruction
            // so that it knows which of its source registers is
            // ready.  However that would mean that the dependency
            // graph entries would need to hold the src_reg_idx.
            dep_inst->markSrcRegReady();

            addIfReady(dep_inst);

            dep_inst = dependGraph.pop(dest_reg);

            ++dependents;
        }

        // Reset the head node now that all of its dependents have
        // been woken up.
        assert(dependGraph.empty(dest_reg));
        dependGraph.clearInst(dest_reg);

        // Mark the scoreboard as having that register ready.
        regScoreboard[dest_reg] = true;
    }
    return dependents;
}

// A memory instruction (cleared by the mem dependence unit) is now ready:
// push it onto its op class's ready queue and fix up listOrder ordering.
template <class Impl>
void
InstructionQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
{
    OpClass op_class = ready_inst->opClass();

    readyInsts[op_class].push(ready_inst);

    // Will need to reorder the list if either a queue is not on the list,
    // or it has an older instruction than last time.
    if (!queueOnList[op_class]) {
        addToOrderList(op_class);
    } else if (readyInsts[op_class].top()->seqNum <
               (*readyIt[op_class]).oldestInst) {
        listOrder.erase(readyIt[op_class]);
        addToOrderList(op_class);
    }

    DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
            "the ready list, PC %s opclass:%i [sn:%lli].\n",
            ready_inst->pcState(), op_class, ready_inst->seqNum);
}

// A memory instruction must be retried later (e.g. it could not proceed):
// reset its translation state and hand it back to the mem dependence unit.
template <class Impl>
void
InstructionQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
{
    DPRINTF(IQ, "Rescheduling mem inst [sn:%lli]\n", resched_inst->seqNum);

    // Reset DTB translation state
    resched_inst->translationStarted = false;
    resched_inst->translationCompleted = false;

    resched_inst->clearCanIssue();
    memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
}

// Replay a previously rescheduled memory instruction.
template <class Impl>
void
InstructionQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
{
    memDepUnit[replay_inst->threadNumber].replay(replay_inst);
}

// A memory instruction fully completed: only now is its IQ entry freed
// (see scheduleReadyInsts, which keeps mem refs occupying an entry past
// issue) and the mem dependence unit notified.
template <class Impl>
void
InstructionQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
{
    ThreadID tid = completed_inst->threadNumber;

    DPRINTF(IQ, "Completing mem instruction PC: %s [sn:%lli]\n",
            completed_inst->pcState(), completed_inst->seqNum);

    ++freeEntries;

    completed_inst->memOpDone = true;

    memDepUnit[tid].completed(completed_inst);
    count[tid]--;
}

// Park a memory instruction whose translation is still outstanding; it
// will be picked back up by getDeferredMemInstToExecute().
template <class Impl>
void
InstructionQueue<Impl>::deferMemInst(DynInstPtr &deferred_inst)
{
    deferredMemInsts.push_back(deferred_inst);
}

// Return (and remove) the first deferred memory instruction whose
// translation has completed or that has been squashed; NULL if none.
template <class Impl>
typename Impl::DynInstPtr
InstructionQueue<Impl>::getDeferredMemInstToExecute()
{
    for (ListIt it = deferredMemInsts.begin(); it != deferredMemInsts.end();
         ++it) {
        if ((*it)->translationCompleted || (*it)->isSquashed()) {
            DynInstPtr ret = *it;
            deferredMemInsts.erase(it);
            return ret;
        }
    }
    return NULL;
}

// Forward a detected memory-ordering violation to the thread's memory
// dependence unit.
template <class Impl>
void
InstructionQueue<Impl>::violation(DynInstPtr &store,
                                  DynInstPtr &faulting_load)
{
    intInstQueueWrites++;
    memDepUnit[store->threadNumber].violation(store, faulting_load);
}

// Begin a squash for a thread: latch the sequence number from commit,
// squash the IQ contents if the thread has any, and tell the mem
// dependence unit.
template <class Impl>
void
InstructionQueue<Impl>::squash(ThreadID tid)
{
    DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
            "the IQ.\n", tid);

    // Read instruction sequence number of last instruction out of the
    // time buffer.
    squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;

    // Call doSquash if there are insts in the IQ
    if (count[tid] > 0) {
        doSquash(tid);
    }

    // Also tell the memory dependence unit to squash.
    memDepUnit[tid].squash(squashedSeqNum[tid], tid);
}

// Walk the thread's instList from youngest to oldest, squashing every
// instruction younger than squashedSeqNum[tid]: detach it from the
// dependency graph (or the non-spec map), mark it squashed-in-IQ, and set
// the status bits that let it drain through the rest of the pipeline.
template <class Impl>
void
InstructionQueue<Impl>::doSquash(ThreadID tid)
{
    // Start at the tail.
    ListIt squash_it = instList[tid].end();
    --squash_it;

    DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
            tid, squashedSeqNum[tid]);

    // Squash any instructions younger than the squashed sequence number
    // given.
    while (squash_it != instList[tid].end() &&
           (*squash_it)->seqNum > squashedSeqNum[tid]) {

        DynInstPtr squashed_inst = (*squash_it);
        squashed_inst->isFloating() ? fpInstQueueWrites++ : intInstQueueWrites++;

        // Only handle the instruction if it actually is in the IQ and
        // hasn't already been squashed in the IQ.
        if (squashed_inst->threadNumber != tid ||
            squashed_inst->isSquashedInIQ()) {
            --squash_it;
            continue;
        }

        if (!squashed_inst->isIssued() ||
            (squashed_inst->isMemRef() &&
             !squashed_inst->memOpDone)) {

            DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %s squashed.\n",
                    tid, squashed_inst->seqNum, squashed_inst->pcState());

            // Remove the instruction from the dependency list.
            if (!squashed_inst->isNonSpeculative() &&
                !squashed_inst->isStoreConditional() &&
                !squashed_inst->isMemBarrier() &&
                !squashed_inst->isWriteBarrier()) {

                for (int src_reg_idx = 0;
                     src_reg_idx < squashed_inst->numSrcRegs();
                     src_reg_idx++)
                {
                    PhysRegIndex src_reg =
                        squashed_inst->renamedSrcRegIdx(src_reg_idx);

                    // Only remove it from the dependency graph if it
                    // was placed there in the first place.

                    // Instead of doing a linked list traversal, we
                    // can just remove these squashed instructions
                    // either at issue time, or when the register is
                    // overwritten.  The only downside to this is it
                    // leaves more room for error.

                    if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
                        src_reg < numPhysRegs) {
                        dependGraph.remove(src_reg, squashed_inst);
                    }


                    ++iqSquashedOperandsExamined;
                }
            } else if (!squashed_inst->isStoreConditional() ||
                       !squashed_inst->isCompleted()) {
                NonSpecMapIt ns_inst_it =
                    nonSpecInsts.find(squashed_inst->seqNum);

                // A non-spec inst missing from the map must have faulted.
                if (ns_inst_it == nonSpecInsts.end()) {
                    assert(squashed_inst->getFault() != NoFault);
                } else {

                    (*ns_inst_it).second = NULL;

                    nonSpecInsts.erase(ns_inst_it);

                    ++iqSquashedNonSpecRemoved;
                }
            }

            // Might want to also clear out the head of the dependency graph.

            // Mark it as squashed within the IQ.
            squashed_inst->setSquashedInIQ();

            // @todo: Remove this hack where several statuses are set so the
            // inst will flow through the rest of the pipeline.
            squashed_inst->setIssued();
            squashed_inst->setCanCommit();
            squashed_inst->clearInIQ();

            //Update Thread IQ Count
            count[squashed_inst->threadNumber]--;

            ++freeEntries;
        }

        instList[tid].erase(squash_it--);
        ++iqSquashedInstsExamined;
    }
}

// Hook each of the instruction's not-yet-ready source registers into the
// dependency graph; sources already ready per the scoreboard are marked
// ready on the instruction instead.  Returns true if anything was added
// to the graph.
template <class Impl>
bool
InstructionQueue<Impl>::addToDependents(DynInstPtr &new_inst)
{
    // Loop through the instruction's source registers, adding
    // them to the dependency list if they are not ready.
    int8_t total_src_regs = new_inst->numSrcRegs();
    bool return_val = false;

    for (int src_reg_idx = 0;
         src_reg_idx < total_src_regs;
         src_reg_idx++)
    {
        // Only add it to the dependency graph if it's not ready.
        if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
            PhysRegIndex src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);

            // Check the IQ's scoreboard to make sure the register
            // hasn't become ready while the instruction was in flight
            // between stages.  Only if it really isn't ready should
            // it be added to the dependency graph.
            if (src_reg >= numPhysRegs) {
                // Misc/control registers are not tracked by the IQ.
                continue;
            } else if (regScoreboard[src_reg] == false) {
                DPRINTF(IQ, "Instruction PC %s has src reg %i that "
                        "is being added to the dependency chain.\n",
                        new_inst->pcState(), src_reg);

                dependGraph.insert(src_reg, new_inst);

                // Change the return value to indicate that something
                // was added to the dependency graph.
                return_val = true;
            } else {
                DPRINTF(IQ, "Instruction PC %s has src reg %i that "
                        "became ready before it reached the IQ.\n",
                        new_inst->pcState(), src_reg);
                // Mark a register ready within the instruction.
                new_inst->markSrcRegReady(src_reg_idx);
            }
        }
    }

    return return_val;
}

// Record this instruction as the producer of each of its renamed
// destination registers and mark those registers not-ready.
template <class Impl>
void
InstructionQueue<Impl>::addToProducers(DynInstPtr &new_inst)
{
    // Nothing really needs to be marked when an instruction becomes
    // the producer of a register's value, but for convenience a ptr
    // to the producing instruction will be placed in the head node of
    // the dependency links.
    int8_t total_dest_regs = new_inst->numDestRegs();

    for (int dest_reg_idx = 0;
         dest_reg_idx < total_dest_regs;
         dest_reg_idx++)
    {
        PhysRegIndex dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);

        // Instructions that use the misc regs will have a reg number
        // higher than the normal physical registers.  In this case these
        // registers are not renamed, and there is no need to track
        // dependencies as these instructions must be executed at commit.
        if (dest_reg >= numPhysRegs) {
            continue;
        }

        // A non-empty head here means two in-flight producers of the same
        // physical register, which rename should never allow.
        if (!dependGraph.empty(dest_reg)) {
            dependGraph.dump();
            panic("Dependency graph %i not empty!", dest_reg);
        }

        dependGraph.setInst(dest_reg, new_inst);

        // Mark the scoreboard to say it's not yet ready.
        regScoreboard[dest_reg] = false;
    }
}

// If all source registers are now ready, move the instruction to its op
// class's ready queue (mem refs instead notify the mem dependence unit,
// which decides when they may issue).
template <class Impl>
void
InstructionQueue<Impl>::addIfReady(DynInstPtr &inst)
{
    // If the instruction now has all of its source registers
    // available, then add it to the list of ready instructions.
    if (inst->readyToIssue()) {

        //Add the instruction to the proper ready list.
        if (inst->isMemRef()) {

            DPRINTF(IQ, "Checking if memory instruction can issue.\n");

            // Message to the mem dependence unit that this instruction has
            // its registers ready.
            memDepUnit[inst->threadNumber].regsReady(inst);

            return;
        }

        OpClass op_class = inst->opClass();

        DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
                "the ready list, PC %s opclass:%i [sn:%lli].\n",
                inst->pcState(), op_class, inst->seqNum);

        readyInsts[op_class].push(inst);

        // Will need to reorder the list if either a queue is not on the list,
        // or it has an older instruction than last time.
        if (!queueOnList[op_class]) {
            addToOrderList(op_class);
        } else if (readyInsts[op_class].top()->seqNum <
                   (*readyIt[op_class]).oldestInst) {
            listOrder.erase(readyIt[op_class]);
            addToOrderList(op_class);
        }
    }
}

template <class Impl>
int
InstructionQueue<Impl>::countInsts()
{
#if 0
    //ksewell:This works but definitely could use a cleaner write
    //with a more intuitive way of counting. Right now it's
    //just brute force ....
    // Change the #if if you want to use this method.
1372 int total_insts = 0; 1373 1374 for (ThreadID tid = 0; tid < numThreads; ++tid) { 1375 ListIt count_it = instList[tid].begin(); 1376 1377 while (count_it != instList[tid].end()) { 1378 if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) { 1379 if (!(*count_it)->isIssued()) { 1380 ++total_insts; 1381 } else if ((*count_it)->isMemRef() && 1382 !(*count_it)->memOpDone) { 1383 // Loads that have not been marked as executed still count 1384 // towards the total instructions. 1385 ++total_insts; 1386 } 1387 } 1388 1389 ++count_it; 1390 } 1391 } 1392 1393 return total_insts; 1394#else 1395 return numEntries - freeEntries; 1396#endif 1397} 1398 1399template <class Impl> 1400void 1401InstructionQueue<Impl>::dumpLists() 1402{ 1403 for (int i = 0; i < Num_OpClasses; ++i) { 1404 cprintf("Ready list %i size: %i\n", i, readyInsts[i].size()); 1405 1406 cprintf("\n"); 1407 } 1408 1409 cprintf("Non speculative list size: %i\n", nonSpecInsts.size()); 1410 1411 NonSpecMapIt non_spec_it = nonSpecInsts.begin(); 1412 NonSpecMapIt non_spec_end_it = nonSpecInsts.end(); 1413 1414 cprintf("Non speculative list: "); 1415 1416 while (non_spec_it != non_spec_end_it) { 1417 cprintf("%s [sn:%lli]", (*non_spec_it).second->pcState(), 1418 (*non_spec_it).second->seqNum); 1419 ++non_spec_it; 1420 } 1421 1422 cprintf("\n"); 1423 1424 ListOrderIt list_order_it = listOrder.begin(); 1425 ListOrderIt list_order_end_it = listOrder.end(); 1426 int i = 1; 1427 1428 cprintf("List order: "); 1429 1430 while (list_order_it != list_order_end_it) { 1431 cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType, 1432 (*list_order_it).oldestInst); 1433 1434 ++list_order_it; 1435 ++i; 1436 } 1437 1438 cprintf("\n"); 1439} 1440 1441 1442template <class Impl> 1443void 1444InstructionQueue<Impl>::dumpInsts() 1445{ 1446 for (ThreadID tid = 0; tid < numThreads; ++tid) { 1447 int num = 0; 1448 int valid_num = 0; 1449 ListIt inst_list_it = instList[tid].begin(); 1450 1451 while (inst_list_it != 
instList[tid].end()) { 1452 cprintf("Instruction:%i\n", num); 1453 if (!(*inst_list_it)->isSquashed()) { 1454 if (!(*inst_list_it)->isIssued()) { 1455 ++valid_num; 1456 cprintf("Count:%i\n", valid_num); 1457 } else if ((*inst_list_it)->isMemRef() && 1458 !(*inst_list_it)->memOpDone) { 1459 // Loads that have not been marked as executed 1460 // still count towards the total instructions. 1461 ++valid_num; 1462 cprintf("Count:%i\n", valid_num); 1463 } 1464 } 1465 1466 cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n" 1467 "Issued:%i\nSquashed:%i\n", 1468 (*inst_list_it)->pcState(), 1469 (*inst_list_it)->seqNum, 1470 (*inst_list_it)->threadNumber, 1471 (*inst_list_it)->isIssued(), 1472 (*inst_list_it)->isSquashed()); 1473 1474 if ((*inst_list_it)->isMemRef()) { 1475 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone); 1476 } 1477 1478 cprintf("\n"); 1479 1480 inst_list_it++; 1481 ++num; 1482 } 1483 } 1484 1485 cprintf("Insts to Execute list:\n"); 1486 1487 int num = 0; 1488 int valid_num = 0; 1489 ListIt inst_list_it = instsToExecute.begin(); 1490 1491 while (inst_list_it != instsToExecute.end()) 1492 { 1493 cprintf("Instruction:%i\n", 1494 num); 1495 if (!(*inst_list_it)->isSquashed()) { 1496 if (!(*inst_list_it)->isIssued()) { 1497 ++valid_num; 1498 cprintf("Count:%i\n", valid_num); 1499 } else if ((*inst_list_it)->isMemRef() && 1500 !(*inst_list_it)->memOpDone) { 1501 // Loads that have not been marked as executed 1502 // still count towards the total instructions. 1503 ++valid_num; 1504 cprintf("Count:%i\n", valid_num); 1505 } 1506 } 1507 1508 cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n" 1509 "Issued:%i\nSquashed:%i\n", 1510 (*inst_list_it)->pcState(), 1511 (*inst_list_it)->seqNum, 1512 (*inst_list_it)->threadNumber, 1513 (*inst_list_it)->isIssued(), 1514 (*inst_list_it)->isSquashed()); 1515 1516 if ((*inst_list_it)->isMemRef()) { 1517 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone); 1518 } 1519 1520 cprintf("\n"); 1521 1522 inst_list_it++; 1523 ++num; 1524 } 1525}
| 62} 63 64template <class Impl> 65void 66InstructionQueue<Impl>::FUCompletion::process() 67{ 68 iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1); 69 inst = NULL; 70} 71 72 73template <class Impl> 74const char * 75InstructionQueue<Impl>::FUCompletion::description() const 76{ 77 return "Functional unit completion"; 78} 79 80template <class Impl> 81InstructionQueue<Impl>::InstructionQueue(O3CPU *cpu_ptr, IEW *iew_ptr, 82 DerivO3CPUParams *params) 83 : cpu(cpu_ptr), 84 iewStage(iew_ptr), 85 fuPool(params->fuPool), 86 numEntries(params->numIQEntries), 87 totalWidth(params->issueWidth), 88 numPhysIntRegs(params->numPhysIntRegs), 89 numPhysFloatRegs(params->numPhysFloatRegs), 90 commitToIEWDelay(params->commitToIEWDelay) 91{ 92 assert(fuPool); 93 94 switchedOut = false; 95 96 numThreads = params->numThreads; 97 98 // Set the number of physical registers as the number of int + float 99 numPhysRegs = numPhysIntRegs + numPhysFloatRegs; 100 101 //Create an entry for each physical register within the 102 //dependency graph. 103 dependGraph.resize(numPhysRegs); 104 105 // Resize the register scoreboard. 106 regScoreboard.resize(numPhysRegs); 107 108 //Initialize Mem Dependence Units 109 for (ThreadID tid = 0; tid < numThreads; tid++) { 110 memDepUnit[tid].init(params, tid); 111 memDepUnit[tid].setIQ(this); 112 } 113 114 resetState(); 115 116 std::string policy = params->smtIQPolicy; 117 118 //Convert string to lowercase 119 std::transform(policy.begin(), policy.end(), policy.begin(), 120 (int(*)(int)) tolower); 121 122 //Figure out resource sharing policy 123 if (policy == "dynamic") { 124 iqPolicy = Dynamic; 125 126 //Set Max Entries to Total ROB Capacity 127 for (ThreadID tid = 0; tid < numThreads; tid++) { 128 maxEntries[tid] = numEntries; 129 } 130 131 } else if (policy == "partitioned") { 132 iqPolicy = Partitioned; 133 134 //@todo:make work if part_amt doesnt divide evenly. 
135 int part_amt = numEntries / numThreads; 136 137 //Divide ROB up evenly 138 for (ThreadID tid = 0; tid < numThreads; tid++) { 139 maxEntries[tid] = part_amt; 140 } 141 142 DPRINTF(IQ, "IQ sharing policy set to Partitioned:" 143 "%i entries per thread.\n",part_amt); 144 } else if (policy == "threshold") { 145 iqPolicy = Threshold; 146 147 double threshold = (double)params->smtIQThreshold / 100; 148 149 int thresholdIQ = (int)((double)threshold * numEntries); 150 151 //Divide up by threshold amount 152 for (ThreadID tid = 0; tid < numThreads; tid++) { 153 maxEntries[tid] = thresholdIQ; 154 } 155 156 DPRINTF(IQ, "IQ sharing policy set to Threshold:" 157 "%i entries per thread.\n",thresholdIQ); 158 } else { 159 assert(0 && "Invalid IQ Sharing Policy.Options Are:{Dynamic," 160 "Partitioned, Threshold}"); 161 } 162} 163 164template <class Impl> 165InstructionQueue<Impl>::~InstructionQueue() 166{ 167 dependGraph.reset(); 168#ifdef DEBUG 169 cprintf("Nodes traversed: %i, removed: %i\n", 170 dependGraph.nodesTraversed, dependGraph.nodesRemoved); 171#endif 172} 173 174template <class Impl> 175std::string 176InstructionQueue<Impl>::name() const 177{ 178 return cpu->name() + ".iq"; 179} 180 181template <class Impl> 182void 183InstructionQueue<Impl>::regStats() 184{ 185 using namespace Stats; 186 iqInstsAdded 187 .name(name() + ".iqInstsAdded") 188 .desc("Number of instructions added to the IQ (excludes non-spec)") 189 .prereq(iqInstsAdded); 190 191 iqNonSpecInstsAdded 192 .name(name() + ".iqNonSpecInstsAdded") 193 .desc("Number of non-speculative instructions added to the IQ") 194 .prereq(iqNonSpecInstsAdded); 195 196 iqInstsIssued 197 .name(name() + ".iqInstsIssued") 198 .desc("Number of instructions issued") 199 .prereq(iqInstsIssued); 200 201 iqIntInstsIssued 202 .name(name() + ".iqIntInstsIssued") 203 .desc("Number of integer instructions issued") 204 .prereq(iqIntInstsIssued); 205 206 iqFloatInstsIssued 207 .name(name() + ".iqFloatInstsIssued") 208 .desc("Number of 
float instructions issued") 209 .prereq(iqFloatInstsIssued); 210 211 iqBranchInstsIssued 212 .name(name() + ".iqBranchInstsIssued") 213 .desc("Number of branch instructions issued") 214 .prereq(iqBranchInstsIssued); 215 216 iqMemInstsIssued 217 .name(name() + ".iqMemInstsIssued") 218 .desc("Number of memory instructions issued") 219 .prereq(iqMemInstsIssued); 220 221 iqMiscInstsIssued 222 .name(name() + ".iqMiscInstsIssued") 223 .desc("Number of miscellaneous instructions issued") 224 .prereq(iqMiscInstsIssued); 225 226 iqSquashedInstsIssued 227 .name(name() + ".iqSquashedInstsIssued") 228 .desc("Number of squashed instructions issued") 229 .prereq(iqSquashedInstsIssued); 230 231 iqSquashedInstsExamined 232 .name(name() + ".iqSquashedInstsExamined") 233 .desc("Number of squashed instructions iterated over during squash;" 234 " mainly for profiling") 235 .prereq(iqSquashedInstsExamined); 236 237 iqSquashedOperandsExamined 238 .name(name() + ".iqSquashedOperandsExamined") 239 .desc("Number of squashed operands that are examined and possibly " 240 "removed from graph") 241 .prereq(iqSquashedOperandsExamined); 242 243 iqSquashedNonSpecRemoved 244 .name(name() + ".iqSquashedNonSpecRemoved") 245 .desc("Number of squashed non-spec instructions that were removed") 246 .prereq(iqSquashedNonSpecRemoved); 247/* 248 queueResDist 249 .init(Num_OpClasses, 0, 99, 2) 250 .name(name() + ".IQ:residence:") 251 .desc("cycles from dispatch to issue") 252 .flags(total | pdf | cdf ) 253 ; 254 for (int i = 0; i < Num_OpClasses; ++i) { 255 queueResDist.subname(i, opClassStrings[i]); 256 } 257*/ 258 numIssuedDist 259 .init(0,totalWidth,1) 260 .name(name() + ".issued_per_cycle") 261 .desc("Number of insts issued each cycle") 262 .flags(pdf) 263 ; 264/* 265 dist_unissued 266 .init(Num_OpClasses+2) 267 .name(name() + ".unissued_cause") 268 .desc("Reason ready instruction not issued") 269 .flags(pdf | dist) 270 ; 271 for (int i=0; i < (Num_OpClasses + 2); ++i) { 272 dist_unissued.subname(i, 
unissued_names[i]); 273 } 274*/ 275 statIssuedInstType 276 .init(numThreads,Enums::Num_OpClass) 277 .name(name() + ".FU_type") 278 .desc("Type of FU issued") 279 .flags(total | pdf | dist) 280 ; 281 statIssuedInstType.ysubnames(Enums::OpClassStrings); 282 283 // 284 // How long did instructions for a particular FU type wait prior to issue 285 // 286/* 287 issueDelayDist 288 .init(Num_OpClasses,0,99,2) 289 .name(name() + ".") 290 .desc("cycles from operands ready to issue") 291 .flags(pdf | cdf) 292 ; 293 294 for (int i=0; i<Num_OpClasses; ++i) { 295 std::stringstream subname; 296 subname << opClassStrings[i] << "_delay"; 297 issueDelayDist.subname(i, subname.str()); 298 } 299*/ 300 issueRate 301 .name(name() + ".rate") 302 .desc("Inst issue rate") 303 .flags(total) 304 ; 305 issueRate = iqInstsIssued / cpu->numCycles; 306 307 statFuBusy 308 .init(Num_OpClasses) 309 .name(name() + ".fu_full") 310 .desc("attempts to use FU when none available") 311 .flags(pdf | dist) 312 ; 313 for (int i=0; i < Num_OpClasses; ++i) { 314 statFuBusy.subname(i, Enums::OpClassStrings[i]); 315 } 316 317 fuBusy 318 .init(numThreads) 319 .name(name() + ".fu_busy_cnt") 320 .desc("FU busy when requested") 321 .flags(total) 322 ; 323 324 fuBusyRate 325 .name(name() + ".fu_busy_rate") 326 .desc("FU busy rate (busy events/executed inst)") 327 .flags(total) 328 ; 329 fuBusyRate = fuBusy / iqInstsIssued; 330 331 for (ThreadID tid = 0; tid < numThreads; tid++) { 332 // Tell mem dependence unit to reg stats as well. 
333 memDepUnit[tid].regStats(); 334 } 335 336 intInstQueueReads 337 .name(name() + ".int_inst_queue_reads") 338 .desc("Number of integer instruction queue reads") 339 .flags(total); 340 341 intInstQueueWrites 342 .name(name() + ".int_inst_queue_writes") 343 .desc("Number of integer instruction queue writes") 344 .flags(total); 345 346 intInstQueueWakeupAccesses 347 .name(name() + ".int_inst_queue_wakeup_accesses") 348 .desc("Number of integer instruction queue wakeup accesses") 349 .flags(total); 350 351 fpInstQueueReads 352 .name(name() + ".fp_inst_queue_reads") 353 .desc("Number of floating instruction queue reads") 354 .flags(total); 355 356 fpInstQueueWrites 357 .name(name() + ".fp_inst_queue_writes") 358 .desc("Number of floating instruction queue writes") 359 .flags(total); 360 361 fpInstQueueWakeupQccesses 362 .name(name() + ".fp_inst_queue_wakeup_accesses") 363 .desc("Number of floating instruction queue wakeup accesses") 364 .flags(total); 365 366 intAluAccesses 367 .name(name() + ".int_alu_accesses") 368 .desc("Number of integer alu accesses") 369 .flags(total); 370 371 fpAluAccesses 372 .name(name() + ".fp_alu_accesses") 373 .desc("Number of floating point alu accesses") 374 .flags(total); 375 376} 377 378template <class Impl> 379void 380InstructionQueue<Impl>::resetState() 381{ 382 //Initialize thread IQ counts 383 for (ThreadID tid = 0; tid <numThreads; tid++) { 384 count[tid] = 0; 385 instList[tid].clear(); 386 } 387 388 // Initialize the number of free IQ entries. 389 freeEntries = numEntries; 390 391 // Note that in actuality, the registers corresponding to the logical 392 // registers start off as ready. However this doesn't matter for the 393 // IQ as the instruction should have been correctly told if those 394 // registers are ready in rename. Thus it can all be initialized as 395 // unready. 
396 for (int i = 0; i < numPhysRegs; ++i) { 397 regScoreboard[i] = false; 398 } 399 400 for (ThreadID tid = 0; tid < numThreads; ++tid) { 401 squashedSeqNum[tid] = 0; 402 } 403 404 for (int i = 0; i < Num_OpClasses; ++i) { 405 while (!readyInsts[i].empty()) 406 readyInsts[i].pop(); 407 queueOnList[i] = false; 408 readyIt[i] = listOrder.end(); 409 } 410 nonSpecInsts.clear(); 411 listOrder.clear(); 412 deferredMemInsts.clear(); 413} 414 415template <class Impl> 416void 417InstructionQueue<Impl>::setActiveThreads(list<ThreadID> *at_ptr) 418{ 419 activeThreads = at_ptr; 420} 421 422template <class Impl> 423void 424InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr) 425{ 426 issueToExecuteQueue = i2e_ptr; 427} 428 429template <class Impl> 430void 431InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr) 432{ 433 timeBuffer = tb_ptr; 434 435 fromCommit = timeBuffer->getWire(-commitToIEWDelay); 436} 437 438template <class Impl> 439void 440InstructionQueue<Impl>::switchOut() 441{ 442/* 443 if (!instList[0].empty() || (numEntries != freeEntries) || 444 !readyInsts[0].empty() || !nonSpecInsts.empty() || !listOrder.empty()) { 445 dumpInsts(); 446// assert(0); 447 } 448*/ 449 resetState(); 450 dependGraph.reset(); 451 instsToExecute.clear(); 452 switchedOut = true; 453 for (ThreadID tid = 0; tid < numThreads; ++tid) { 454 memDepUnit[tid].switchOut(); 455 } 456} 457 458template <class Impl> 459void 460InstructionQueue<Impl>::takeOverFrom() 461{ 462 switchedOut = false; 463} 464 465template <class Impl> 466int 467InstructionQueue<Impl>::entryAmount(ThreadID num_threads) 468{ 469 if (iqPolicy == Partitioned) { 470 return numEntries / num_threads; 471 } else { 472 return 0; 473 } 474} 475 476 477template <class Impl> 478void 479InstructionQueue<Impl>::resetEntries() 480{ 481 if (iqPolicy != Dynamic || numThreads > 1) { 482 int active_threads = activeThreads->size(); 483 484 list<ThreadID>::iterator threads = activeThreads->begin(); 485 
list<ThreadID>::iterator end = activeThreads->end(); 486 487 while (threads != end) { 488 ThreadID tid = *threads++; 489 490 if (iqPolicy == Partitioned) { 491 maxEntries[tid] = numEntries / active_threads; 492 } else if(iqPolicy == Threshold && active_threads == 1) { 493 maxEntries[tid] = numEntries; 494 } 495 } 496 } 497} 498 499template <class Impl> 500unsigned 501InstructionQueue<Impl>::numFreeEntries() 502{ 503 return freeEntries; 504} 505 506template <class Impl> 507unsigned 508InstructionQueue<Impl>::numFreeEntries(ThreadID tid) 509{ 510 return maxEntries[tid] - count[tid]; 511} 512 513// Might want to do something more complex if it knows how many instructions 514// will be issued this cycle. 515template <class Impl> 516bool 517InstructionQueue<Impl>::isFull() 518{ 519 if (freeEntries == 0) { 520 return(true); 521 } else { 522 return(false); 523 } 524} 525 526template <class Impl> 527bool 528InstructionQueue<Impl>::isFull(ThreadID tid) 529{ 530 if (numFreeEntries(tid) == 0) { 531 return(true); 532 } else { 533 return(false); 534 } 535} 536 537template <class Impl> 538bool 539InstructionQueue<Impl>::hasReadyInsts() 540{ 541 if (!listOrder.empty()) { 542 return true; 543 } 544 545 for (int i = 0; i < Num_OpClasses; ++i) { 546 if (!readyInsts[i].empty()) { 547 return true; 548 } 549 } 550 551 return false; 552} 553 554template <class Impl> 555void 556InstructionQueue<Impl>::insert(DynInstPtr &new_inst) 557{ 558 new_inst->isFloating() ? fpInstQueueWrites++ : intInstQueueWrites++; 559 // Make sure the instruction is valid 560 assert(new_inst); 561 562 DPRINTF(IQ, "Adding instruction [sn:%lli] PC %s to the IQ.\n", 563 new_inst->seqNum, new_inst->pcState()); 564 565 assert(freeEntries != 0); 566 567 instList[new_inst->threadNumber].push_back(new_inst); 568 569 --freeEntries; 570 571 new_inst->setInIQ(); 572 573 // Look through its source registers (physical regs), and mark any 574 // dependencies. 
575 addToDependents(new_inst); 576 577 // Have this instruction set itself as the producer of its destination 578 // register(s). 579 addToProducers(new_inst); 580 581 if (new_inst->isMemRef()) { 582 memDepUnit[new_inst->threadNumber].insert(new_inst); 583 } else { 584 addIfReady(new_inst); 585 } 586 587 ++iqInstsAdded; 588 589 count[new_inst->threadNumber]++; 590 591 assert(freeEntries == (numEntries - countInsts())); 592} 593 594template <class Impl> 595void 596InstructionQueue<Impl>::insertNonSpec(DynInstPtr &new_inst) 597{ 598 // @todo: Clean up this code; can do it by setting inst as unable 599 // to issue, then calling normal insert on the inst. 600 new_inst->isFloating() ? fpInstQueueWrites++ : intInstQueueWrites++; 601 602 assert(new_inst); 603 604 nonSpecInsts[new_inst->seqNum] = new_inst; 605 606 DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %s " 607 "to the IQ.\n", 608 new_inst->seqNum, new_inst->pcState()); 609 610 assert(freeEntries != 0); 611 612 instList[new_inst->threadNumber].push_back(new_inst); 613 614 --freeEntries; 615 616 new_inst->setInIQ(); 617 618 // Have this instruction set itself as the producer of its destination 619 // register(s). 620 addToProducers(new_inst); 621 622 // If it's a memory instruction, add it to the memory dependency 623 // unit. 
624 if (new_inst->isMemRef()) { 625 memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst); 626 } 627 628 ++iqNonSpecInstsAdded; 629 630 count[new_inst->threadNumber]++; 631 632 assert(freeEntries == (numEntries - countInsts())); 633} 634 635template <class Impl> 636void 637InstructionQueue<Impl>::insertBarrier(DynInstPtr &barr_inst) 638{ 639 memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst); 640 641 insertNonSpec(barr_inst); 642} 643 644template <class Impl> 645typename Impl::DynInstPtr 646InstructionQueue<Impl>::getInstToExecute() 647{ 648 assert(!instsToExecute.empty()); 649 DynInstPtr inst = instsToExecute.front(); 650 instsToExecute.pop_front(); 651 if (inst->isFloating()){ 652 fpInstQueueReads++; 653 } else { 654 intInstQueueReads++; 655 } 656 return inst; 657} 658 659template <class Impl> 660void 661InstructionQueue<Impl>::addToOrderList(OpClass op_class) 662{ 663 assert(!readyInsts[op_class].empty()); 664 665 ListOrderEntry queue_entry; 666 667 queue_entry.queueType = op_class; 668 669 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum; 670 671 ListOrderIt list_it = listOrder.begin(); 672 ListOrderIt list_end_it = listOrder.end(); 673 674 while (list_it != list_end_it) { 675 if ((*list_it).oldestInst > queue_entry.oldestInst) { 676 break; 677 } 678 679 list_it++; 680 } 681 682 readyIt[op_class] = listOrder.insert(list_it, queue_entry); 683 queueOnList[op_class] = true; 684} 685 686template <class Impl> 687void 688InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it) 689{ 690 // Get iterator of next item on the list 691 // Delete the original iterator 692 // Determine if the next item is either the end of the list or younger 693 // than the new instruction. If so, then add in a new iterator right here. 694 // If not, then move along. 
695 ListOrderEntry queue_entry; 696 OpClass op_class = (*list_order_it).queueType; 697 ListOrderIt next_it = list_order_it; 698 699 ++next_it; 700 701 queue_entry.queueType = op_class; 702 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum; 703 704 while (next_it != listOrder.end() && 705 (*next_it).oldestInst < queue_entry.oldestInst) { 706 ++next_it; 707 } 708 709 readyIt[op_class] = listOrder.insert(next_it, queue_entry); 710} 711 712template <class Impl> 713void 714InstructionQueue<Impl>::processFUCompletion(DynInstPtr &inst, int fu_idx) 715{ 716 DPRINTF(IQ, "Processing FU completion [sn:%lli]\n", inst->seqNum); 717 // The CPU could have been sleeping until this op completed (*extremely* 718 // long latency op). Wake it if it was. This may be overkill. 719 if (isSwitchedOut()) { 720 DPRINTF(IQ, "FU completion not processed, IQ is switched out [sn:%lli]\n", 721 inst->seqNum); 722 return; 723 } 724 725 iewStage->wakeCPU(); 726 727 if (fu_idx > -1) 728 fuPool->freeUnitNextCycle(fu_idx); 729 730 // @todo: Ensure that these FU Completions happen at the beginning 731 // of a cycle, otherwise they could add too many instructions to 732 // the queue. 733 issueToExecuteQueue->access(-1)->size++; 734 instsToExecute.push_back(inst); 735} 736 737// @todo: Figure out a better way to remove the squashed items from the 738// lists. Checking the top item of each list to see if it's squashed 739// wastes time and forces jumps. 
// Issue up to totalWidth ready instructions this cycle: first drain any
// deferred memory instructions, then walk the age-ordered listOrder,
// acquiring an FU per op class and pushing selected insts to execute.
template <class Impl>
void
InstructionQueue<Impl>::scheduleReadyInsts()
{
    DPRINTF(IQ, "Attempting to schedule ready instructions from "
            "the IQ.\n");

    IssueStruct *i2e_info = issueToExecuteQueue->access(0);

    // Deferred memory instructions (e.g. ones re-queued earlier) consume
    // issue bandwidth before the ordinary ready lists are considered.
    DynInstPtr deferred_mem_inst;
    int total_deferred_mem_issued = 0;
    while (total_deferred_mem_issued < totalWidth &&
           (deferred_mem_inst = getDeferredMemInstToExecute()) != 0) {
        issueToExecuteQueue->access(0)->size++;
        instsToExecute.push_back(deferred_mem_inst);
        total_deferred_mem_issued++;
    }

    // Have iterator to head of the list
    // While I haven't exceeded bandwidth or reached the end of the list,
    // Try to get a FU that can do what this op needs.
    // If successful, change the oldestInst to the new top of the list, put
    // the queue in the proper place in the list.
    // Increment the iterator.
    // This will avoid trying to schedule a certain op class if there are no
    // FUs that handle it.
    ListOrderIt order_it = listOrder.begin();
    ListOrderIt order_end_it = listOrder.end();
    int total_issued = 0;

    while (total_issued < (totalWidth - total_deferred_mem_issued) &&
           iewStage->canIssue() &&
           order_it != order_end_it) {
        OpClass op_class = (*order_it).queueType;

        assert(!readyInsts[op_class].empty());

        DynInstPtr issuing_inst = readyInsts[op_class].top();

        issuing_inst->isFloating() ? fpInstQueueReads++ : intInstQueueReads++;

        assert(issuing_inst->seqNum == (*order_it).oldestInst);

        // Squashed instructions are discarded lazily here, at issue time.
        if (issuing_inst->isSquashed()) {
            readyInsts[op_class].pop();

            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            listOrder.erase(order_it++);

            ++iqSquashedInstsIssued;

            continue;
        }

        // idx == -2 means "no FU needed"; -1 means "no FU available".
        int idx = -2;
        int op_latency = 1;
        ThreadID tid = issuing_inst->threadNumber;

        if (op_class != No_OpClass) {
            idx = fuPool->getUnit(op_class);
            issuing_inst->isFloating() ? fpAluAccesses++ : intAluAccesses++;
            if (idx > -1) {
                op_latency = fuPool->getOpLatency(op_class);
            }
        }

        // If we have an instruction that doesn't require a FU, or a
        // valid FU, then schedule for execution.
        if (idx == -2 || idx != -1) {
            if (op_latency == 1) {
                i2e_info->size++;
                instsToExecute.push_back(issuing_inst);

                // Add the FU onto the list of FU's to be freed next
                // cycle if we used one.
                if (idx >= 0)
                    fuPool->freeUnitNextCycle(idx);
            } else {
                int issue_latency = fuPool->getIssueLatency(op_class);
                // Generate completion event for the FU
                FUCompletion *execution = new FUCompletion(issuing_inst,
                                                           idx, this);

                cpu->schedule(execution, curTick() + cpu->ticks(op_latency - 1));

                // @todo: Enforce that issue_latency == 1 or op_latency
                if (issue_latency > 1) {
                    // If FU isn't pipelined, then it must be freed
                    // upon the execution completing.
                    execution->setFreeFU();
                } else {
                    // Add the FU onto the list of FU's to be freed next cycle.
                    fuPool->freeUnitNextCycle(idx);
                }
            }

            DPRINTF(IQ, "Thread %i: Issuing instruction PC %s "
                    "[sn:%lli]\n",
                    tid, issuing_inst->pcState(),
                    issuing_inst->seqNum);

            readyInsts[op_class].pop();

            // Keep listOrder consistent with the new oldest ready inst of
            // this op class (or drop the class if it is now empty).
            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            issuing_inst->setIssued();
            ++total_issued;

#if TRACING_ON
            issuing_inst->issueTick = curTick();
#endif

            if (!issuing_inst->isMemRef()) {
                // Memory instructions can not be freed from the IQ until they
                // complete.
                ++freeEntries;
                count[tid]--;
                issuing_inst->clearInIQ();
            } else {
                memDepUnit[tid].issue(issuing_inst);
            }

            listOrder.erase(order_it++);
            statIssuedInstType[tid][op_class]++;
            iewStage->incrWb(issuing_inst->seqNum);
        } else {
            // FU busy: record the stall and try the next op class.
            statFuBusy[op_class]++;
            fuBusy[tid]++;
            ++order_it;
        }
    }

    numIssuedDist.sample(total_issued);
    iqInstsIssued+= total_issued;

    // If we issued any instructions, tell the CPU we had activity.
    // @todo If the way deferred memory instructions are handeled due to
    // translation changes then the deferredMemInsts condition should be removed
    // from the code below.
    if (total_issued || total_deferred_mem_issued || deferredMemInsts.size()) {
        cpu->activityThisCycle();
    } else {
        DPRINTF(IQ, "Not able to schedule any instructions.\n");
    }
}

// Commit has cleared the given non-speculative instruction: allow it to
// issue and remove it from the nonSpecInsts map.
template <class Impl>
void
InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
{
    DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
            "to execute.\n", inst);

    NonSpecMapIt inst_it = nonSpecInsts.find(inst);

    assert(inst_it != nonSpecInsts.end());

    ThreadID tid = (*inst_it).second->threadNumber;

    (*inst_it).second->setAtCommit();

    (*inst_it).second->setCanIssue();

    if (!(*inst_it).second->isMemRef()) {
        addIfReady((*inst_it).second);
    } else {
        memDepUnit[tid].nonSpecInstReady((*inst_it).second);
    }

    (*inst_it).second = NULL;

    nonSpecInsts.erase(inst_it);
}

// Drop all instructions with sequence number <= inst from the head of
// the thread's instruction list (they have committed).
template <class Impl>
void
InstructionQueue<Impl>::commit(const InstSeqNum &inst, ThreadID tid)
{
    DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n",
            tid,inst);

    ListIt iq_it = instList[tid].begin();

    while (iq_it != instList[tid].end() &&
           (*iq_it)->seqNum <= inst) {
        ++iq_it;
        instList[tid].pop_front();
    }

    assert(freeEntries == (numEntries - countInsts()));
}

// Wake every instruction waiting on completed_inst's destination
// registers, marking those registers ready.  Returns the number of
// dependents woken.
template <class Impl>
int
InstructionQueue<Impl>::wakeDependents(DynInstPtr &completed_inst)
{
    int dependents = 0;

    // The instruction queue here takes care of both floating and int ops
    if (completed_inst->isFloating()) {
        fpInstQueueWakeupQccesses++;
    } else {
        intInstQueueWakeupAccesses++;
    }

    DPRINTF(IQ, "Waking dependents of completed instruction.\n");

    assert(!completed_inst->isSquashed());

    // Tell the memory dependence unit to wake any dependents on this
    // instruction if it is a memory instruction.  Also complete the memory
    // instruction at this point since we know it executed without issues.
    // @todo: Might want to rename "completeMemInst" to something that
    // indicates that it won't need to be replayed, and call this
    // earlier.  Might not be a big deal.
    if (completed_inst->isMemRef()) {
        memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
        completeMemInst(completed_inst);
    } else if (completed_inst->isMemBarrier() ||
               completed_inst->isWriteBarrier()) {
        memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
    }

    for (int dest_reg_idx = 0;
         dest_reg_idx < completed_inst->numDestRegs();
         dest_reg_idx++)
    {
        PhysRegIndex dest_reg =
            completed_inst->renamedDestRegIdx(dest_reg_idx);

        // Special case of uniq or control registers.  They are not
        // handled by the IQ and thus have no dependency graph entry.
        // @todo Figure out a cleaner way to handle this.
        if (dest_reg >= numPhysRegs) {
            DPRINTF(IQ, "dest_reg :%d, numPhysRegs: %d\n", dest_reg,
                    numPhysRegs);
            continue;
        }

        DPRINTF(IQ, "Waking any dependents on register %i.\n",
                (int) dest_reg);

        //Go through the dependency chain, marking the registers as
        //ready within the waiting instructions.
        DynInstPtr dep_inst = dependGraph.pop(dest_reg);

        while (dep_inst) {
            DPRINTF(IQ, "Waking up a dependent instruction, [sn:%lli] "
                    "PC %s.\n", dep_inst->seqNum, dep_inst->pcState());

            // Might want to give more information to the instruction
            // so that it knows which of its source registers is
            // ready.  However that would mean that the dependency
            // graph entries would need to hold the src_reg_idx.
            dep_inst->markSrcRegReady();

            addIfReady(dep_inst);

            dep_inst = dependGraph.pop(dest_reg);

            ++dependents;
        }

        // Reset the head node now that all of its dependents have
        // been woken up.
        assert(dependGraph.empty(dest_reg));
        dependGraph.clearInst(dest_reg);

        // Mark the scoreboard as having that register ready.
        regScoreboard[dest_reg] = true;
    }
    return dependents;
}

// A memory instruction's operands are ready (per the mem-dependence
// unit): push it onto the appropriate ready list and fix the issue order.
template <class Impl>
void
InstructionQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
{
    OpClass op_class = ready_inst->opClass();

    readyInsts[op_class].push(ready_inst);

    // Will need to reorder the list if either a queue is not on the list,
    // or it has an older instruction than last time.
    if (!queueOnList[op_class]) {
        addToOrderList(op_class);
    } else if (readyInsts[op_class].top()->seqNum <
               (*readyIt[op_class]).oldestInst) {
        listOrder.erase(readyIt[op_class]);
        addToOrderList(op_class);
    }

    DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
            "the ready list, PC %s opclass:%i [sn:%lli].\n",
            ready_inst->pcState(), op_class, ready_inst->seqNum);
}

// Put a memory instruction back under the mem-dependence unit (e.g. after
// a failed translation or ordering violation) so it issues again later.
template <class Impl>
void
InstructionQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
{
    DPRINTF(IQ, "Rescheduling mem inst [sn:%lli]\n", resched_inst->seqNum);

    // Reset DTB translation state
    resched_inst->translationStarted = false;
    resched_inst->translationCompleted = false;

    resched_inst->clearCanIssue();
    memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
}

// Replay a previously rescheduled memory instruction.
template <class Impl>
void
InstructionQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
{
    memDepUnit[replay_inst->threadNumber].replay(replay_inst);
}

// Mark a memory instruction's access as done and release its IQ entry.
// (Definition continues past this chunk.)
template <class Impl>
void
InstructionQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
{
    ThreadID tid = completed_inst->threadNumber;

    DPRINTF(IQ, "Completing mem instruction PC: %s [sn:%lli]\n",
            completed_inst->pcState(), completed_inst->seqNum);

    ++freeEntries;

    completed_inst->memOpDone = true;

memDepUnit[tid].completed(completed_inst); 1083 count[tid]--; 1084} 1085 1086template <class Impl> 1087void 1088InstructionQueue<Impl>::deferMemInst(DynInstPtr &deferred_inst) 1089{ 1090 deferredMemInsts.push_back(deferred_inst); 1091} 1092 1093template <class Impl> 1094typename Impl::DynInstPtr 1095InstructionQueue<Impl>::getDeferredMemInstToExecute() 1096{ 1097 for (ListIt it = deferredMemInsts.begin(); it != deferredMemInsts.end(); 1098 ++it) { 1099 if ((*it)->translationCompleted || (*it)->isSquashed()) { 1100 DynInstPtr ret = *it; 1101 deferredMemInsts.erase(it); 1102 return ret; 1103 } 1104 } 1105 return NULL; 1106} 1107 1108template <class Impl> 1109void 1110InstructionQueue<Impl>::violation(DynInstPtr &store, 1111 DynInstPtr &faulting_load) 1112{ 1113 intInstQueueWrites++; 1114 memDepUnit[store->threadNumber].violation(store, faulting_load); 1115} 1116 1117template <class Impl> 1118void 1119InstructionQueue<Impl>::squash(ThreadID tid) 1120{ 1121 DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in " 1122 "the IQ.\n", tid); 1123 1124 // Read instruction sequence number of last instruction out of the 1125 // time buffer. 1126 squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum; 1127 1128 // Call doSquash if there are insts in the IQ 1129 if (count[tid] > 0) { 1130 doSquash(tid); 1131 } 1132 1133 // Also tell the memory dependence unit to squash. 1134 memDepUnit[tid].squash(squashedSeqNum[tid], tid); 1135} 1136 1137template <class Impl> 1138void 1139InstructionQueue<Impl>::doSquash(ThreadID tid) 1140{ 1141 // Start at the tail. 1142 ListIt squash_it = instList[tid].end(); 1143 --squash_it; 1144 1145 DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n", 1146 tid, squashedSeqNum[tid]); 1147 1148 // Squash any instructions younger than the squashed sequence number 1149 // given. 
1150 while (squash_it != instList[tid].end() && 1151 (*squash_it)->seqNum > squashedSeqNum[tid]) { 1152 1153 DynInstPtr squashed_inst = (*squash_it); 1154 squashed_inst->isFloating() ? fpInstQueueWrites++ : intInstQueueWrites++; 1155 1156 // Only handle the instruction if it actually is in the IQ and 1157 // hasn't already been squashed in the IQ. 1158 if (squashed_inst->threadNumber != tid || 1159 squashed_inst->isSquashedInIQ()) { 1160 --squash_it; 1161 continue; 1162 } 1163 1164 if (!squashed_inst->isIssued() || 1165 (squashed_inst->isMemRef() && 1166 !squashed_inst->memOpDone)) { 1167 1168 DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %s squashed.\n", 1169 tid, squashed_inst->seqNum, squashed_inst->pcState()); 1170 1171 // Remove the instruction from the dependency list. 1172 if (!squashed_inst->isNonSpeculative() && 1173 !squashed_inst->isStoreConditional() && 1174 !squashed_inst->isMemBarrier() && 1175 !squashed_inst->isWriteBarrier()) { 1176 1177 for (int src_reg_idx = 0; 1178 src_reg_idx < squashed_inst->numSrcRegs(); 1179 src_reg_idx++) 1180 { 1181 PhysRegIndex src_reg = 1182 squashed_inst->renamedSrcRegIdx(src_reg_idx); 1183 1184 // Only remove it from the dependency graph if it 1185 // was placed there in the first place. 1186 1187 // Instead of doing a linked list traversal, we 1188 // can just remove these squashed instructions 1189 // either at issue time, or when the register is 1190 // overwritten. The only downside to this is it 1191 // leaves more room for error. 
1192 1193 if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) && 1194 src_reg < numPhysRegs) { 1195 dependGraph.remove(src_reg, squashed_inst); 1196 } 1197 1198 1199 ++iqSquashedOperandsExamined; 1200 } 1201 } else if (!squashed_inst->isStoreConditional() || 1202 !squashed_inst->isCompleted()) { 1203 NonSpecMapIt ns_inst_it = 1204 nonSpecInsts.find(squashed_inst->seqNum); 1205 1206 if (ns_inst_it == nonSpecInsts.end()) { 1207 assert(squashed_inst->getFault() != NoFault); 1208 } else { 1209 1210 (*ns_inst_it).second = NULL; 1211 1212 nonSpecInsts.erase(ns_inst_it); 1213 1214 ++iqSquashedNonSpecRemoved; 1215 } 1216 } 1217 1218 // Might want to also clear out the head of the dependency graph. 1219 1220 // Mark it as squashed within the IQ. 1221 squashed_inst->setSquashedInIQ(); 1222 1223 // @todo: Remove this hack where several statuses are set so the 1224 // inst will flow through the rest of the pipeline. 1225 squashed_inst->setIssued(); 1226 squashed_inst->setCanCommit(); 1227 squashed_inst->clearInIQ(); 1228 1229 //Update Thread IQ Count 1230 count[squashed_inst->threadNumber]--; 1231 1232 ++freeEntries; 1233 } 1234 1235 instList[tid].erase(squash_it--); 1236 ++iqSquashedInstsExamined; 1237 } 1238} 1239 1240template <class Impl> 1241bool 1242InstructionQueue<Impl>::addToDependents(DynInstPtr &new_inst) 1243{ 1244 // Loop through the instruction's source registers, adding 1245 // them to the dependency list if they are not ready. 1246 int8_t total_src_regs = new_inst->numSrcRegs(); 1247 bool return_val = false; 1248 1249 for (int src_reg_idx = 0; 1250 src_reg_idx < total_src_regs; 1251 src_reg_idx++) 1252 { 1253 // Only add it to the dependency graph if it's not ready. 1254 if (!new_inst->isReadySrcRegIdx(src_reg_idx)) { 1255 PhysRegIndex src_reg = new_inst->renamedSrcRegIdx(src_reg_idx); 1256 1257 // Check the IQ's scoreboard to make sure the register 1258 // hasn't become ready while the instruction was in flight 1259 // between stages. 
Only if it really isn't ready should 1260 // it be added to the dependency graph. 1261 if (src_reg >= numPhysRegs) { 1262 continue; 1263 } else if (regScoreboard[src_reg] == false) { 1264 DPRINTF(IQ, "Instruction PC %s has src reg %i that " 1265 "is being added to the dependency chain.\n", 1266 new_inst->pcState(), src_reg); 1267 1268 dependGraph.insert(src_reg, new_inst); 1269 1270 // Change the return value to indicate that something 1271 // was added to the dependency graph. 1272 return_val = true; 1273 } else { 1274 DPRINTF(IQ, "Instruction PC %s has src reg %i that " 1275 "became ready before it reached the IQ.\n", 1276 new_inst->pcState(), src_reg); 1277 // Mark a register ready within the instruction. 1278 new_inst->markSrcRegReady(src_reg_idx); 1279 } 1280 } 1281 } 1282 1283 return return_val; 1284} 1285 1286template <class Impl> 1287void 1288InstructionQueue<Impl>::addToProducers(DynInstPtr &new_inst) 1289{ 1290 // Nothing really needs to be marked when an instruction becomes 1291 // the producer of a register's value, but for convenience a ptr 1292 // to the producing instruction will be placed in the head node of 1293 // the dependency links. 1294 int8_t total_dest_regs = new_inst->numDestRegs(); 1295 1296 for (int dest_reg_idx = 0; 1297 dest_reg_idx < total_dest_regs; 1298 dest_reg_idx++) 1299 { 1300 PhysRegIndex dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx); 1301 1302 // Instructions that use the misc regs will have a reg number 1303 // higher than the normal physical registers. In this case these 1304 // registers are not renamed, and there is no need to track 1305 // dependencies as these instructions must be executed at commit. 1306 if (dest_reg >= numPhysRegs) { 1307 continue; 1308 } 1309 1310 if (!dependGraph.empty(dest_reg)) { 1311 dependGraph.dump(); 1312 panic("Dependency graph %i not empty!", dest_reg); 1313 } 1314 1315 dependGraph.setInst(dest_reg, new_inst); 1316 1317 // Mark the scoreboard to say it's not yet ready. 
1318 regScoreboard[dest_reg] = false; 1319 } 1320} 1321 1322template <class Impl> 1323void 1324InstructionQueue<Impl>::addIfReady(DynInstPtr &inst) 1325{ 1326 // If the instruction now has all of its source registers 1327 // available, then add it to the list of ready instructions. 1328 if (inst->readyToIssue()) { 1329 1330 //Add the instruction to the proper ready list. 1331 if (inst->isMemRef()) { 1332 1333 DPRINTF(IQ, "Checking if memory instruction can issue.\n"); 1334 1335 // Message to the mem dependence unit that this instruction has 1336 // its registers ready. 1337 memDepUnit[inst->threadNumber].regsReady(inst); 1338 1339 return; 1340 } 1341 1342 OpClass op_class = inst->opClass(); 1343 1344 DPRINTF(IQ, "Instruction is ready to issue, putting it onto " 1345 "the ready list, PC %s opclass:%i [sn:%lli].\n", 1346 inst->pcState(), op_class, inst->seqNum); 1347 1348 readyInsts[op_class].push(inst); 1349 1350 // Will need to reorder the list if either a queue is not on the list, 1351 // or it has an older instruction than last time. 1352 if (!queueOnList[op_class]) { 1353 addToOrderList(op_class); 1354 } else if (readyInsts[op_class].top()->seqNum < 1355 (*readyIt[op_class]).oldestInst) { 1356 listOrder.erase(readyIt[op_class]); 1357 addToOrderList(op_class); 1358 } 1359 } 1360} 1361 1362template <class Impl> 1363int 1364InstructionQueue<Impl>::countInsts() 1365{ 1366#if 0 1367 //ksewell:This works but definitely could use a cleaner write 1368 //with a more intuitive way of counting. Right now it's 1369 //just brute force .... 1370 // Change the #if if you want to use this method. 
1371 int total_insts = 0; 1372 1373 for (ThreadID tid = 0; tid < numThreads; ++tid) { 1374 ListIt count_it = instList[tid].begin(); 1375 1376 while (count_it != instList[tid].end()) { 1377 if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) { 1378 if (!(*count_it)->isIssued()) { 1379 ++total_insts; 1380 } else if ((*count_it)->isMemRef() && 1381 !(*count_it)->memOpDone) { 1382 // Loads that have not been marked as executed still count 1383 // towards the total instructions. 1384 ++total_insts; 1385 } 1386 } 1387 1388 ++count_it; 1389 } 1390 } 1391 1392 return total_insts; 1393#else 1394 return numEntries - freeEntries; 1395#endif 1396} 1397 1398template <class Impl> 1399void 1400InstructionQueue<Impl>::dumpLists() 1401{ 1402 for (int i = 0; i < Num_OpClasses; ++i) { 1403 cprintf("Ready list %i size: %i\n", i, readyInsts[i].size()); 1404 1405 cprintf("\n"); 1406 } 1407 1408 cprintf("Non speculative list size: %i\n", nonSpecInsts.size()); 1409 1410 NonSpecMapIt non_spec_it = nonSpecInsts.begin(); 1411 NonSpecMapIt non_spec_end_it = nonSpecInsts.end(); 1412 1413 cprintf("Non speculative list: "); 1414 1415 while (non_spec_it != non_spec_end_it) { 1416 cprintf("%s [sn:%lli]", (*non_spec_it).second->pcState(), 1417 (*non_spec_it).second->seqNum); 1418 ++non_spec_it; 1419 } 1420 1421 cprintf("\n"); 1422 1423 ListOrderIt list_order_it = listOrder.begin(); 1424 ListOrderIt list_order_end_it = listOrder.end(); 1425 int i = 1; 1426 1427 cprintf("List order: "); 1428 1429 while (list_order_it != list_order_end_it) { 1430 cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType, 1431 (*list_order_it).oldestInst); 1432 1433 ++list_order_it; 1434 ++i; 1435 } 1436 1437 cprintf("\n"); 1438} 1439 1440 1441template <class Impl> 1442void 1443InstructionQueue<Impl>::dumpInsts() 1444{ 1445 for (ThreadID tid = 0; tid < numThreads; ++tid) { 1446 int num = 0; 1447 int valid_num = 0; 1448 ListIt inst_list_it = instList[tid].begin(); 1449 1450 while (inst_list_it != 
instList[tid].end()) { 1451 cprintf("Instruction:%i\n", num); 1452 if (!(*inst_list_it)->isSquashed()) { 1453 if (!(*inst_list_it)->isIssued()) { 1454 ++valid_num; 1455 cprintf("Count:%i\n", valid_num); 1456 } else if ((*inst_list_it)->isMemRef() && 1457 !(*inst_list_it)->memOpDone) { 1458 // Loads that have not been marked as executed 1459 // still count towards the total instructions. 1460 ++valid_num; 1461 cprintf("Count:%i\n", valid_num); 1462 } 1463 } 1464 1465 cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n" 1466 "Issued:%i\nSquashed:%i\n", 1467 (*inst_list_it)->pcState(), 1468 (*inst_list_it)->seqNum, 1469 (*inst_list_it)->threadNumber, 1470 (*inst_list_it)->isIssued(), 1471 (*inst_list_it)->isSquashed()); 1472 1473 if ((*inst_list_it)->isMemRef()) { 1474 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone); 1475 } 1476 1477 cprintf("\n"); 1478 1479 inst_list_it++; 1480 ++num; 1481 } 1482 } 1483 1484 cprintf("Insts to Execute list:\n"); 1485 1486 int num = 0; 1487 int valid_num = 0; 1488 ListIt inst_list_it = instsToExecute.begin(); 1489 1490 while (inst_list_it != instsToExecute.end()) 1491 { 1492 cprintf("Instruction:%i\n", 1493 num); 1494 if (!(*inst_list_it)->isSquashed()) { 1495 if (!(*inst_list_it)->isIssued()) { 1496 ++valid_num; 1497 cprintf("Count:%i\n", valid_num); 1498 } else if ((*inst_list_it)->isMemRef() && 1499 !(*inst_list_it)->memOpDone) { 1500 // Loads that have not been marked as executed 1501 // still count towards the total instructions. 1502 ++valid_num; 1503 cprintf("Count:%i\n", valid_num); 1504 } 1505 } 1506 1507 cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n" 1508 "Issued:%i\nSquashed:%i\n", 1509 (*inst_list_it)->pcState(), 1510 (*inst_list_it)->seqNum, 1511 (*inst_list_it)->threadNumber, 1512 (*inst_list_it)->isIssued(), 1513 (*inst_list_it)->isSquashed()); 1514 1515 if ((*inst_list_it)->isMemRef()) { 1516 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone); 1517 } 1518 1519 cprintf("\n"); 1520 1521 inst_list_it++; 1522 ++num; 1523 } 1524}
|