inst_queue_impl.hh revision 7599
/*
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#include <limits>
#include <vector>

#include "cpu/o3/fu_pool.hh"
#include "cpu/o3/inst_queue.hh"
#include "enums/OpClass.hh"
#include "params/DerivO3CPU.hh"
#include "sim/core.hh"

using namespace std;

template <class Impl>
InstructionQueue<Impl>::FUCompletion::FUCompletion(DynInstPtr &_inst,
    int fu_idx, InstructionQueue<Impl> *iq_ptr)
    : Event(Stat_Event_Pri), inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr),
      freeFU(false)
{
    this->setFlags(Event::AutoDelete);
}

template <class Impl>
void
InstructionQueue<Impl>::FUCompletion::process()
{
    iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1);
    inst = NULL;
}


template <class Impl>
const char *
InstructionQueue<Impl>::FUCompletion::description() const
{
    return "Functional unit completion";
}

template <class Impl>
InstructionQueue<Impl>::InstructionQueue(O3CPU *cpu_ptr, IEW *iew_ptr,
                                         DerivO3CPUParams *params)
    : cpu(cpu_ptr),
      iewStage(iew_ptr),
      fuPool(params->fuPool),
      numEntries(params->numIQEntries),
      totalWidth(params->issueWidth),
      numPhysIntRegs(params->numPhysIntRegs),
      numPhysFloatRegs(params->numPhysFloatRegs),
      commitToIEWDelay(params->commitToIEWDelay)
{
    assert(fuPool);

    switchedOut = false;

    numThreads = params->numThreads;

    // Set the number of physical registers as the number of int + float.
    numPhysRegs = numPhysIntRegs + numPhysFloatRegs;

    // Create an entry for each physical register within the
    // dependency graph.
    dependGraph.resize(numPhysRegs);

    // Resize the register scoreboard.
    regScoreboard.resize(numPhysRegs);

    // Initialize the memory dependence units.
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        memDepUnit[tid].init(params, tid);
        memDepUnit[tid].setIQ(this);
    }

    resetState();

    std::string policy = params->smtIQPolicy;

    // Convert the policy string to lowercase.
    std::transform(policy.begin(), policy.end(), policy.begin(),
                   (int(*)(int)) tolower);

    // Figure out the resource sharing policy.
    if (policy == "dynamic") {
        iqPolicy = Dynamic;

        // Set max entries to the total IQ capacity.
        for (ThreadID tid = 0; tid < numThreads; tid++) {
            maxEntries[tid] = numEntries;
        }

    } else if (policy == "partitioned") {
        iqPolicy = Partitioned;

        // @todo: Make this work when numEntries doesn't divide evenly.
        int part_amt = numEntries / numThreads;

        // Divide the IQ up evenly among the threads.
        for (ThreadID tid = 0; tid < numThreads; tid++) {
            maxEntries[tid] = part_amt;
        }

        DPRINTF(IQ, "IQ sharing policy set to Partitioned: "
                "%i entries per thread.\n", part_amt);
    } else if (policy == "threshold") {
        iqPolicy = Threshold;

        double threshold = (double)params->smtIQThreshold / 100;

        int thresholdIQ = (int)((double)threshold * numEntries);

        // Each thread may use up to the threshold number of entries.
        for (ThreadID tid = 0; tid < numThreads; tid++) {
            maxEntries[tid] = thresholdIQ;
        }

        DPRINTF(IQ, "IQ sharing policy set to Threshold: "
                "%i entries per thread.\n", thresholdIQ);
    } else {
        assert(0 && "Invalid IQ sharing policy. Options are: "
               "Dynamic, Partitioned, Threshold");
    }
}

template <class Impl>
InstructionQueue<Impl>::~InstructionQueue()
{
    dependGraph.reset();
#ifdef DEBUG
    cprintf("Nodes traversed: %i, removed: %i\n",
            dependGraph.nodesTraversed, dependGraph.nodesRemoved);
#endif
}

template <class Impl>
std::string
InstructionQueue<Impl>::name() const
{
    return cpu->name() + ".iq";
}

template <class Impl>
void
InstructionQueue<Impl>::regStats()
{
    using namespace Stats;
    iqInstsAdded
        .name(name() + ".iqInstsAdded")
        .desc("Number of instructions added to the IQ (excludes non-spec)")
        .prereq(iqInstsAdded);

    iqNonSpecInstsAdded
        .name(name() + ".iqNonSpecInstsAdded")
        .desc("Number of non-speculative instructions added to the IQ")
        .prereq(iqNonSpecInstsAdded);

    iqInstsIssued
        .name(name() + ".iqInstsIssued")
        .desc("Number of instructions issued")
        .prereq(iqInstsIssued);

    iqIntInstsIssued
        .name(name() + ".iqIntInstsIssued")
        .desc("Number of integer instructions issued")
        .prereq(iqIntInstsIssued);

    iqFloatInstsIssued
        .name(name() + ".iqFloatInstsIssued")
        .desc("Number of float instructions issued")
        .prereq(iqFloatInstsIssued);

    iqBranchInstsIssued
        .name(name() + ".iqBranchInstsIssued")
        .desc("Number of branch instructions issued")
        .prereq(iqBranchInstsIssued);

    iqMemInstsIssued
        .name(name() + ".iqMemInstsIssued")
        .desc("Number of memory instructions issued")
        .prereq(iqMemInstsIssued);

    iqMiscInstsIssued
        .name(name() + ".iqMiscInstsIssued")
        .desc("Number of miscellaneous instructions issued")
        .prereq(iqMiscInstsIssued);

    iqSquashedInstsIssued
        .name(name() + ".iqSquashedInstsIssued")
        .desc("Number of squashed instructions issued")
        .prereq(iqSquashedInstsIssued);

    iqSquashedInstsExamined
        .name(name() + ".iqSquashedInstsExamined")
        .desc("Number of squashed instructions iterated over during squash; "
              "mainly for profiling")
        .prereq(iqSquashedInstsExamined);

    iqSquashedOperandsExamined
        .name(name() + ".iqSquashedOperandsExamined")
        .desc("Number of squashed operands that are examined and possibly "
              "removed from graph")
        .prereq(iqSquashedOperandsExamined);

    iqSquashedNonSpecRemoved
        .name(name() + ".iqSquashedNonSpecRemoved")
        .desc("Number of squashed non-spec instructions that were removed")
        .prereq(iqSquashedNonSpecRemoved);
/*
    queueResDist
        .init(Num_OpClasses, 0, 99, 2)
        .name(name() + ".IQ:residence:")
        .desc("cycles from dispatch to issue")
        .flags(total | pdf | cdf)
        ;
    for (int i = 0; i < Num_OpClasses; ++i) {
        queueResDist.subname(i, opClassStrings[i]);
    }
*/
    numIssuedDist
        .init(0, totalWidth, 1)
        .name(name() + ".ISSUE:issued_per_cycle")
        .desc("Number of insts issued each cycle")
        .flags(pdf)
        ;
/*
    dist_unissued
        .init(Num_OpClasses + 2)
        .name(name() + ".ISSUE:unissued_cause")
        .desc("Reason ready instruction not issued")
        .flags(pdf | dist)
        ;
    for (int i = 0; i < (Num_OpClasses + 2); ++i) {
        dist_unissued.subname(i, unissued_names[i]);
    }
*/
    statIssuedInstType
        .init(numThreads, Enums::Num_OpClass)
        .name(name() + ".ISSUE:FU_type")
        .desc("Type of FU issued")
        .flags(total | pdf | dist)
        ;
    statIssuedInstType.ysubnames(Enums::OpClassStrings);

    //
    // How long did instructions for a particular FU type wait prior to issue.
    //
/*
    issueDelayDist
        .init(Num_OpClasses, 0, 99, 2)
        .name(name() + ".ISSUE:")
        .desc("cycles from operands ready to issue")
        .flags(pdf | cdf)
        ;

    for (int i = 0; i < Num_OpClasses; ++i) {
        std::stringstream subname;
        subname << opClassStrings[i] << "_delay";
        issueDelayDist.subname(i, subname.str());
    }
*/
    issueRate
        .name(name() + ".ISSUE:rate")
        .desc("Inst issue rate")
        .flags(total)
        ;
    issueRate = iqInstsIssued / cpu->numCycles;

    statFuBusy
        .init(Num_OpClasses)
        .name(name() + ".ISSUE:fu_full")
        .desc("attempts to use FU when none available")
        .flags(pdf | dist)
        ;
    for (int i = 0; i < Num_OpClasses; ++i) {
        statFuBusy.subname(i, Enums::OpClassStrings[i]);
    }

    fuBusy
        .init(numThreads)
        .name(name() + ".ISSUE:fu_busy_cnt")
        .desc("FU busy when requested")
        .flags(total)
        ;

    fuBusyRate
        .name(name() + ".ISSUE:fu_busy_rate")
        .desc("FU busy rate (busy events/executed inst)")
        .flags(total)
        ;
    fuBusyRate = fuBusy / iqInstsIssued;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        // Tell the memory dependence unit to register its stats as well.
        memDepUnit[tid].regStats();
    }
}

template <class Impl>
void
InstructionQueue<Impl>::resetState()
{
    // Initialize the per-thread IQ counts.
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        count[tid] = 0;
        instList[tid].clear();
    }

    // Initialize the number of free IQ entries.
    freeEntries = numEntries;

    // Note that in actuality, the registers corresponding to the logical
    // registers start off as ready.  However this doesn't matter for the
    // IQ, as the instruction should have been correctly told in rename
    // whether those registers are ready.  Thus the scoreboard can be
    // initialized as all unready.
    for (int i = 0; i < numPhysRegs; ++i) {
        regScoreboard[i] = false;
    }

    for (ThreadID tid = 0; tid < numThreads; ++tid) {
        squashedSeqNum[tid] = 0;
    }

    for (int i = 0; i < Num_OpClasses; ++i) {
        while (!readyInsts[i].empty())
            readyInsts[i].pop();
        queueOnList[i] = false;
        readyIt[i] = listOrder.end();
    }
    nonSpecInsts.clear();
    listOrder.clear();
}

template <class Impl>
void
InstructionQueue<Impl>::setActiveThreads(list<ThreadID> *at_ptr)
{
    activeThreads = at_ptr;
}

template <class Impl>
void
InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
{
    issueToExecuteQueue = i2e_ptr;
}

template <class Impl>
void
InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
{
    timeBuffer = tb_ptr;

    fromCommit = timeBuffer->getWire(-commitToIEWDelay);
}

template <class Impl>
void
InstructionQueue<Impl>::switchOut()
{
/*
    if (!instList[0].empty() || (numEntries != freeEntries) ||
        !readyInsts[0].empty() || !nonSpecInsts.empty() || !listOrder.empty()) {
        dumpInsts();
//        assert(0);
    }
*/
    resetState();
    dependGraph.reset();
    instsToExecute.clear();
    switchedOut = true;
    for (ThreadID tid = 0; tid < numThreads; ++tid) {
        memDepUnit[tid].switchOut();
    }
}

template <class Impl>
void
InstructionQueue<Impl>::takeOverFrom()
{
    switchedOut = false;
}

template <class Impl>
int
InstructionQueue<Impl>::entryAmount(ThreadID num_threads)
{
    if (iqPolicy == Partitioned) {
        return numEntries / num_threads;
    } else {
        return 0;
    }
}


template <class Impl>
void
InstructionQueue<Impl>::resetEntries()
{
    if (iqPolicy != Dynamic || numThreads > 1) {
        int active_threads = activeThreads->size();

        list<ThreadID>::iterator threads = activeThreads->begin();
        list<ThreadID>::iterator end = activeThreads->end();

        while (threads != end) {
            ThreadID tid = *threads++;

            if (iqPolicy == Partitioned) {
                maxEntries[tid] = numEntries / active_threads;
            } else if (iqPolicy == Threshold && active_threads == 1) {
                maxEntries[tid] = numEntries;
            }
        }
    }
}

template <class Impl>
unsigned
InstructionQueue<Impl>::numFreeEntries()
{
    return freeEntries;
}

template <class Impl>
unsigned
InstructionQueue<Impl>::numFreeEntries(ThreadID tid)
{
    return maxEntries[tid] - count[tid];
}

// Might want to do something more complex if it knows how many instructions
// will be issued this cycle.
template <class Impl>
bool
InstructionQueue<Impl>::isFull()
{
    return freeEntries == 0;
}

template <class Impl>
bool
InstructionQueue<Impl>::isFull(ThreadID tid)
{
    return numFreeEntries(tid) == 0;
}

template <class Impl>
bool
InstructionQueue<Impl>::hasReadyInsts()
{
    if (!listOrder.empty()) {
        return true;
    }

    for (int i = 0; i < Num_OpClasses; ++i) {
        if (!readyInsts[i].empty()) {
            return true;
        }
    }

    return false;
}

template <class Impl>
void
InstructionQueue<Impl>::insert(DynInstPtr &new_inst)
{
    // Make sure the instruction is valid.
    assert(new_inst);

    DPRINTF(IQ, "Adding instruction [sn:%lli] PC %#x to the IQ.\n",
            new_inst->seqNum, new_inst->readPC());

    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Look through its source registers (physical regs), and mark any
    // dependencies.
    addToDependents(new_inst);

    // Have this instruction set itself as the producer of its destination
    // register(s).
    addToProducers(new_inst);

    if (new_inst->isMemRef()) {
        memDepUnit[new_inst->threadNumber].insert(new_inst);
    } else {
        addIfReady(new_inst);
    }

    ++iqInstsAdded;

    count[new_inst->threadNumber]++;

    assert(freeEntries == (numEntries - countInsts()));
}

template <class Impl>
void
InstructionQueue<Impl>::insertNonSpec(DynInstPtr &new_inst)
{
    // @todo: Clean up this code; can do it by setting inst as unable
    // to issue, then calling normal insert on the inst.

    assert(new_inst);

    nonSpecInsts[new_inst->seqNum] = new_inst;

    DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %#x "
            "to the IQ.\n",
            new_inst->seqNum, new_inst->readPC());

    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Have this instruction set itself as the producer of its destination
    // register(s).
    addToProducers(new_inst);

    // If it's a memory instruction, add it to the memory dependency
    // unit.
    if (new_inst->isMemRef()) {
        memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
    }

    ++iqNonSpecInstsAdded;

    count[new_inst->threadNumber]++;

    assert(freeEntries == (numEntries - countInsts()));
}

template <class Impl>
void
InstructionQueue<Impl>::insertBarrier(DynInstPtr &barr_inst)
{
    memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);

    insertNonSpec(barr_inst);
}

template <class Impl>
typename Impl::DynInstPtr
InstructionQueue<Impl>::getInstToExecute()
{
    assert(!instsToExecute.empty());
    DynInstPtr inst = instsToExecute.front();
    instsToExecute.pop_front();
    return inst;
}

template <class Impl>
void
InstructionQueue<Impl>::addToOrderList(OpClass op_class)
{
    assert(!readyInsts[op_class].empty());

    ListOrderEntry queue_entry;

    queue_entry.queueType = op_class;

    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    ListOrderIt list_it = listOrder.begin();
    ListOrderIt list_end_it = listOrder.end();

    while (list_it != list_end_it) {
        if ((*list_it).oldestInst > queue_entry.oldestInst) {
            break;
        }

        list_it++;
    }

    readyIt[op_class] = listOrder.insert(list_it, queue_entry);
    queueOnList[op_class] = true;
}

template <class Impl>
void
InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
{
    // Insert a new entry for this op class farther down the ordered list,
    // positioned by the sequence number of the queue's new oldest ready
    // instruction.  If the next entry is already younger, or the end of the
    // list is reached, the new entry goes right there; otherwise keep
    // moving along.  The caller erases the original entry.
    ListOrderEntry queue_entry;
    OpClass op_class = (*list_order_it).queueType;
    ListOrderIt next_it = list_order_it;

    ++next_it;

    queue_entry.queueType = op_class;
    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    while (next_it != listOrder.end() &&
           (*next_it).oldestInst < queue_entry.oldestInst) {
        ++next_it;
    }

    readyIt[op_class] = listOrder.insert(next_it, queue_entry);
}

template <class Impl>
void
InstructionQueue<Impl>::processFUCompletion(DynInstPtr &inst, int fu_idx)
{
    DPRINTF(IQ, "Processing FU completion [sn:%lli]\n", inst->seqNum);
    // The CPU could have been sleeping until this op completed (*extremely*
    // long latency op).  Wake it if it was.  This may be overkill.
    if (isSwitchedOut()) {
        DPRINTF(IQ, "FU completion not processed, IQ is switched out "
                "[sn:%lli]\n", inst->seqNum);
        return;
    }

    iewStage->wakeCPU();

    if (fu_idx > -1)
        fuPool->freeUnitNextCycle(fu_idx);

    // @todo: Ensure that these FU completions happen at the beginning
    // of a cycle, otherwise they could add too many instructions to
    // the queue.
    issueToExecuteQueue->access(-1)->size++;
    instsToExecute.push_back(inst);
}

// @todo: Figure out a better way to remove the squashed items from the
// lists.  Checking the top item of each list to see if it's squashed
// wastes time and forces jumps.
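// Illustrative example of the ordering structures used below (the op
// classes and sequence numbers are hypothetical): suppose
// readyInsts[IntAlu] holds [sn:5] and [sn:9] while readyInsts[MemRead]
// holds [sn:7].  listOrder then contains {IntAlu:5, MemRead:7}, so the
// IntAlu queue is visited first.  After [sn:5] issues,
// moveToYoungerInst() re-inserts the IntAlu entry keyed by [sn:9], and
// the walk order becomes {MemRead:7, IntAlu:9}.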
template <class Impl>
void
InstructionQueue<Impl>::scheduleReadyInsts()
{
    DPRINTF(IQ, "Attempting to schedule ready instructions from "
            "the IQ.\n");

    IssueStruct *i2e_info = issueToExecuteQueue->access(0);

    // Walk the ordered list of ready queues, oldest instruction first.
    // For each entry, try to get an FU that can handle that op class; if
    // successful, issue the queue's oldest instruction and re-insert the
    // entry at the position of the queue's next-oldest instruction.  Stop
    // once issue bandwidth is exhausted or the end of the list is reached.
    // This avoids repeatedly trying to schedule an op class for which no
    // FU is available.
    ListOrderIt order_it = listOrder.begin();
    ListOrderIt order_end_it = listOrder.end();
    int total_issued = 0;

    while (total_issued < totalWidth &&
           iewStage->canIssue() &&
           order_it != order_end_it) {
        OpClass op_class = (*order_it).queueType;

        assert(!readyInsts[op_class].empty());

        DynInstPtr issuing_inst = readyInsts[op_class].top();

        assert(issuing_inst->seqNum == (*order_it).oldestInst);

        if (issuing_inst->isSquashed()) {
            readyInsts[op_class].pop();

            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            listOrder.erase(order_it++);

            ++iqSquashedInstsIssued;

            continue;
        }

        int idx = -2;
        int op_latency = 1;
        ThreadID tid = issuing_inst->threadNumber;

        if (op_class != No_OpClass) {
            idx = fuPool->getUnit(op_class);

            if (idx > -1) {
                op_latency = fuPool->getOpLatency(op_class);
            }
        }

        // If we have an instruction that doesn't require an FU (idx == -2),
        // or a valid FU was acquired, then schedule it for execution.
        if (idx == -2 || idx != -1) {
            if (op_latency == 1) {
                i2e_info->size++;
                instsToExecute.push_back(issuing_inst);

                // Add the FU onto the list of FUs to be freed next
                // cycle if we used one.
                if (idx >= 0)
                    fuPool->freeUnitNextCycle(idx);
            } else {
                int issue_latency = fuPool->getIssueLatency(op_class);
                // Generate a completion event for the FU.
                FUCompletion *execution = new FUCompletion(issuing_inst,
                                                           idx, this);

                cpu->schedule(execution, curTick + cpu->ticks(op_latency - 1));

                // @todo: Enforce that issue_latency == 1 or op_latency
                if (issue_latency > 1) {
                    // If the FU isn't pipelined, then it must be freed
                    // when the execution completes.
                    execution->setFreeFU();
                } else {
                    // Add the FU onto the list of FUs to be freed next cycle.
                    fuPool->freeUnitNextCycle(idx);
                }
            }

            DPRINTF(IQ, "Thread %i: Issuing instruction PC %#x "
                    "[sn:%lli]\n",
                    tid, issuing_inst->readPC(),
                    issuing_inst->seqNum);

            readyInsts[op_class].pop();

            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            issuing_inst->setIssued();
            ++total_issued;

            if (!issuing_inst->isMemRef()) {
                // Memory instructions can not be freed from the IQ until they
                // complete.
                ++freeEntries;
                count[tid]--;
                issuing_inst->clearInIQ();
            } else {
                memDepUnit[tid].issue(issuing_inst);
            }

            listOrder.erase(order_it++);
            statIssuedInstType[tid][op_class]++;
            iewStage->incrWb(issuing_inst->seqNum);
        } else {
            statFuBusy[op_class]++;
            fuBusy[tid]++;
            ++order_it;
        }
    }

    numIssuedDist.sample(total_issued);
    iqInstsIssued += total_issued;

    // If we issued any instructions, tell the CPU we had activity.
    if (total_issued) {
        cpu->activityThisCycle();
    } else {
        DPRINTF(IQ, "Not able to schedule any instructions.\n");
    }
}

template <class Impl>
void
InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
{
    DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
            "to execute.\n", inst);

    NonSpecMapIt inst_it = nonSpecInsts.find(inst);

    assert(inst_it != nonSpecInsts.end());

    ThreadID tid = (*inst_it).second->threadNumber;

    (*inst_it).second->setAtCommit();

    (*inst_it).second->setCanIssue();

    if (!(*inst_it).second->isMemRef()) {
        addIfReady((*inst_it).second);
    } else {
        memDepUnit[tid].nonSpecInstReady((*inst_it).second);
    }

    (*inst_it).second = NULL;

    nonSpecInsts.erase(inst_it);
}

template <class Impl>
void
InstructionQueue<Impl>::commit(const InstSeqNum &inst, ThreadID tid)
{
    DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n",
            tid, inst);

    ListIt iq_it = instList[tid].begin();

    while (iq_it != instList[tid].end() &&
           (*iq_it)->seqNum <= inst) {
        ++iq_it;
        instList[tid].pop_front();
    }

    assert(freeEntries == (numEntries - countInsts()));
}

template <class Impl>
int
InstructionQueue<Impl>::wakeDependents(DynInstPtr &completed_inst)
{
    int dependents = 0;

    DPRINTF(IQ, "Waking dependents of completed instruction.\n");

    assert(!completed_inst->isSquashed());

    // Tell the memory dependence unit to wake any dependents on this
    // instruction if it is a memory instruction.  Also complete the memory
    // instruction at this point since we know it executed without issues.
    // @todo: Might want to rename "completeMemInst" to something that
    // indicates that it won't need to be replayed, and call this
    // earlier.  Might not be a big deal.
    if (completed_inst->isMemRef()) {
        memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
        completeMemInst(completed_inst);
    } else if (completed_inst->isMemBarrier() ||
               completed_inst->isWriteBarrier()) {
        memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
    }

    for (int dest_reg_idx = 0;
         dest_reg_idx < completed_inst->numDestRegs();
         dest_reg_idx++)
    {
        PhysRegIndex dest_reg =
            completed_inst->renamedDestRegIdx(dest_reg_idx);

        // Special case of unique or control registers.  They are not
        // handled by the IQ and thus have no dependency graph entry.
        // @todo: Figure out a cleaner way to handle this.
        if (dest_reg >= numPhysRegs) {
            DPRINTF(IQ, "dest_reg: %d, numPhysRegs: %d\n", dest_reg,
                    numPhysRegs);
            continue;
        }

        DPRINTF(IQ, "Waking any dependents on register %i.\n",
                (int) dest_reg);

        // Go through the dependency chain, marking the registers as
        // ready within the waiting instructions.
        DynInstPtr dep_inst = dependGraph.pop(dest_reg);

        while (dep_inst) {
            DPRINTF(IQ, "Waking up a dependent instruction, [sn:%lli] "
                    "PC %#x.\n", dep_inst->seqNum, dep_inst->readPC());

            // Might want to give more information to the instruction
            // so that it knows which of its source registers is
            // ready.  However that would mean that the dependency
            // graph entries would need to hold the src_reg_idx.
            dep_inst->markSrcRegReady();

            addIfReady(dep_inst);

            dep_inst = dependGraph.pop(dest_reg);

            ++dependents;
        }

        // Reset the head node now that all of its dependents have
        // been woken up.
        assert(dependGraph.empty(dest_reg));
        dependGraph.clearInst(dest_reg);

        // Mark the scoreboard as having that register ready.
        regScoreboard[dest_reg] = true;
    }
    return dependents;
}

template <class Impl>
void
InstructionQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
{
    OpClass op_class = ready_inst->opClass();

    readyInsts[op_class].push(ready_inst);

    // Will need to reorder the list if either a queue is not on the list,
    // or it has an older instruction than last time.
    if (!queueOnList[op_class]) {
        addToOrderList(op_class);
    } else if (readyInsts[op_class].top()->seqNum <
               (*readyIt[op_class]).oldestInst) {
        listOrder.erase(readyIt[op_class]);
        addToOrderList(op_class);
    }

    DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
            "the ready list, PC %#x opclass:%i [sn:%lli].\n",
            ready_inst->readPC(), op_class, ready_inst->seqNum);
}

template <class Impl>
void
InstructionQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
{
    DPRINTF(IQ, "Rescheduling mem inst [sn:%lli]\n", resched_inst->seqNum);
    resched_inst->clearCanIssue();
    memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
}

template <class Impl>
void
InstructionQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
{
    memDepUnit[replay_inst->threadNumber].replay(replay_inst);
}

template <class Impl>
void
InstructionQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
{
    ThreadID tid = completed_inst->threadNumber;

    DPRINTF(IQ, "Completing mem instruction PC:%#x [sn:%lli]\n",
            completed_inst->readPC(), completed_inst->seqNum);

    ++freeEntries;

    completed_inst->memOpDone = true;

    memDepUnit[tid].completed(completed_inst);
    count[tid]--;
}

template <class Impl>
void
InstructionQueue<Impl>::violation(DynInstPtr &store,
                                  DynInstPtr &faulting_load)
{
    memDepUnit[store->threadNumber].violation(store, faulting_load);
}

template <class Impl>
void
InstructionQueue<Impl>::squash(ThreadID tid)
{
    DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
            "the IQ.\n", tid);

    // Read the instruction sequence number of the last instruction out of
    // the time buffer.
    squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;

    // Call doSquash if there are insts in the IQ.
    if (count[tid] > 0) {
        doSquash(tid);
    }

    // Also tell the memory dependence unit to squash.
    memDepUnit[tid].squash(squashedSeqNum[tid], tid);
}

template <class Impl>
void
InstructionQueue<Impl>::doSquash(ThreadID tid)
{
    // Start at the tail.
    ListIt squash_it = instList[tid].end();
    --squash_it;

    DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
            tid, squashedSeqNum[tid]);

    // Squash any instructions younger than the given squashed sequence
    // number.
    while (squash_it != instList[tid].end() &&
           (*squash_it)->seqNum > squashedSeqNum[tid]) {

        DynInstPtr squashed_inst = (*squash_it);

        // Only handle the instruction if it actually is in the IQ and
        // hasn't already been squashed in the IQ.
        if (squashed_inst->threadNumber != tid ||
            squashed_inst->isSquashedInIQ()) {
            --squash_it;
            continue;
        }

        if (!squashed_inst->isIssued() ||
            (squashed_inst->isMemRef() &&
             !squashed_inst->memOpDone)) {

            DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %#x "
                    "squashed.\n",
                    tid, squashed_inst->seqNum, squashed_inst->readPC());

            // Remove the instruction from the dependency list.
            if (!squashed_inst->isNonSpeculative() &&
                !squashed_inst->isStoreConditional() &&
                !squashed_inst->isMemBarrier() &&
                !squashed_inst->isWriteBarrier()) {

                for (int src_reg_idx = 0;
                     src_reg_idx < squashed_inst->numSrcRegs();
                     src_reg_idx++)
                {
                    PhysRegIndex src_reg =
                        squashed_inst->renamedSrcRegIdx(src_reg_idx);

                    // Only remove it from the dependency graph if it
                    // was placed there in the first place.

                    // Instead of doing a linked list traversal, we
                    // can just remove these squashed instructions
                    // either at issue time, or when the register is
                    // overwritten.  The only downside to this is it
                    // leaves more room for error.

                    if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
                        src_reg < numPhysRegs) {
                        dependGraph.remove(src_reg, squashed_inst);
                    }

                    ++iqSquashedOperandsExamined;
                }
            } else if (!squashed_inst->isStoreConditional() ||
                       !squashed_inst->isCompleted()) {
                NonSpecMapIt ns_inst_it =
                    nonSpecInsts.find(squashed_inst->seqNum);

                if (ns_inst_it == nonSpecInsts.end()) {
                    // A non-spec instruction missing from the map should
                    // only happen if it faulted.
                    assert(squashed_inst->getFault() != NoFault);
                } else {

                    (*ns_inst_it).second = NULL;

                    nonSpecInsts.erase(ns_inst_it);

                    ++iqSquashedNonSpecRemoved;
                }
            }

            // Might want to also clear out the head of the dependency graph.

            // Mark it as squashed within the IQ.
            squashed_inst->setSquashedInIQ();

            // @todo: Remove this hack where several statuses are set so the
            // inst will flow through the rest of the pipeline.
            squashed_inst->setIssued();
            squashed_inst->setCanCommit();
            squashed_inst->clearInIQ();

            // Update the thread's IQ count.
            count[squashed_inst->threadNumber]--;

            ++freeEntries;
        }

        instList[tid].erase(squash_it--);
        ++iqSquashedInstsExamined;
    }
}

template <class Impl>
bool
InstructionQueue<Impl>::addToDependents(DynInstPtr &new_inst)
{
    // Loop through the instruction's source registers, adding
    // them to the dependency list if they are not ready.
    int8_t total_src_regs = new_inst->numSrcRegs();
    bool return_val = false;

    for (int src_reg_idx = 0;
         src_reg_idx < total_src_regs;
         src_reg_idx++)
    {
        // Only add it to the dependency graph if it's not ready.
        if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
            PhysRegIndex src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);

            // Check the IQ's scoreboard to make sure the register
            // hasn't become ready while the instruction was in flight
            // between stages.  Only if it really isn't ready should
            // it be added to the dependency graph.
            if (src_reg >= numPhysRegs) {
                continue;
            } else if (regScoreboard[src_reg] == false) {
                DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
                        "is being added to the dependency chain.\n",
                        new_inst->readPC(), src_reg);

                dependGraph.insert(src_reg, new_inst);

                // Change the return value to indicate that something
                // was added to the dependency graph.
                return_val = true;
            } else {
                DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
                        "became ready before it reached the IQ.\n",
                        new_inst->readPC(), src_reg);
                // Mark the source register as ready within the instruction.
                new_inst->markSrcRegReady(src_reg_idx);
            }
        }
    }

    return return_val;
}

template <class Impl>
void
InstructionQueue<Impl>::addToProducers(DynInstPtr &new_inst)
{
    // Nothing really needs to be marked when an instruction becomes
    // the producer of a register's value, but for convenience a ptr
    // to the producing instruction will be placed in the head node of
    // the dependency links.
    int8_t total_dest_regs = new_inst->numDestRegs();

    for (int dest_reg_idx = 0;
         dest_reg_idx < total_dest_regs;
         dest_reg_idx++)
    {
        PhysRegIndex dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);

        // Instructions that use the misc regs will have a reg number
        // higher than the normal physical registers.  In this case these
        // registers are not renamed, and there is no need to track
        // dependencies as these instructions must be executed at commit.
        if (dest_reg >= numPhysRegs) {
            continue;
        }

        if (!dependGraph.empty(dest_reg)) {
            dependGraph.dump();
            panic("Dependency graph %i not empty!", dest_reg);
        }

        dependGraph.setInst(dest_reg, new_inst);

        // Mark the scoreboard to say it's not yet ready.
        regScoreboard[dest_reg] = false;
    }
}

template <class Impl>
void
InstructionQueue<Impl>::addIfReady(DynInstPtr &inst)
{
    // If the instruction now has all of its source registers
    // available, then add it to the list of ready instructions.
    if (inst->readyToIssue()) {

        // Add the instruction to the proper ready list.
        if (inst->isMemRef()) {

            DPRINTF(IQ, "Checking if memory instruction can issue.\n");

            // Message to the mem dependence unit that this instruction has
            // its registers ready.
            memDepUnit[inst->threadNumber].regsReady(inst);

            return;
        }

        OpClass op_class = inst->opClass();

        DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
                "the ready list, PC %#x opclass:%i [sn:%lli].\n",
                inst->readPC(), op_class, inst->seqNum);

        readyInsts[op_class].push(inst);

        // Will need to reorder the list if either a queue is not on the list,
        // or it has an older instruction than last time.
        if (!queueOnList[op_class]) {
            addToOrderList(op_class);
        } else if (readyInsts[op_class].top()->seqNum <
                   (*readyIt[op_class]).oldestInst) {
            listOrder.erase(readyIt[op_class]);
            addToOrderList(op_class);
        }
    }
}

template <class Impl>
int
InstructionQueue<Impl>::countInsts()
{
#if 0
    // ksewell: This works but could definitely use a cleaner write
    // with a more intuitive way of counting.  Right now it's
    // just brute force.
    // Change the #if if you want to use this method.
    int total_insts = 0;

    for (ThreadID tid = 0; tid < numThreads; ++tid) {
        ListIt count_it = instList[tid].begin();

        while (count_it != instList[tid].end()) {
            if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
                if (!(*count_it)->isIssued()) {
                    ++total_insts;
                } else if ((*count_it)->isMemRef() &&
                           !(*count_it)->memOpDone) {
                    // Loads that have not been marked as executed still count
                    // towards the total instructions.
                    ++total_insts;
                }
            }

            ++count_it;
        }
    }

    return total_insts;
#else
    return numEntries - freeEntries;
#endif
}

template <class Impl>
void
InstructionQueue<Impl>::dumpLists()
{
    for (int i = 0; i < Num_OpClasses; ++i) {
        cprintf("Ready list %i size: %i\n", i, readyInsts[i].size());

        cprintf("\n");
    }

    cprintf("Non speculative list size: %i\n", nonSpecInsts.size());

    NonSpecMapIt non_spec_it = nonSpecInsts.begin();
    NonSpecMapIt non_spec_end_it = nonSpecInsts.end();

    cprintf("Non speculative list: ");

    while (non_spec_it != non_spec_end_it) {
        cprintf("%#x [sn:%lli]", (*non_spec_it).second->readPC(),
                (*non_spec_it).second->seqNum);
        ++non_spec_it;
    }

    cprintf("\n");

    ListOrderIt list_order_it = listOrder.begin();
    ListOrderIt list_order_end_it = listOrder.end();
    int i = 1;

    cprintf("List order: ");

    while (list_order_it != list_order_end_it) {
        cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
                (*list_order_it).oldestInst);

        ++list_order_it;
        ++i;
    }

    cprintf("\n");
}


template <class Impl>
void
InstructionQueue<Impl>::dumpInsts()
{
    for (ThreadID tid = 0; tid < numThreads; ++tid) {
        int num = 0;
        int valid_num = 0;
        ListIt inst_list_it = instList[tid].begin();

        while (inst_list_it != instList[tid].end()) {
            cprintf("Instruction:%i\n", num);
            if (!(*inst_list_it)->isSquashed()) {
                if (!(*inst_list_it)->isIssued()) {
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                } else if ((*inst_list_it)->isMemRef() &&
                           !(*inst_list_it)->memOpDone) {
                    // Loads that have not been marked as executed
                    // still count towards the total instructions.
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                }
            }

            cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
                    "Issued:%i\nSquashed:%i\n",
                    (*inst_list_it)->readPC(),
                    (*inst_list_it)->seqNum,
                    (*inst_list_it)->threadNumber,
                    (*inst_list_it)->isIssued(),
                    (*inst_list_it)->isSquashed());

            if ((*inst_list_it)->isMemRef()) {
                cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
            }

            cprintf("\n");

            inst_list_it++;
            ++num;
        }
    }

    cprintf("Insts to Execute list:\n");

    int num = 0;
    int valid_num = 0;
    ListIt inst_list_it = instsToExecute.begin();

    while (inst_list_it != instsToExecute.end()) {
        cprintf("Instruction:%i\n", num);
        if (!(*inst_list_it)->isSquashed()) {
            if (!(*inst_list_it)->isIssued()) {
                ++valid_num;
                cprintf("Count:%i\n", valid_num);
            } else if ((*inst_list_it)->isMemRef() &&
                       !(*inst_list_it)->memOpDone) {
                // Loads that have not been marked as executed
                // still count towards the total instructions.
                ++valid_num;
                cprintf("Count:%i\n", valid_num);
            }
        }

        cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
                "Issued:%i\nSquashed:%i\n",
                (*inst_list_it)->readPC(),
                (*inst_list_it)->seqNum,
                (*inst_list_it)->threadNumber,
                (*inst_list_it)->isIssued(),
                (*inst_list_it)->isSquashed());

        if ((*inst_list_it)->isMemRef()) {
            cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
        }

        cprintf("\n");

        inst_list_it++;
        ++num;
    }
}
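// Rough sketch of how the IEW stage typically drives this queue each cycle
// (descriptive only; see iew_impl.hh for the authoritative call sequence,
// and treat "iq" below as a placeholder instance name):
//
//     iq.insert(inst);              // dispatch: speculative instructions
//     iq.insertNonSpec(inst);       // dispatch: non-speculative instructions
//     iq.insertBarrier(inst);       // dispatch: memory/write barriers
//     iq.scheduleReadyInsts();      // schedule: up to issueWidth ready insts
//     inst = iq.getInstToExecute(); // execute: drain instsToExecute
//     iq.wakeDependents(inst);      // writeback: wake consumers of results
//     iq.commit(doneSeqNum, tid);   // commit: prune instList[tid]
//     iq.squash(tid);               // on misprediction/fault, via doSquash()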