inst_queue_impl.hh revision 4329
/*
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#include <algorithm>
#include <limits>
#include <vector>

#include "sim/core.hh"

#include "cpu/o3/fu_pool.hh"
#include "cpu/o3/inst_queue.hh"

template <class Impl>
InstructionQueue<Impl>::FUCompletion::FUCompletion(DynInstPtr &_inst,
                                                   int fu_idx,
                                                   InstructionQueue<Impl> *iq_ptr)
    : Event(&mainEventQueue, Stat_Event_Pri),
      inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false)
{
    this->setFlags(Event::AutoDelete);
}

template <class Impl>
void
InstructionQueue<Impl>::FUCompletion::process()
{
    iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1);
    inst = NULL;
}


template <class Impl>
const char *
InstructionQueue<Impl>::FUCompletion::description()
{
    return "Functional unit completion event";
}

template <class Impl>
InstructionQueue<Impl>::InstructionQueue(O3CPU *cpu_ptr, IEW *iew_ptr,
                                         Params *params)
    : cpu(cpu_ptr),
      iewStage(iew_ptr),
      fuPool(params->fuPool),
      numEntries(params->numIQEntries),
      totalWidth(params->issueWidth),
      numPhysIntRegs(params->numPhysIntRegs),
      numPhysFloatRegs(params->numPhysFloatRegs),
      commitToIEWDelay(params->commitToIEWDelay)
{
    assert(fuPool);

    switchedOut = false;

    numThreads = params->numberOfThreads;

    // Set the number of physical registers as the number of int + float.
    numPhysRegs = numPhysIntRegs + numPhysFloatRegs;

    // Create an entry for each physical register within the
    // dependency graph.
    dependGraph.resize(numPhysRegs);

    // Resize the register scoreboard.
    regScoreboard.resize(numPhysRegs);

    // Initialize the memory dependence units.
    for (int i = 0; i < numThreads; i++) {
        memDepUnit[i].init(params, i);
        memDepUnit[i].setIQ(this);
    }

    resetState();

    std::string policy = params->smtIQPolicy;

    // Convert the policy string to lowercase.
    std::transform(policy.begin(), policy.end(), policy.begin(),
                   (int(*)(int)) tolower);

    // Figure out the resource sharing policy.
    if (policy == "dynamic") {
        iqPolicy = Dynamic;

        // Set max entries to the total IQ capacity.
        for (int i = 0; i < numThreads; i++) {
            maxEntries[i] = numEntries;
        }

    } else if (policy == "partitioned") {
        iqPolicy = Partitioned;

        // @todo: Make this work when part_amt doesn't divide evenly.
        int part_amt = numEntries / numThreads;

        // Divide the IQ up evenly among the threads.
        for (int i = 0; i < numThreads; i++) {
            maxEntries[i] = part_amt;
        }

        DPRINTF(IQ, "IQ sharing policy set to Partitioned: "
                "%i entries per thread.\n", part_amt);
    } else if (policy == "threshold") {
        iqPolicy = Threshold;

        double threshold = (double)params->smtIQThreshold / 100;

        int thresholdIQ = (int)((double)threshold * numEntries);

        // Divide up by the threshold amount.
        for (int i = 0; i < numThreads; i++) {
            maxEntries[i] = thresholdIQ;
        }

        DPRINTF(IQ, "IQ sharing policy set to Threshold: "
                "%i entries per thread.\n", thresholdIQ);
    } else {
        assert(0 && "Invalid IQ sharing policy. Options are: {Dynamic, "
               "Partitioned, Threshold}");
    }
}
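
// A rough worked example of the sharing policies above (numbers are
// illustrative, not defaults): with numIQEntries == 64 and two threads,
// "dynamic" lets either thread use all 64 entries, "partitioned" caps each
// thread at 64 / 2 == 32 entries, and "threshold" with smtIQThreshold == 25
// caps every thread at (25 / 100) * 64 == 16 entries regardless of thread
// count. Only the per-thread maxEntries[] limits differ between policies;
// the shared freeEntries pool is managed the same way in all cases.
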
template <class Impl>
InstructionQueue<Impl>::~InstructionQueue()
{
    dependGraph.reset();
#ifdef DEBUG
    cprintf("Nodes traversed: %i, removed: %i\n",
            dependGraph.nodesTraversed, dependGraph.nodesRemoved);
#endif
}

template <class Impl>
std::string
InstructionQueue<Impl>::name() const
{
    return cpu->name() + ".iq";
}

template <class Impl>
void
InstructionQueue<Impl>::regStats()
{
    using namespace Stats;
    iqInstsAdded
        .name(name() + ".iqInstsAdded")
        .desc("Number of instructions added to the IQ (excludes non-spec)")
        .prereq(iqInstsAdded);

    iqNonSpecInstsAdded
        .name(name() + ".iqNonSpecInstsAdded")
        .desc("Number of non-speculative instructions added to the IQ")
        .prereq(iqNonSpecInstsAdded);

    iqInstsIssued
        .name(name() + ".iqInstsIssued")
        .desc("Number of instructions issued")
        .prereq(iqInstsIssued);

    iqIntInstsIssued
        .name(name() + ".iqIntInstsIssued")
        .desc("Number of integer instructions issued")
        .prereq(iqIntInstsIssued);

    iqFloatInstsIssued
        .name(name() + ".iqFloatInstsIssued")
        .desc("Number of float instructions issued")
        .prereq(iqFloatInstsIssued);

    iqBranchInstsIssued
        .name(name() + ".iqBranchInstsIssued")
        .desc("Number of branch instructions issued")
        .prereq(iqBranchInstsIssued);

    iqMemInstsIssued
        .name(name() + ".iqMemInstsIssued")
        .desc("Number of memory instructions issued")
        .prereq(iqMemInstsIssued);

    iqMiscInstsIssued
        .name(name() + ".iqMiscInstsIssued")
        .desc("Number of miscellaneous instructions issued")
        .prereq(iqMiscInstsIssued);

    iqSquashedInstsIssued
        .name(name() + ".iqSquashedInstsIssued")
        .desc("Number of squashed instructions issued")
        .prereq(iqSquashedInstsIssued);

    iqSquashedInstsExamined
        .name(name() + ".iqSquashedInstsExamined")
        .desc("Number of squashed instructions iterated over during squash;"
              " mainly for profiling")
        .prereq(iqSquashedInstsExamined);

    iqSquashedOperandsExamined
        .name(name() + ".iqSquashedOperandsExamined")
        .desc("Number of squashed operands that are examined and possibly "
              "removed from graph")
        .prereq(iqSquashedOperandsExamined);

    iqSquashedNonSpecRemoved
        .name(name() + ".iqSquashedNonSpecRemoved")
        .desc("Number of squashed non-spec instructions that were removed")
        .prereq(iqSquashedNonSpecRemoved);
/*
    queueResDist
        .init(Num_OpClasses, 0, 99, 2)
        .name(name() + ".IQ:residence:")
        .desc("cycles from dispatch to issue")
        .flags(total | pdf | cdf )
        ;
    for (int i = 0; i < Num_OpClasses; ++i) {
        queueResDist.subname(i, opClassStrings[i]);
    }
*/
    numIssuedDist
        .init(0, totalWidth, 1)
        .name(name() + ".ISSUE:issued_per_cycle")
        .desc("Number of insts issued each cycle")
        .flags(pdf)
        ;
/*
    dist_unissued
        .init(Num_OpClasses+2)
        .name(name() + ".ISSUE:unissued_cause")
        .desc("Reason ready instruction not issued")
        .flags(pdf | dist)
        ;
    for (int i=0; i < (Num_OpClasses + 2); ++i) {
        dist_unissued.subname(i, unissued_names[i]);
    }
*/
    statIssuedInstType
        .init(numThreads, Num_OpClasses)
        .name(name() + ".ISSUE:FU_type")
        .desc("Type of FU issued")
        .flags(total | pdf | dist)
        ;
    statIssuedInstType.ysubnames(opClassStrings);

    //
    // How long did instructions for a particular FU type wait prior to issue
    //
/*
    issueDelayDist
        .init(Num_OpClasses, 0, 99, 2)
        .name(name() + ".ISSUE:")
        .desc("cycles from operands ready to issue")
        .flags(pdf | cdf)
        ;

    for (int i=0; i < Num_OpClasses; ++i) {
        std::stringstream subname;
        subname << opClassStrings[i] << "_delay";
        issueDelayDist.subname(i, subname.str());
    }
*/
    issueRate
        .name(name() + ".ISSUE:rate")
        .desc("Inst issue rate")
        .flags(total)
        ;
    issueRate = iqInstsIssued / cpu->numCycles;

    statFuBusy
        .init(Num_OpClasses)
        .name(name() + ".ISSUE:fu_full")
        .desc("attempts to use FU when none available")
        .flags(pdf | dist)
        ;
    for (int i = 0; i < Num_OpClasses; ++i) {
        statFuBusy.subname(i, opClassStrings[i]);
    }

    fuBusy
        .init(numThreads)
        .name(name() + ".ISSUE:fu_busy_cnt")
        .desc("FU busy when requested")
        .flags(total)
        ;

    fuBusyRate
        .name(name() + ".ISSUE:fu_busy_rate")
        .desc("FU busy rate (busy events/executed inst)")
        .flags(total)
        ;
    fuBusyRate = fuBusy / iqInstsIssued;

    for (int i = 0; i < numThreads; i++) {
        // Tell mem dependence unit to reg stats as well.
        memDepUnit[i].regStats();
    }
}
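
// issueRate and fuBusyRate above are formula stats: they are evaluated when
// statistics are dumped rather than updated every cycle. Since name() returns
// cpu->name() + ".iq", these show up in the stats output under names such as
// "<cpu_name>.iq.ISSUE:rate" (the exact prefix depends on how the CPU object
// is named in the configuration).
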
template <class Impl>
void
InstructionQueue<Impl>::resetState()
{
    // Initialize thread IQ counts.
    for (int i = 0; i < numThreads; i++) {
        count[i] = 0;
        instList[i].clear();
    }

    // Initialize the number of free IQ entries.
    freeEntries = numEntries;

    // Note that in actuality, the registers corresponding to the logical
    // registers start off as ready. However this doesn't matter for the
    // IQ as the instruction should have been correctly told if those
    // registers are ready in rename. Thus it can all be initialized as
    // unready.
    for (int i = 0; i < numPhysRegs; ++i) {
        regScoreboard[i] = false;
    }

    for (int i = 0; i < numThreads; ++i) {
        squashedSeqNum[i] = 0;
    }

    for (int i = 0; i < Num_OpClasses; ++i) {
        while (!readyInsts[i].empty())
            readyInsts[i].pop();
        queueOnList[i] = false;
        readyIt[i] = listOrder.end();
    }
    nonSpecInsts.clear();
    listOrder.clear();
}
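
// A rough sketch of how this queue is expected to be driven after
// construction (the method names below are the ones defined in this file;
// the caller is presumably the IEW stage, which owns the IQ): the stage
// first wires up shared structures with setActiveThreads(),
// setIssueToExecuteQueue(), and setTimeBuffer(), then calls insert() or
// insertNonSpec() as instructions are dispatched and scheduleReadyInsts()
// each cycle to pick instructions for execution.
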
template <class Impl>
void
InstructionQueue<Impl>::setActiveThreads(std::list<unsigned> *at_ptr)
{
    activeThreads = at_ptr;
}

template <class Impl>
void
InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
{
    issueToExecuteQueue = i2e_ptr;
}

template <class Impl>
void
InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
{
    timeBuffer = tb_ptr;

    fromCommit = timeBuffer->getWire(-commitToIEWDelay);
}

template <class Impl>
void
InstructionQueue<Impl>::switchOut()
{
/*
    if (!instList[0].empty() || (numEntries != freeEntries) ||
        !readyInsts[0].empty() || !nonSpecInsts.empty() || !listOrder.empty()) {
        dumpInsts();
//        assert(0);
    }
*/
    resetState();
    dependGraph.reset();
    instsToExecute.clear();
    switchedOut = true;
    for (int i = 0; i < numThreads; ++i) {
        memDepUnit[i].switchOut();
    }
}

template <class Impl>
void
InstructionQueue<Impl>::takeOverFrom()
{
    switchedOut = false;
}

template <class Impl>
int
InstructionQueue<Impl>::entryAmount(int num_threads)
{
    if (iqPolicy == Partitioned) {
        return numEntries / num_threads;
    } else {
        return 0;
    }
}


template <class Impl>
void
InstructionQueue<Impl>::resetEntries()
{
    if (iqPolicy != Dynamic || numThreads > 1) {
        int active_threads = activeThreads->size();

        std::list<unsigned>::iterator threads = activeThreads->begin();
        std::list<unsigned>::iterator end = activeThreads->end();

        while (threads != end) {
            unsigned tid = *threads++;

            if (iqPolicy == Partitioned) {
                maxEntries[tid] = numEntries / active_threads;
            } else if (iqPolicy == Threshold && active_threads == 1) {
                maxEntries[tid] = numEntries;
            }
        }
    }
}

template <class Impl>
unsigned
InstructionQueue<Impl>::numFreeEntries()
{
    return freeEntries;
}

template <class Impl>
unsigned
InstructionQueue<Impl>::numFreeEntries(unsigned tid)
{
    return maxEntries[tid] - count[tid];
}

// Might want to do something more complex if it knows how many instructions
// will be issued this cycle.
template <class Impl>
bool
InstructionQueue<Impl>::isFull()
{
    return freeEntries == 0;
}

template <class Impl>
bool
InstructionQueue<Impl>::isFull(unsigned tid)
{
    return numFreeEntries(tid) == 0;
}

template <class Impl>
bool
InstructionQueue<Impl>::hasReadyInsts()
{
    if (!listOrder.empty()) {
        return true;
    }

    for (int i = 0; i < Num_OpClasses; ++i) {
        if (!readyInsts[i].empty()) {
            return true;
        }
    }

    return false;
}

template <class Impl>
void
InstructionQueue<Impl>::insert(DynInstPtr &new_inst)
{
    // Make sure the instruction is valid.
    assert(new_inst);

    DPRINTF(IQ, "Adding instruction [sn:%lli] PC %#x to the IQ.\n",
            new_inst->seqNum, new_inst->readPC());

    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Look through its source registers (physical regs), and mark any
    // dependencies.
    addToDependents(new_inst);

    // Have this instruction set itself as the producer of its destination
    // register(s).
    addToProducers(new_inst);

    if (new_inst->isMemRef()) {
        memDepUnit[new_inst->threadNumber].insert(new_inst);
    } else {
        addIfReady(new_inst);
    }

    ++iqInstsAdded;

    count[new_inst->threadNumber]++;

    assert(freeEntries == (numEntries - countInsts()));
}
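
// A small illustrative example of the dispatch path above (register numbers
// are made up): for an add whose two renamed sources are p12 and p47 and
// whose renamed destination is p53, addToDependents() consults the
// regScoreboard; if p12 is ready and p47 is not, the instruction is linked
// onto dependGraph[p47] to wait for its producer. addToProducers() then
// records the instruction as the head (producer) of dependGraph[p53] and
// clears regScoreboard[p53]. Non-memory instructions whose sources are all
// ready go straight onto the ready lists via addIfReady(); memory references
// go to the memory dependence unit instead.
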
template <class Impl>
void
InstructionQueue<Impl>::insertNonSpec(DynInstPtr &new_inst)
{
    // @todo: Clean up this code; can do it by setting inst as unable
    // to issue, then calling normal insert on the inst.

    assert(new_inst);

    nonSpecInsts[new_inst->seqNum] = new_inst;

    DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %#x "
            "to the IQ.\n",
            new_inst->seqNum, new_inst->readPC());

    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Have this instruction set itself as the producer of its destination
    // register(s).
    addToProducers(new_inst);

    // If it's a memory instruction, add it to the memory dependency
    // unit.
    if (new_inst->isMemRef()) {
        memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
    }

    ++iqNonSpecInstsAdded;

    count[new_inst->threadNumber]++;

    assert(freeEntries == (numEntries - countInsts()));
}

template <class Impl>
void
InstructionQueue<Impl>::insertBarrier(DynInstPtr &barr_inst)
{
    memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);

    insertNonSpec(barr_inst);
}

template <class Impl>
typename Impl::DynInstPtr
InstructionQueue<Impl>::getInstToExecute()
{
    assert(!instsToExecute.empty());
    DynInstPtr inst = instsToExecute.front();
    instsToExecute.pop_front();
    return inst;
}

template <class Impl>
void
InstructionQueue<Impl>::addToOrderList(OpClass op_class)
{
    assert(!readyInsts[op_class].empty());

    ListOrderEntry queue_entry;

    queue_entry.queueType = op_class;

    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    ListOrderIt list_it = listOrder.begin();
    ListOrderIt list_end_it = listOrder.end();

    while (list_it != list_end_it) {
        if ((*list_it).oldestInst > queue_entry.oldestInst) {
            break;
        }

        list_it++;
    }

    readyIt[op_class] = listOrder.insert(list_it, queue_entry);
    queueOnList[op_class] = true;
}

template <class Impl>
void
InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
{
    // Get iterator of next item on the list.
    // Delete the original iterator.
    // Determine if the next item is either the end of the list or younger
    // than the new instruction. If so, then add in a new iterator right here.
    // If not, then move along.
    ListOrderEntry queue_entry;
    OpClass op_class = (*list_order_it).queueType;
    ListOrderIt next_it = list_order_it;

    ++next_it;

    queue_entry.queueType = op_class;
    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    while (next_it != listOrder.end() &&
           (*next_it).oldestInst < queue_entry.oldestInst) {
        ++next_it;
    }

    readyIt[op_class] = listOrder.insert(next_it, queue_entry);
}
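
// The listOrder bookkeeping above maintains at most one entry per op class
// that has ready instructions, kept sorted by the sequence number of the
// oldest ready instruction of that class. As an illustrative snapshot
// (sequence numbers invented for the example), listOrder might hold
//   { (op class A, sn 100), (op class B, sn 103), (op class C, sn 110) }
// so that scheduleReadyInsts() can walk it front to back and always consider
// the globally oldest ready instruction of each class first.
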
template <class Impl>
void
InstructionQueue<Impl>::processFUCompletion(DynInstPtr &inst, int fu_idx)
{
    DPRINTF(IQ, "Processing FU completion [sn:%lli]\n", inst->seqNum);
    // The CPU could have been sleeping until this op completed (*extremely*
    // long latency op). Wake it if it was. This may be overkill.
    if (isSwitchedOut()) {
        DPRINTF(IQ, "FU completion not processed, IQ is switched out [sn:%lli]\n",
                inst->seqNum);
        return;
    }

    iewStage->wakeCPU();

    if (fu_idx > -1)
        fuPool->freeUnitNextCycle(fu_idx);

    // @todo: Ensure that these FU Completions happen at the beginning
    // of a cycle, otherwise they could add too many instructions to
    // the queue.
    issueToExecuteQueue->access(0)->size++;
    instsToExecute.push_back(inst);
}
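
// Putting the pieces together: for a multi-cycle operation,
// scheduleReadyInsts() below allocates an FUCompletion event and schedules
// it to fire once the operation's latency has elapsed. When the event fires,
// its process() method calls back into processFUCompletion() above, which
// frees the functional unit if requested and hands the instruction to the
// execute stage via instsToExecute and the issue-to-execute queue.
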
// @todo: Figure out a better way to remove the squashed items from the
// lists. Checking the top item of each list to see if it's squashed
// wastes time and forces jumps.
template <class Impl>
void
InstructionQueue<Impl>::scheduleReadyInsts()
{
    DPRINTF(IQ, "Attempting to schedule ready instructions from "
            "the IQ.\n");

    IssueStruct *i2e_info = issueToExecuteQueue->access(0);

    // Have iterator to head of the list.
    // While I haven't exceeded bandwidth or reached the end of the list,
    // Try to get a FU that can do what this op needs.
    // If successful, change the oldestInst to the new top of the list, put
    // the queue in the proper place in the list.
    // Increment the iterator.
    // This will avoid trying to schedule a certain op class if there are no
    // FUs that handle it.
    ListOrderIt order_it = listOrder.begin();
    ListOrderIt order_end_it = listOrder.end();
    int total_issued = 0;

    while (total_issued < totalWidth &&
           iewStage->canIssue() &&
           order_it != order_end_it) {
        OpClass op_class = (*order_it).queueType;

        assert(!readyInsts[op_class].empty());

        DynInstPtr issuing_inst = readyInsts[op_class].top();

        assert(issuing_inst->seqNum == (*order_it).oldestInst);

        if (issuing_inst->isSquashed()) {
            readyInsts[op_class].pop();

            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            listOrder.erase(order_it++);

            ++iqSquashedInstsIssued;

            continue;
        }

        int idx = -2;
        int op_latency = 1;
        int tid = issuing_inst->threadNumber;

        if (op_class != No_OpClass) {
            idx = fuPool->getUnit(op_class);

            if (idx > -1) {
                op_latency = fuPool->getOpLatency(op_class);
            }
        }

        // If we have an instruction that doesn't require a FU, or a
        // valid FU, then schedule for execution.
        if (idx == -2 || idx != -1) {
            if (op_latency == 1) {
                i2e_info->size++;
                instsToExecute.push_back(issuing_inst);

                // Add the FU onto the list of FU's to be freed next
                // cycle if we used one.
                if (idx >= 0)
                    fuPool->freeUnitNextCycle(idx);
            } else {
                int issue_latency = fuPool->getIssueLatency(op_class);
                // Generate completion event for the FU.
                FUCompletion *execution = new FUCompletion(issuing_inst,
                                                           idx, this);

                execution->schedule(curTick + cpu->cycles(issue_latency - 1));

                // @todo: Enforce that issue_latency == 1 or op_latency
                if (issue_latency > 1) {
                    // If FU isn't pipelined, then it must be freed
                    // upon the execution completing.
                    execution->setFreeFU();
                } else {
                    // Add the FU onto the list of FU's to be freed next cycle.
                    fuPool->freeUnitNextCycle(idx);
                }
            }

            DPRINTF(IQ, "Thread %i: Issuing instruction PC %#x "
                    "[sn:%lli]\n",
                    tid, issuing_inst->readPC(),
                    issuing_inst->seqNum);

            readyInsts[op_class].pop();

            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            issuing_inst->setIssued();
            ++total_issued;

            if (!issuing_inst->isMemRef()) {
                // Memory instructions can not be freed from the IQ until they
                // complete.
                ++freeEntries;
                count[tid]--;
                issuing_inst->clearInIQ();
            } else {
                memDepUnit[tid].issue(issuing_inst);
            }

            listOrder.erase(order_it++);
            statIssuedInstType[tid][op_class]++;
            iewStage->incrWb(issuing_inst->seqNum);
        } else {
            statFuBusy[op_class]++;
            fuBusy[tid]++;
            ++order_it;
        }
    }

    numIssuedDist.sample(total_issued);
    iqInstsIssued += total_issued;

    // If we issued any instructions, tell the CPU we had activity.
    if (total_issued) {
        cpu->activityThisCycle();
    } else {
        DPRINTF(IQ, "Not able to schedule any instructions.\n");
    }
}
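
// For reference, the functional-unit index convention used in the issue loop
// above: idx == -2 means the op class is No_OpClass and needs no functional
// unit at all, idx == -1 means every unit of that class is currently busy
// (the instruction stays on its ready list and the fu_busy stats are
// incremented), and idx >= 0 identifies the pool unit claimed for this issue.
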
template <class Impl>
void
InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
{
    DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
            "to execute.\n", inst);

    NonSpecMapIt inst_it = nonSpecInsts.find(inst);

    assert(inst_it != nonSpecInsts.end());

    unsigned tid = (*inst_it).second->threadNumber;

    (*inst_it).second->setAtCommit();

    (*inst_it).second->setCanIssue();

    if (!(*inst_it).second->isMemRef()) {
        addIfReady((*inst_it).second);
    } else {
        memDepUnit[tid].nonSpecInstReady((*inst_it).second);
    }

    (*inst_it).second = NULL;

    nonSpecInsts.erase(inst_it);
}
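
// Non-speculative instructions therefore follow a two-step path: they are
// parked in nonSpecInsts by insertNonSpec() at dispatch without being made
// issuable, and only when the rest of the pipeline signals (via
// scheduleNonSpec()) that it is safe to execute them -- typically once they
// are the oldest instruction for that thread -- are they marked able to
// issue and moved onto the ready lists or handed to the memory dependence
// unit.
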
template <class Impl>
void
InstructionQueue<Impl>::commit(const InstSeqNum &inst, unsigned tid)
{
    DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n",
            tid, inst);

    ListIt iq_it = instList[tid].begin();

    while (iq_it != instList[tid].end() &&
           (*iq_it)->seqNum <= inst) {
        ++iq_it;
        instList[tid].pop_front();
    }

    assert(freeEntries == (numEntries - countInsts()));
}

template <class Impl>
int
InstructionQueue<Impl>::wakeDependents(DynInstPtr &completed_inst)
{
    int dependents = 0;

    DPRINTF(IQ, "Waking dependents of completed instruction.\n");

    assert(!completed_inst->isSquashed());

    // Tell the memory dependence unit to wake any dependents on this
    // instruction if it is a memory instruction. Also complete the memory
    // instruction at this point since we know it executed without issues.
    // @todo: Might want to rename "completeMemInst" to something that
    // indicates that it won't need to be replayed, and call this
    // earlier. Might not be a big deal.
    if (completed_inst->isMemRef()) {
        memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
        completeMemInst(completed_inst);
    } else if (completed_inst->isMemBarrier() ||
               completed_inst->isWriteBarrier()) {
        memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
    }

    for (int dest_reg_idx = 0;
         dest_reg_idx < completed_inst->numDestRegs();
         dest_reg_idx++)
    {
        PhysRegIndex dest_reg =
            completed_inst->renamedDestRegIdx(dest_reg_idx);

        // Special case of unique (misc) or control registers. They are not
        // handled by the IQ and thus have no dependency graph entry.
        // @todo: Figure out a cleaner way to handle this.
        if (dest_reg >= numPhysRegs) {
            continue;
        }

        DPRINTF(IQ, "Waking any dependents on register %i.\n",
                (int) dest_reg);

        // Go through the dependency chain, marking the registers as
        // ready within the waiting instructions.
        DynInstPtr dep_inst = dependGraph.pop(dest_reg);

        while (dep_inst) {
            DPRINTF(IQ, "Waking up a dependent instruction, PC %#x.\n",
                    dep_inst->readPC());

            // Might want to give more information to the instruction
            // so that it knows which of its source registers is
            // ready. However that would mean that the dependency
            // graph entries would need to hold the src_reg_idx.
            dep_inst->markSrcRegReady();

            addIfReady(dep_inst);

            dep_inst = dependGraph.pop(dest_reg);

            ++dependents;
        }

        // Reset the head node now that all of its dependents have
        // been woken up.
        assert(dependGraph.empty(dest_reg));
        dependGraph.clearInst(dest_reg);

        // Mark the scoreboard as having that register ready.
        regScoreboard[dest_reg] = true;
    }
    return dependents;
}
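
// A small worked example of the wakeup above (register and sequence numbers
// invented): when the producer of p47 finishes, dependGraph.pop(p47) yields
// each waiting consumer in turn; every consumer gets markSrcRegReady() and,
// if that was its last outstanding source, addIfReady() places it on the
// ready list for its op class. Afterwards the head node for p47 is cleared
// and regScoreboard[p47] is set so later arrivals see the value as ready.
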
template <class Impl>
void
InstructionQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
{
    OpClass op_class = ready_inst->opClass();

    readyInsts[op_class].push(ready_inst);

    // Will need to reorder the list if either a queue is not on the list,
    // or it has an older instruction than last time.
    if (!queueOnList[op_class]) {
        addToOrderList(op_class);
    } else if (readyInsts[op_class].top()->seqNum <
               (*readyIt[op_class]).oldestInst) {
        listOrder.erase(readyIt[op_class]);
        addToOrderList(op_class);
    }

    DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
            "the ready list, PC %#x opclass:%i [sn:%lli].\n",
            ready_inst->readPC(), op_class, ready_inst->seqNum);
}

template <class Impl>
void
InstructionQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
{
    DPRINTF(IQ, "Rescheduling mem inst [sn:%lli]\n", resched_inst->seqNum);
    resched_inst->clearCanIssue();
    memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
}

template <class Impl>
void
InstructionQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
{
    memDepUnit[replay_inst->threadNumber].replay(replay_inst);
}

template <class Impl>
void
InstructionQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
{
    int tid = completed_inst->threadNumber;

    DPRINTF(IQ, "Completing mem instruction PC:%#x [sn:%lli]\n",
            completed_inst->readPC(), completed_inst->seqNum);

    ++freeEntries;

    completed_inst->memOpDone = true;

    memDepUnit[tid].completed(completed_inst);
    count[tid]--;
}
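
// Note the asymmetry with non-memory instructions: issuing a load or store
// does not free its IQ entry (scheduleReadyInsts() deliberately skips the
// ++freeEntries / count[tid]-- step for memory references), because the
// access may need to be rescheduled or replayed. The entry is only released
// here in completeMemInst(), once memOpDone is set and the memory dependence
// unit has been told that the access finished.
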
template <class Impl>
void
InstructionQueue<Impl>::violation(DynInstPtr &store,
                                  DynInstPtr &faulting_load)
{
    memDepUnit[store->threadNumber].violation(store, faulting_load);
}

template <class Impl>
void
InstructionQueue<Impl>::squash(unsigned tid)
{
    DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
            "the IQ.\n", tid);

    // Read instruction sequence number of last instruction out of the
    // time buffer.
#if ISA_HAS_DELAY_SLOT
    squashedSeqNum[tid] = fromCommit->commitInfo[tid].bdelayDoneSeqNum;
#else
    squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;
#endif

    // Call doSquash if there are insts in the IQ.
    if (count[tid] > 0) {
        doSquash(tid);
    }

    // Also tell the memory dependence unit to squash.
    memDepUnit[tid].squash(squashedSeqNum[tid], tid);
}

template <class Impl>
void
InstructionQueue<Impl>::doSquash(unsigned tid)
{
    // Start at the tail.
    ListIt squash_it = instList[tid].end();
    --squash_it;

    DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
            tid, squashedSeqNum[tid]);

    // Squash any instructions younger than the squashed sequence number
    // given.
    while (squash_it != instList[tid].end() &&
           (*squash_it)->seqNum > squashedSeqNum[tid]) {

        DynInstPtr squashed_inst = (*squash_it);

        // Only handle the instruction if it actually is in the IQ and
        // hasn't already been squashed in the IQ.
        if (squashed_inst->threadNumber != tid ||
            squashed_inst->isSquashedInIQ()) {
            --squash_it;
            continue;
        }

        if (!squashed_inst->isIssued() ||
            (squashed_inst->isMemRef() &&
             !squashed_inst->memOpDone)) {

            DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %#x "
                    "squashed.\n",
                    tid, squashed_inst->seqNum, squashed_inst->readPC());

            // Remove the instruction from the dependency list.
            if (!squashed_inst->isNonSpeculative() &&
                !squashed_inst->isStoreConditional() &&
                !squashed_inst->isMemBarrier() &&
                !squashed_inst->isWriteBarrier()) {

                for (int src_reg_idx = 0;
                     src_reg_idx < squashed_inst->numSrcRegs();
                     src_reg_idx++)
                {
                    PhysRegIndex src_reg =
                        squashed_inst->renamedSrcRegIdx(src_reg_idx);

                    // Only remove it from the dependency graph if it
                    // was placed there in the first place.

                    // Instead of doing a linked list traversal, we
                    // can just remove these squashed instructions
                    // either at issue time, or when the register is
                    // overwritten. The only downside to this is it
                    // leaves more room for error.

                    if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
                        src_reg < numPhysRegs) {
                        dependGraph.remove(src_reg, squashed_inst);
                    }

                    ++iqSquashedOperandsExamined;
                }
            } else if (!squashed_inst->isStoreConditional() ||
                       !squashed_inst->isCompleted()) {
                NonSpecMapIt ns_inst_it =
                    nonSpecInsts.find(squashed_inst->seqNum);

                if (ns_inst_it == nonSpecInsts.end()) {
                    // Not in the non-speculative map; this should only
                    // happen if the instruction faulted.
                    assert(squashed_inst->getFault() != NoFault);
                } else {

                    (*ns_inst_it).second = NULL;

                    nonSpecInsts.erase(ns_inst_it);

                    ++iqSquashedNonSpecRemoved;
                }
            }

            // Might want to also clear out the head of the dependency graph.

            // Mark it as squashed within the IQ.
            squashed_inst->setSquashedInIQ();

            // @todo: Remove this hack where several statuses are set so the
            // inst will flow through the rest of the pipeline.
            squashed_inst->setIssued();
            squashed_inst->setCanCommit();
            squashed_inst->clearInIQ();

            // Update the thread IQ count.
            count[squashed_inst->threadNumber]--;

            ++freeEntries;
        }

        instList[tid].erase(squash_it--);
        ++iqSquashedInstsExamined;
    }
}

template <class Impl>
bool
InstructionQueue<Impl>::addToDependents(DynInstPtr &new_inst)
{
    // Loop through the instruction's source registers, adding
    // them to the dependency list if they are not ready.
    int8_t total_src_regs = new_inst->numSrcRegs();
    bool return_val = false;

    for (int src_reg_idx = 0;
         src_reg_idx < total_src_regs;
         src_reg_idx++)
    {
        // Only add it to the dependency graph if it's not ready.
        if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
            PhysRegIndex src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);

            // Check the IQ's scoreboard to make sure the register
            // hasn't become ready while the instruction was in flight
            // between stages. Only if it really isn't ready should
            // it be added to the dependency graph.
            if (src_reg >= numPhysRegs) {
                continue;
            } else if (regScoreboard[src_reg] == false) {
                DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
                        "is being added to the dependency chain.\n",
                        new_inst->readPC(), src_reg);

                dependGraph.insert(src_reg, new_inst);

                // Change the return value to indicate that something
                // was added to the dependency graph.
                return_val = true;
            } else {
                DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
                        "became ready before it reached the IQ.\n",
                        new_inst->readPC(), src_reg);
                // Mark a register ready within the instruction.
                new_inst->markSrcRegReady(src_reg_idx);
            }
        }
    }

    return return_val;
}

template <class Impl>
void
InstructionQueue<Impl>::addToProducers(DynInstPtr &new_inst)
{
    // Nothing really needs to be marked when an instruction becomes
    // the producer of a register's value, but for convenience a ptr
    // to the producing instruction will be placed in the head node of
    // the dependency links.
    int8_t total_dest_regs = new_inst->numDestRegs();

    for (int dest_reg_idx = 0;
         dest_reg_idx < total_dest_regs;
         dest_reg_idx++)
    {
        PhysRegIndex dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);

        // Instructions that use the misc regs will have a reg number
        // higher than the normal physical registers. In this case these
        // registers are not renamed, and there is no need to track
        // dependencies as these instructions must be executed at commit.
        if (dest_reg >= numPhysRegs) {
            continue;
        }

        if (!dependGraph.empty(dest_reg)) {
            dependGraph.dump();
            panic("Dependency graph %i not empty!", dest_reg);
        }

        dependGraph.setInst(dest_reg, new_inst);

        // Mark the scoreboard to say it's not yet ready.
        regScoreboard[dest_reg] = false;
    }
}
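
// To tie addToDependents() and addToProducers() back to the scoreboard: a
// register is considered ready when regScoreboard[reg] is true, which only
// wakeDependents() sets; addToProducers() clears it again whenever a newly
// dispatched instruction claims that physical register as a destination.
// Destination indices at or above numPhysRegs (misc/control registers) are
// skipped entirely, matching the corresponding check in wakeDependents().
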
template <class Impl>
void
InstructionQueue<Impl>::addIfReady(DynInstPtr &inst)
{
    // If the instruction now has all of its source registers
    // available, then add it to the list of ready instructions.
    if (inst->readyToIssue()) {

        // Add the instruction to the proper ready list.
        if (inst->isMemRef()) {

            DPRINTF(IQ, "Checking if memory instruction can issue.\n");

            // Message to the mem dependence unit that this instruction has
            // its registers ready.
            memDepUnit[inst->threadNumber].regsReady(inst);

            return;
        }

        OpClass op_class = inst->opClass();

        DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
                "the ready list, PC %#x opclass:%i [sn:%lli].\n",
                inst->readPC(), op_class, inst->seqNum);

        readyInsts[op_class].push(inst);

        // Will need to reorder the list if either a queue is not on the list,
        // or it has an older instruction than last time.
        if (!queueOnList[op_class]) {
            addToOrderList(op_class);
        } else if (readyInsts[op_class].top()->seqNum <
                   (*readyIt[op_class]).oldestInst) {
            listOrder.erase(readyIt[op_class]);
            addToOrderList(op_class);
        }
    }
}

template <class Impl>
int
InstructionQueue<Impl>::countInsts()
{
#if 0
    // ksewell: This works but definitely could use a cleaner write
    // with a more intuitive way of counting. Right now it's
    // just brute force ....
    // Change the #if if you want to use this method.
    int total_insts = 0;

    for (int i = 0; i < numThreads; ++i) {
        ListIt count_it = instList[i].begin();

        while (count_it != instList[i].end()) {
            if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
                if (!(*count_it)->isIssued()) {
                    ++total_insts;
                } else if ((*count_it)->isMemRef() &&
                           !(*count_it)->memOpDone) {
                    // Loads that have not been marked as executed still count
                    // towards the total instructions.
                    ++total_insts;
                }
            }

            ++count_it;
        }
    }

    return total_insts;
#else
    return numEntries - freeEntries;
#endif
}
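
// countInsts() is mainly used by the sanity assertions in insert(),
// insertNonSpec(), and commit(), which all check the invariant
// freeEntries == numEntries - countInsts(). With the fast path enabled above
// that check is trivially satisfied; the brute-force version under #if 0 is
// only useful when debugging a suspected freeEntries accounting bug.
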
template <class Impl>
void
InstructionQueue<Impl>::dumpLists()
{
    for (int i = 0; i < Num_OpClasses; ++i) {
        cprintf("Ready list %i size: %i\n", i, readyInsts[i].size());

        cprintf("\n");
    }

    cprintf("Non speculative list size: %i\n", nonSpecInsts.size());

    NonSpecMapIt non_spec_it = nonSpecInsts.begin();
    NonSpecMapIt non_spec_end_it = nonSpecInsts.end();

    cprintf("Non speculative list: ");

    while (non_spec_it != non_spec_end_it) {
        cprintf("%#x [sn:%lli]", (*non_spec_it).second->readPC(),
                (*non_spec_it).second->seqNum);
        ++non_spec_it;
    }

    cprintf("\n");

    ListOrderIt list_order_it = listOrder.begin();
    ListOrderIt list_order_end_it = listOrder.end();
    int i = 1;

    cprintf("List order: ");

    while (list_order_it != list_order_end_it) {
        cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
                (*list_order_it).oldestInst);

        ++list_order_it;
        ++i;
    }

    cprintf("\n");
}


template <class Impl>
void
InstructionQueue<Impl>::dumpInsts()
{
    for (int i = 0; i < numThreads; ++i) {
        int num = 0;
        int valid_num = 0;
        ListIt inst_list_it = instList[i].begin();

        while (inst_list_it != instList[i].end())
        {
            cprintf("Instruction:%i\n",
                    num);
            if (!(*inst_list_it)->isSquashed()) {
                if (!(*inst_list_it)->isIssued()) {
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                } else if ((*inst_list_it)->isMemRef() &&
                           !(*inst_list_it)->memOpDone) {
                    // Loads that have not been marked as executed
                    // still count towards the total instructions.
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                }
            }

            cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
                    "Issued:%i\nSquashed:%i\n",
                    (*inst_list_it)->readPC(),
                    (*inst_list_it)->seqNum,
                    (*inst_list_it)->threadNumber,
                    (*inst_list_it)->isIssued(),
                    (*inst_list_it)->isSquashed());

            if ((*inst_list_it)->isMemRef()) {
                cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
            }

            cprintf("\n");

            inst_list_it++;
            ++num;
        }
    }

    cprintf("Insts to Execute list:\n");

    int num = 0;
    int valid_num = 0;
    ListIt inst_list_it = instsToExecute.begin();

    while (inst_list_it != instsToExecute.end())
    {
        cprintf("Instruction:%i\n",
                num);
        if (!(*inst_list_it)->isSquashed()) {
            if (!(*inst_list_it)->isIssued()) {
                ++valid_num;
                cprintf("Count:%i\n", valid_num);
            } else if ((*inst_list_it)->isMemRef() &&
                       !(*inst_list_it)->memOpDone) {
                // Loads that have not been marked as executed
                // still count towards the total instructions.
                ++valid_num;
                cprintf("Count:%i\n", valid_num);
            }
        }

        cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
                "Issued:%i\nSquashed:%i\n",
                (*inst_list_it)->readPC(),
                (*inst_list_it)->seqNum,
                (*inst_list_it)->threadNumber,
                (*inst_list_it)->isIssued(),
                (*inst_list_it)->isSquashed());

        if ((*inst_list_it)->isMemRef()) {
            cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
        }

        cprintf("\n");

        inst_list_it++;
        ++num;
    }
}