inst_queue_impl.hh revision 11365
/*
 * Copyright (c) 2011-2014 ARM Limited
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#ifndef __CPU_O3_INST_QUEUE_IMPL_HH__
#define __CPU_O3_INST_QUEUE_IMPL_HH__

#include <algorithm>    // for std::transform used in the constructor below
#include <limits>
#include <vector>

#include "cpu/o3/fu_pool.hh"
#include "cpu/o3/inst_queue.hh"
#include "debug/IQ.hh"
#include "enums/OpClass.hh"
#include "params/DerivO3CPU.hh"
#include "sim/core.hh"

// clang complains about std::set being overloaded with Packet::set if
// we open up the entire namespace std
using std::list;

template <class Impl>
InstructionQueue<Impl>::FUCompletion::FUCompletion(DynInstPtr &_inst,
    int fu_idx, InstructionQueue<Impl> *iq_ptr)
    : Event(Stat_Event_Pri, AutoDelete),
      inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false)
{
}

template <class Impl>
void
InstructionQueue<Impl>::FUCompletion::process()
{
    iqPtr->processFUCompletion(inst, freeFU ?
                                     fuIdx : -1);
    inst = NULL;
}


template <class Impl>
const char *
InstructionQueue<Impl>::FUCompletion::description() const
{
    return "Functional unit completion";
}

template <class Impl>
InstructionQueue<Impl>::InstructionQueue(O3CPU *cpu_ptr, IEW *iew_ptr,
                                         DerivO3CPUParams *params)
    : cpu(cpu_ptr),
      iewStage(iew_ptr),
      fuPool(params->fuPool),
      numEntries(params->numIQEntries),
      totalWidth(params->issueWidth),
      commitToIEWDelay(params->commitToIEWDelay)
{
    assert(fuPool);

    numThreads = params->numThreads;

    // Set the number of total physical registers
    numPhysRegs = params->numPhysIntRegs + params->numPhysFloatRegs +
                  params->numPhysCCRegs;

    //Create an entry for each physical register within the
    //dependency graph.
    dependGraph.resize(numPhysRegs);

    // Resize the register scoreboard.
    regScoreboard.resize(numPhysRegs);

    //Initialize Mem Dependence Units
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        memDepUnit[tid].init(params, tid);
        memDepUnit[tid].setIQ(this);
    }

    resetState();

    std::string policy = params->smtIQPolicy;

    //Convert string to lowercase
    std::transform(policy.begin(), policy.end(), policy.begin(),
                   (int(*)(int)) tolower);

    //Figure out resource sharing policy
    if (policy == "dynamic") {
        iqPolicy = Dynamic;

        //Set Max Entries to Total ROB Capacity
        for (ThreadID tid = 0; tid < numThreads; tid++) {
            maxEntries[tid] = numEntries;
        }

    } else if (policy == "partitioned") {
        iqPolicy = Partitioned;

        //@todo: make work if part_amt doesn't divide evenly.
        int part_amt = numEntries / numThreads;

        //Divide ROB up evenly
        for (ThreadID tid = 0; tid < numThreads; tid++) {
            maxEntries[tid] = part_amt;
        }

        DPRINTF(IQ, "IQ sharing policy set to Partitioned:"
                "%i entries per thread.\n",part_amt);
    } else if (policy == "threshold") {
        iqPolicy = Threshold;

        double threshold = (double)params->smtIQThreshold / 100;

        int thresholdIQ = (int)((double)threshold * numEntries);

        //Divide up by threshold amount
        for (ThreadID tid = 0; tid < numThreads; tid++) {
            maxEntries[tid] = thresholdIQ;
        }

        DPRINTF(IQ, "IQ sharing policy set to Threshold:"
                "%i entries per thread.\n",thresholdIQ);
    } else {
        assert(0 && "Invalid IQ Sharing Policy.Options Are:{Dynamic,"
               "Partitioned, Threshold}");
    }
}

template <class Impl>
InstructionQueue<Impl>::~InstructionQueue()
{
    dependGraph.reset();
#ifdef DEBUG
    cprintf("Nodes traversed: %i, removed: %i\n",
            dependGraph.nodesTraversed, dependGraph.nodesRemoved);
#endif
}

template <class Impl>
std::string
InstructionQueue<Impl>::name() const
{
    return cpu->name() + ".iq";
}

template <class Impl>
void
InstructionQueue<Impl>::regStats()
{
    using namespace Stats;
    iqInstsAdded
        .name(name() + ".iqInstsAdded")
        .desc("Number of instructions added to the IQ (excludes non-spec)")
        .prereq(iqInstsAdded);

    iqNonSpecInstsAdded
        .name(name() + ".iqNonSpecInstsAdded")
        .desc("Number of non-speculative instructions added to the IQ")
        .prereq(iqNonSpecInstsAdded);

    iqInstsIssued
        .name(name() + ".iqInstsIssued")
        .desc("Number of instructions issued")
        .prereq(iqInstsIssued);

    iqIntInstsIssued
        .name(name() + ".iqIntInstsIssued")
        .desc("Number of integer instructions issued")
        .prereq(iqIntInstsIssued);

    iqFloatInstsIssued
        .name(name() + ".iqFloatInstsIssued")
        .desc("Number of float instructions issued")
        .prereq(iqFloatInstsIssued);

    iqBranchInstsIssued
        .name(name() + ".iqBranchInstsIssued")
        .desc("Number of branch instructions issued")
        .prereq(iqBranchInstsIssued);

    iqMemInstsIssued
        .name(name() + ".iqMemInstsIssued")
        .desc("Number of memory instructions issued")
        .prereq(iqMemInstsIssued);

    iqMiscInstsIssued
        .name(name() + ".iqMiscInstsIssued")
        .desc("Number of miscellaneous instructions issued")
        .prereq(iqMiscInstsIssued);

    iqSquashedInstsIssued
        .name(name() + ".iqSquashedInstsIssued")
        .desc("Number of squashed instructions issued")
        .prereq(iqSquashedInstsIssued);

    iqSquashedInstsExamined
        .name(name() + ".iqSquashedInstsExamined")
        .desc("Number of squashed instructions iterated over during squash;"
              " mainly for profiling")
        .prereq(iqSquashedInstsExamined);

    iqSquashedOperandsExamined
        .name(name() + ".iqSquashedOperandsExamined")
        .desc("Number of squashed operands that are examined and possibly "
              "removed from graph")
        .prereq(iqSquashedOperandsExamined);

    iqSquashedNonSpecRemoved
        .name(name() + ".iqSquashedNonSpecRemoved")
        .desc("Number of squashed non-spec instructions that were removed")
        .prereq(iqSquashedNonSpecRemoved);
/*
    queueResDist
        .init(Num_OpClasses, 0, 99, 2)
        .name(name() + ".IQ:residence:")
        .desc("cycles from dispatch to issue")
        .flags(total | pdf | cdf )
        ;
    for (int i = 0; i < Num_OpClasses; ++i) {
        queueResDist.subname(i, opClassStrings[i]);
    }
*/
    numIssuedDist
        .init(0,totalWidth,1)
        .name(name() + ".issued_per_cycle")
        .desc("Number of insts issued each cycle")
        .flags(pdf)
        ;
/*
    dist_unissued
        .init(Num_OpClasses+2)
        .name(name() + ".unissued_cause")
        .desc("Reason ready instruction not issued")
        .flags(pdf | dist)
        ;
    for (int i=0; i < (Num_OpClasses + 2); ++i) {
        dist_unissued.subname(i, unissued_names[i]);
    }
*/
    statIssuedInstType
        .init(numThreads,Enums::Num_OpClass)
        .name(name() + ".FU_type")
        .desc("Type of FU issued")
        .flags(total | pdf | dist)
        ;
    statIssuedInstType.ysubnames(Enums::OpClassStrings);

    //
    // How long did instructions for a particular FU type wait prior to issue
    //
/*
    issueDelayDist
        .init(Num_OpClasses,0,99,2)
        .name(name() + ".")
        .desc("cycles from operands ready to issue")
        .flags(pdf | cdf)
        ;

    for (int i=0; i<Num_OpClasses; ++i) {
        std::stringstream subname;
        subname << opClassStrings[i] << "_delay";
        issueDelayDist.subname(i, subname.str());
    }
*/
    issueRate
        .name(name() + ".rate")
        .desc("Inst issue rate")
        .flags(total)
        ;
    issueRate = iqInstsIssued / cpu->numCycles;

    statFuBusy
        .init(Num_OpClasses)
        .name(name() + ".fu_full")
        .desc("attempts to use FU when none available")
        .flags(pdf | dist)
        ;
    for (int i=0; i < Num_OpClasses; ++i) {
        statFuBusy.subname(i, Enums::OpClassStrings[i]);
    }

    fuBusy
        .init(numThreads)
        .name(name() + ".fu_busy_cnt")
        .desc("FU busy when requested")
        .flags(total)
        ;

    fuBusyRate
        .name(name() + ".fu_busy_rate")
        .desc("FU busy rate (busy events/executed inst)")
        .flags(total)
        ;
    fuBusyRate = fuBusy / iqInstsIssued;

    for (ThreadID tid = 0; tid <
         numThreads; tid++) {
        // Tell mem dependence unit to reg stats as well.
        memDepUnit[tid].regStats();
    }

    intInstQueueReads
        .name(name() + ".int_inst_queue_reads")
        .desc("Number of integer instruction queue reads")
        .flags(total);

    intInstQueueWrites
        .name(name() + ".int_inst_queue_writes")
        .desc("Number of integer instruction queue writes")
        .flags(total);

    intInstQueueWakeupAccesses
        .name(name() + ".int_inst_queue_wakeup_accesses")
        .desc("Number of integer instruction queue wakeup accesses")
        .flags(total);

    fpInstQueueReads
        .name(name() + ".fp_inst_queue_reads")
        .desc("Number of floating instruction queue reads")
        .flags(total);

    fpInstQueueWrites
        .name(name() + ".fp_inst_queue_writes")
        .desc("Number of floating instruction queue writes")
        .flags(total);

    fpInstQueueWakeupQccesses
        .name(name() + ".fp_inst_queue_wakeup_accesses")
        .desc("Number of floating instruction queue wakeup accesses")
        .flags(total);

    intAluAccesses
        .name(name() + ".int_alu_accesses")
        .desc("Number of integer alu accesses")
        .flags(total);

    fpAluAccesses
        .name(name() + ".fp_alu_accesses")
        .desc("Number of floating point alu accesses")
        .flags(total);

}

template <class Impl>
void
InstructionQueue<Impl>::resetState()
{
    //Initialize thread IQ counts
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        count[tid] = 0;
        instList[tid].clear();
    }

    // Initialize the number of free IQ entries.
    freeEntries = numEntries;

    // Note that in actuality, the registers corresponding to the logical
    // registers start off as ready. However this doesn't matter for the
    // IQ as the instruction should have been correctly told if those
    // registers are ready in rename. Thus it can all be initialized as
    // unready.
    for (int i = 0; i < numPhysRegs; ++i) {
        regScoreboard[i] = false;
    }

    for (ThreadID tid = 0; tid < numThreads; ++tid) {
        squashedSeqNum[tid] = 0;
    }

    for (int i = 0; i < Num_OpClasses; ++i) {
        while (!readyInsts[i].empty())
            readyInsts[i].pop();
        queueOnList[i] = false;
        readyIt[i] = listOrder.end();
    }
    nonSpecInsts.clear();
    listOrder.clear();
    deferredMemInsts.clear();
    blockedMemInsts.clear();
    retryMemInsts.clear();
    wbOutstanding = 0;
}

template <class Impl>
void
InstructionQueue<Impl>::setActiveThreads(list<ThreadID> *at_ptr)
{
    activeThreads = at_ptr;
}

template <class Impl>
void
InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
{
    issueToExecuteQueue = i2e_ptr;
}

template <class Impl>
void
InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
{
    timeBuffer = tb_ptr;

    fromCommit = timeBuffer->getWire(-commitToIEWDelay);
}

template <class Impl>
bool
InstructionQueue<Impl>::isDrained() const
{
    bool drained = dependGraph.empty() &&
                   instsToExecute.empty() &&
                   wbOutstanding == 0;
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        drained = drained && memDepUnit[tid].isDrained();

    return drained;
}

template <class Impl>
void
InstructionQueue<Impl>::drainSanityCheck() const
{
    assert(dependGraph.empty());
    assert(instsToExecute.empty());
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        memDepUnit[tid].drainSanityCheck();
}

template <class Impl>
void
InstructionQueue<Impl>::takeOverFrom()
{
    resetState();
}

template <class Impl>
int
InstructionQueue<Impl>::entryAmount(ThreadID num_threads)
{
    if (iqPolicy == Partitioned) {
        return numEntries / num_threads;
    } else {
        return 0;
    }
}


template <class Impl>
void
InstructionQueue<Impl>::resetEntries()
{
    if (iqPolicy != Dynamic || numThreads > 1) {
        int active_threads = activeThreads->size();

        list<ThreadID>::iterator threads = activeThreads->begin();
        list<ThreadID>::iterator end = activeThreads->end();

        while (threads != end) {
            ThreadID tid = *threads++;

            if (iqPolicy == Partitioned) {
                maxEntries[tid] = numEntries / active_threads;
            } else if (iqPolicy == Threshold && active_threads == 1) {
                maxEntries[tid] = numEntries;
            }
        }
    }
}

template <class Impl>
unsigned
InstructionQueue<Impl>::numFreeEntries()
{
    return freeEntries;
}

template <class Impl>
unsigned
InstructionQueue<Impl>::numFreeEntries(ThreadID tid)
{
    return maxEntries[tid] - count[tid];
}

// Might want to do something more complex if it knows how many instructions
// will be issued this cycle.
template <class Impl>
bool
InstructionQueue<Impl>::isFull()
{
    if (freeEntries == 0) {
        return(true);
    } else {
        return(false);
    }
}

template <class Impl>
bool
InstructionQueue<Impl>::isFull(ThreadID tid)
{
    if (numFreeEntries(tid) == 0) {
        return(true);
    } else {
        return(false);
    }
}

template <class Impl>
bool
InstructionQueue<Impl>::hasReadyInsts()
{
    if (!listOrder.empty()) {
        return true;
    }

    for (int i = 0; i < Num_OpClasses; ++i) {
        if (!readyInsts[i].empty()) {
            return true;
        }
    }

    return false;
}

template <class Impl>
void
InstructionQueue<Impl>::insert(DynInstPtr &new_inst)
{
    new_inst->isFloating() ? fpInstQueueWrites++ : intInstQueueWrites++;
    // Make sure the instruction is valid
    assert(new_inst);

    DPRINTF(IQ, "Adding instruction [sn:%lli] PC %s to the IQ.\n",
            new_inst->seqNum, new_inst->pcState());

    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Look through its source registers (physical regs), and mark any
    // dependencies.
    addToDependents(new_inst);

    // Have this instruction set itself as the producer of its destination
    // register(s).
    addToProducers(new_inst);

    if (new_inst->isMemRef()) {
        memDepUnit[new_inst->threadNumber].insert(new_inst);
    } else {
        addIfReady(new_inst);
    }

    ++iqInstsAdded;

    count[new_inst->threadNumber]++;

    assert(freeEntries == (numEntries - countInsts()));
}

template <class Impl>
void
InstructionQueue<Impl>::insertNonSpec(DynInstPtr &new_inst)
{
    // @todo: Clean up this code; can do it by setting inst as unable
    // to issue, then calling normal insert on the inst.
    new_inst->isFloating() ? fpInstQueueWrites++ : intInstQueueWrites++;

    assert(new_inst);

    nonSpecInsts[new_inst->seqNum] = new_inst;

    DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %s "
            "to the IQ.\n",
            new_inst->seqNum, new_inst->pcState());

    assert(freeEntries != 0);

    instList[new_inst->threadNumber].push_back(new_inst);

    --freeEntries;

    new_inst->setInIQ();

    // Have this instruction set itself as the producer of its destination
    // register(s).
    addToProducers(new_inst);

    // If it's a memory instruction, add it to the memory dependency
    // unit.
    if (new_inst->isMemRef()) {
        memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
    }

    ++iqNonSpecInstsAdded;

    count[new_inst->threadNumber]++;

    assert(freeEntries == (numEntries - countInsts()));
}

template <class Impl>
void
InstructionQueue<Impl>::insertBarrier(DynInstPtr &barr_inst)
{
    memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);

    insertNonSpec(barr_inst);
}

template <class Impl>
typename Impl::DynInstPtr
InstructionQueue<Impl>::getInstToExecute()
{
    assert(!instsToExecute.empty());
    DynInstPtr inst = instsToExecute.front();
    instsToExecute.pop_front();
    if (inst->isFloating()) {
        fpInstQueueReads++;
    } else {
        intInstQueueReads++;
    }
    return inst;
}

template <class Impl>
void
InstructionQueue<Impl>::addToOrderList(OpClass op_class)
{
    assert(!readyInsts[op_class].empty());

    ListOrderEntry queue_entry;

    queue_entry.queueType = op_class;

    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    ListOrderIt list_it = listOrder.begin();
    ListOrderIt list_end_it = listOrder.end();

    while (list_it != list_end_it) {
        if ((*list_it).oldestInst > queue_entry.oldestInst) {
            break;
        }

        list_it++;
    }

    readyIt[op_class] = listOrder.insert(list_it, queue_entry);
    queueOnList[op_class] = true;
}

template <class Impl>
void
InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
{
    // Get iterator of next item on the list
    // Delete the original iterator
    // Determine if the next item is either the end of the list or younger
    // than the new instruction. If so, then add in a new iterator right here.
    // If not, then move along.
    ListOrderEntry queue_entry;
    OpClass op_class = (*list_order_it).queueType;
    ListOrderIt next_it = list_order_it;

    ++next_it;

    queue_entry.queueType = op_class;
    queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;

    while (next_it != listOrder.end() &&
           (*next_it).oldestInst < queue_entry.oldestInst) {
        ++next_it;
    }

    readyIt[op_class] = listOrder.insert(next_it, queue_entry);
}

template <class Impl>
void
InstructionQueue<Impl>::processFUCompletion(DynInstPtr &inst, int fu_idx)
{
    DPRINTF(IQ, "Processing FU completion [sn:%lli]\n", inst->seqNum);
    assert(!cpu->switchedOut());
    // The CPU could have been sleeping until this op completed (*extremely*
    // long latency op). Wake it if it was. This may be overkill.
    --wbOutstanding;
    iewStage->wakeCPU();

    if (fu_idx > -1)
        fuPool->freeUnitNextCycle(fu_idx);

    // @todo: Ensure that these FU Completions happen at the beginning
    // of a cycle, otherwise they could add too many instructions to
    // the queue.
    issueToExecuteQueue->access(-1)->size++;
    instsToExecute.push_back(inst);
}

// @todo: Figure out a better way to remove the squashed items from the
// lists. Checking the top item of each list to see if it's squashed
// wastes time and forces jumps.
template <class Impl>
void
InstructionQueue<Impl>::scheduleReadyInsts()
{
    DPRINTF(IQ, "Attempting to schedule ready instructions from "
            "the IQ.\n");

    IssueStruct *i2e_info = issueToExecuteQueue->access(0);

    DynInstPtr mem_inst;
    while (mem_inst = getDeferredMemInstToExecute()) {
        addReadyMemInst(mem_inst);
    }

    // See if any cache blocked instructions are able to be executed
    while (mem_inst = getBlockedMemInstToExecute()) {
        addReadyMemInst(mem_inst);
    }

    // Have iterator to head of the list
    // While I haven't exceeded bandwidth or reached the end of the list,
    // Try to get a FU that can do what this op needs.
    // If successful, change the oldestInst to the new top of the list, put
    // the queue in the proper place in the list.
    // Increment the iterator.
    // This will avoid trying to schedule a certain op class if there are no
    // FUs that handle it.
    int total_issued = 0;
    ListOrderIt order_it = listOrder.begin();
    ListOrderIt order_end_it = listOrder.end();

    while (total_issued < totalWidth && order_it != order_end_it) {
        OpClass op_class = (*order_it).queueType;

        assert(!readyInsts[op_class].empty());

        DynInstPtr issuing_inst = readyInsts[op_class].top();

        issuing_inst->isFloating() ? fpInstQueueReads++ : intInstQueueReads++;

        assert(issuing_inst->seqNum == (*order_it).oldestInst);

        if (issuing_inst->isSquashed()) {
            readyInsts[op_class].pop();

            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            listOrder.erase(order_it++);

            ++iqSquashedInstsIssued;

            continue;
        }

        int idx = FUPool::NoCapableFU;
        Cycles op_latency = Cycles(1);
        ThreadID tid = issuing_inst->threadNumber;

        if (op_class != No_OpClass) {
            idx = fuPool->getUnit(op_class);
            issuing_inst->isFloating() ? fpAluAccesses++ : intAluAccesses++;
            if (idx > FUPool::NoFreeFU) {
                op_latency = fuPool->getOpLatency(op_class);
            }
        }

        // If we have an instruction that doesn't require a FU, or a
        // valid FU, then schedule for execution.
        if (idx != FUPool::NoFreeFU) {
            if (op_latency == Cycles(1)) {
                i2e_info->size++;
                instsToExecute.push_back(issuing_inst);

                // Add the FU onto the list of FU's to be freed next
                // cycle if we used one.
                if (idx >= 0)
                    fuPool->freeUnitNextCycle(idx);
            } else {
                bool pipelined = fuPool->isPipelined(op_class);
                // Generate completion event for the FU
                ++wbOutstanding;
                FUCompletion *execution = new FUCompletion(issuing_inst,
                                                           idx, this);

                cpu->schedule(execution,
                              cpu->clockEdge(Cycles(op_latency - 1)));

                if (!pipelined) {
                    // If FU isn't pipelined, then it must be freed
                    // upon the execution completing.
                    execution->setFreeFU();
                } else {
                    // Add the FU onto the list of FU's to be freed next cycle.
                    fuPool->freeUnitNextCycle(idx);
                }
            }

            DPRINTF(IQ, "Thread %i: Issuing instruction PC %s "
                    "[sn:%lli]\n",
                    tid, issuing_inst->pcState(),
                    issuing_inst->seqNum);

            readyInsts[op_class].pop();

            if (!readyInsts[op_class].empty()) {
                moveToYoungerInst(order_it);
            } else {
                readyIt[op_class] = listOrder.end();
                queueOnList[op_class] = false;
            }

            issuing_inst->setIssued();
            ++total_issued;

#if TRACING_ON
            issuing_inst->issueTick = curTick() - issuing_inst->fetchTick;
#endif

            if (!issuing_inst->isMemRef()) {
                // Memory instructions can not be freed from the IQ until they
                // complete.
                ++freeEntries;
                count[tid]--;
                issuing_inst->clearInIQ();
            } else {
                memDepUnit[tid].issue(issuing_inst);
            }

            listOrder.erase(order_it++);
            statIssuedInstType[tid][op_class]++;
        } else {
            statFuBusy[op_class]++;
            fuBusy[tid]++;
            ++order_it;
        }
    }

    numIssuedDist.sample(total_issued);
    iqInstsIssued += total_issued;

    // If we issued any instructions, tell the CPU we had activity.
    // @todo If the way deferred memory instructions are handled due to
    // translation changes then the deferredMemInsts condition should be
    // removed from the code below.
    if (total_issued || !retryMemInsts.empty() || !deferredMemInsts.empty()) {
        cpu->activityThisCycle();
    } else {
        DPRINTF(IQ, "Not able to schedule any instructions.\n");
    }
}

template <class Impl>
void
InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
{
    DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
            "to execute.\n", inst);

    NonSpecMapIt inst_it = nonSpecInsts.find(inst);

    assert(inst_it != nonSpecInsts.end());

    ThreadID tid = (*inst_it).second->threadNumber;

    (*inst_it).second->setAtCommit();

    (*inst_it).second->setCanIssue();

    if (!(*inst_it).second->isMemRef()) {
        addIfReady((*inst_it).second);
    } else {
        memDepUnit[tid].nonSpecInstReady((*inst_it).second);
    }

    (*inst_it).second = NULL;

    nonSpecInsts.erase(inst_it);
}

template <class Impl>
void
InstructionQueue<Impl>::commit(const InstSeqNum &inst, ThreadID tid)
{
    DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n",
            tid,inst);

    ListIt iq_it = instList[tid].begin();

    while (iq_it != instList[tid].end() &&
           (*iq_it)->seqNum <= inst) {
        ++iq_it;
        instList[tid].pop_front();
    }

    assert(freeEntries == (numEntries - countInsts()));
}

template <class Impl>
int
InstructionQueue<Impl>::wakeDependents(DynInstPtr &completed_inst)
{
    int dependents = 0;

    // The instruction queue here takes care of both floating and int ops
    if (completed_inst->isFloating()) {
        fpInstQueueWakeupQccesses++;
    } else {
        intInstQueueWakeupAccesses++;
    }

    DPRINTF(IQ, "Waking dependents of completed instruction.\n");

    assert(!completed_inst->isSquashed());

    // Tell the memory dependence unit to wake any dependents on this
    // instruction if it is a memory instruction. Also complete the memory
    // instruction at this point since we know it executed without issues.
    // @todo: Might want to rename "completeMemInst" to something that
    // indicates that it won't need to be replayed, and call this
    // earlier. Might not be a big deal.
    if (completed_inst->isMemRef()) {
        memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
        completeMemInst(completed_inst);
    } else if (completed_inst->isMemBarrier() ||
               completed_inst->isWriteBarrier()) {
        memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
    }

    for (int dest_reg_idx = 0;
         dest_reg_idx < completed_inst->numDestRegs();
         dest_reg_idx++)
    {
        PhysRegIndex dest_reg =
            completed_inst->renamedDestRegIdx(dest_reg_idx);

        // Special case of uniq or control registers. They are not
        // handled by the IQ and thus have no dependency graph entry.
        // @todo Figure out a cleaner way to handle this.
        if (dest_reg >= numPhysRegs) {
            DPRINTF(IQ, "dest_reg :%d, numPhysRegs: %d\n", dest_reg,
                    numPhysRegs);
            continue;
        }

        DPRINTF(IQ, "Waking any dependents on register %i.\n",
                (int) dest_reg);

        //Go through the dependency chain, marking the registers as
        //ready within the waiting instructions.
        DynInstPtr dep_inst = dependGraph.pop(dest_reg);

        while (dep_inst) {
            DPRINTF(IQ, "Waking up a dependent instruction, [sn:%lli] "
                    "PC %s.\n", dep_inst->seqNum, dep_inst->pcState());

            // Might want to give more information to the instruction
            // so that it knows which of its source registers is
            // ready. However that would mean that the dependency
            // graph entries would need to hold the src_reg_idx.
            dep_inst->markSrcRegReady();

            addIfReady(dep_inst);

            dep_inst = dependGraph.pop(dest_reg);

            ++dependents;
        }

        // Reset the head node now that all of its dependents have
        // been woken up.
        assert(dependGraph.empty(dest_reg));
        dependGraph.clearInst(dest_reg);

        // Mark the scoreboard as having that register ready.
        regScoreboard[dest_reg] = true;
    }
    return dependents;
}

template <class Impl>
void
InstructionQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
{
    OpClass op_class = ready_inst->opClass();

    readyInsts[op_class].push(ready_inst);

    // Will need to reorder the list if either a queue is not on the list,
    // or it has an older instruction than last time.
    if (!queueOnList[op_class]) {
        addToOrderList(op_class);
    } else if (readyInsts[op_class].top()->seqNum <
               (*readyIt[op_class]).oldestInst) {
        listOrder.erase(readyIt[op_class]);
        addToOrderList(op_class);
    }

    DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
            "the ready list, PC %s opclass:%i [sn:%lli].\n",
            ready_inst->pcState(), op_class, ready_inst->seqNum);
}

template <class Impl>
void
InstructionQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
{
    DPRINTF(IQ, "Rescheduling mem inst [sn:%lli]\n", resched_inst->seqNum);

    // Reset DTB translation state
    resched_inst->translationStarted(false);
    resched_inst->translationCompleted(false);

    resched_inst->clearCanIssue();
    memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
}

template <class Impl>
void
InstructionQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
{
    memDepUnit[replay_inst->threadNumber].replay();
}

template <class Impl>
void
InstructionQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
{
    ThreadID tid = completed_inst->threadNumber;

    DPRINTF(IQ, "Completing mem instruction PC: %s [sn:%lli]\n",
            completed_inst->pcState(), completed_inst->seqNum);

    ++freeEntries;

    completed_inst->memOpDone(true);

    memDepUnit[tid].completed(completed_inst);
    count[tid]--;
}

template <class Impl>
void
InstructionQueue<Impl>::deferMemInst(DynInstPtr &deferred_inst)
{
    deferredMemInsts.push_back(deferred_inst);
}

template <class Impl>
void
InstructionQueue<Impl>::blockMemInst(DynInstPtr &blocked_inst)
{
    blocked_inst->translationStarted(false);
    blocked_inst->translationCompleted(false);

    blocked_inst->clearIssued();
    blocked_inst->clearCanIssue();
    blockedMemInsts.push_back(blocked_inst);
}

template <class Impl>
void
InstructionQueue<Impl>::cacheUnblocked()
{
    retryMemInsts.splice(retryMemInsts.end(), blockedMemInsts);
    // Get the CPU ticking again
    cpu->wakeCPU();
}

template <class Impl>
typename Impl::DynInstPtr
InstructionQueue<Impl>::getDeferredMemInstToExecute()
{
    for (ListIt it = deferredMemInsts.begin(); it != deferredMemInsts.end();
         ++it) {
        if ((*it)->translationCompleted() || (*it)->isSquashed()) {
            DynInstPtr mem_inst = *it;
            deferredMemInsts.erase(it);
            return mem_inst;
        }
    }
    return nullptr;
}

template <class Impl>
typename Impl::DynInstPtr
InstructionQueue<Impl>::getBlockedMemInstToExecute()
{
    if (retryMemInsts.empty()) {
        return nullptr;
    } else {
        DynInstPtr mem_inst = retryMemInsts.front();
        retryMemInsts.pop_front();
        return mem_inst;
    }
}

template <class Impl>
void
InstructionQueue<Impl>::violation(DynInstPtr &store,
                                  DynInstPtr &faulting_load)
{
    intInstQueueWrites++;
    memDepUnit[store->threadNumber].violation(store, faulting_load);
}

template <class Impl>
void
InstructionQueue<Impl>::squash(ThreadID tid)
{
    DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
            "the IQ.\n", tid);

    // Read instruction sequence number of last instruction out of the
    // time buffer.
    squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;

    doSquash(tid);

    // Also tell the memory dependence unit to squash.
    memDepUnit[tid].squash(squashedSeqNum[tid], tid);
}

template <class Impl>
void
InstructionQueue<Impl>::doSquash(ThreadID tid)
{
    // Start at the tail.
    ListIt squash_it = instList[tid].end();
    --squash_it;

    DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
            tid, squashedSeqNum[tid]);

    // Squash any instructions younger than the squashed sequence number
    // given.
    while (squash_it != instList[tid].end() &&
           (*squash_it)->seqNum > squashedSeqNum[tid]) {

        DynInstPtr squashed_inst = (*squash_it);
        squashed_inst->isFloating() ? fpInstQueueWrites++ : intInstQueueWrites++;

        // Only handle the instruction if it actually is in the IQ and
        // hasn't already been squashed in the IQ.
        if (squashed_inst->threadNumber != tid ||
            squashed_inst->isSquashedInIQ()) {
            --squash_it;
            continue;
        }

        if (!squashed_inst->isIssued() ||
            (squashed_inst->isMemRef() &&
             !squashed_inst->memOpDone())) {

            DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %s squashed.\n",
                    tid, squashed_inst->seqNum, squashed_inst->pcState());

            bool is_acq_rel = squashed_inst->isMemBarrier() &&
                              (squashed_inst->isLoad() ||
                               (squashed_inst->isStore() &&
                                !squashed_inst->isStoreConditional()));

            // Remove the instruction from the dependency list.
            if (is_acq_rel ||
                (!squashed_inst->isNonSpeculative() &&
                 !squashed_inst->isStoreConditional() &&
                 !squashed_inst->isMemBarrier() &&
                 !squashed_inst->isWriteBarrier())) {

                for (int src_reg_idx = 0;
                     src_reg_idx < squashed_inst->numSrcRegs();
                     src_reg_idx++)
                {
                    PhysRegIndex src_reg =
                        squashed_inst->renamedSrcRegIdx(src_reg_idx);

                    // Only remove it from the dependency graph if it
                    // was placed there in the first place.

                    // Instead of doing a linked list traversal, we
                    // can just remove these squashed instructions
                    // either at issue time, or when the register is
                    // overwritten. The only downside to this is it
                    // leaves more room for error.

                    if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
                        src_reg < numPhysRegs) {
                        dependGraph.remove(src_reg, squashed_inst);
                    }


                    ++iqSquashedOperandsExamined;
                }
            } else if (!squashed_inst->isStoreConditional() ||
                       !squashed_inst->isCompleted()) {
                NonSpecMapIt ns_inst_it =
                    nonSpecInsts.find(squashed_inst->seqNum);

                // we remove non-speculative instructions from
                // nonSpecInsts already when they are ready, and so we
                // cannot always expect to find them
                if (ns_inst_it == nonSpecInsts.end()) {
                    // loads that became ready but stalled on a
                    // blocked cache are already removed from
                    // nonSpecInsts, and have not faulted
                    assert(squashed_inst->getFault() != NoFault ||
                           squashed_inst->isMemRef());
                } else {

                    (*ns_inst_it).second = NULL;

                    nonSpecInsts.erase(ns_inst_it);

                    ++iqSquashedNonSpecRemoved;
                }
            }

            // Might want to also clear out the head of the dependency graph.

            // Mark it as squashed within the IQ.
            squashed_inst->setSquashedInIQ();

            // @todo: Remove this hack where several statuses are set so the
            // inst will flow through the rest of the pipeline.
            squashed_inst->setIssued();
            squashed_inst->setCanCommit();
            squashed_inst->clearInIQ();

            //Update Thread IQ Count
            count[squashed_inst->threadNumber]--;

            ++freeEntries;
        }

        instList[tid].erase(squash_it--);
        ++iqSquashedInstsExamined;
    }
}

template <class Impl>
bool
InstructionQueue<Impl>::addToDependents(DynInstPtr &new_inst)
{
    // Loop through the instruction's source registers, adding
    // them to the dependency list if they are not ready.
    int8_t total_src_regs = new_inst->numSrcRegs();
    bool return_val = false;

    for (int src_reg_idx = 0;
         src_reg_idx < total_src_regs;
         src_reg_idx++)
    {
        // Only add it to the dependency graph if it's not ready.
        if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
            PhysRegIndex src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);

            // Check the IQ's scoreboard to make sure the register
            // hasn't become ready while the instruction was in flight
            // between stages. Only if it really isn't ready should
            // it be added to the dependency graph.
            if (src_reg >= numPhysRegs) {
                continue;
            } else if (!regScoreboard[src_reg]) {
                DPRINTF(IQ, "Instruction PC %s has src reg %i that "
                        "is being added to the dependency chain.\n",
                        new_inst->pcState(), src_reg);

                dependGraph.insert(src_reg, new_inst);

                // Change the return value to indicate that something
                // was added to the dependency graph.
                return_val = true;
            } else {
                DPRINTF(IQ, "Instruction PC %s has src reg %i that "
                        "became ready before it reached the IQ.\n",
                        new_inst->pcState(), src_reg);
                // Mark a register ready within the instruction.
                new_inst->markSrcRegReady(src_reg_idx);
            }
        }
    }

    return return_val;
}

template <class Impl>
void
InstructionQueue<Impl>::addToProducers(DynInstPtr &new_inst)
{
    // Nothing really needs to be marked when an instruction becomes
    // the producer of a register's value, but for convenience a ptr
    // to the producing instruction will be placed in the head node of
    // the dependency links.
    int8_t total_dest_regs = new_inst->numDestRegs();

    for (int dest_reg_idx = 0;
         dest_reg_idx < total_dest_regs;
         dest_reg_idx++)
    {
        PhysRegIndex dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);

        // Instructions that use the misc regs will have a reg number
        // higher than the normal physical registers. In this case these
        // registers are not renamed, and there is no need to track
        // dependencies as these instructions must be executed at commit.
        if (dest_reg >= numPhysRegs) {
            continue;
        }

        if (!dependGraph.empty(dest_reg)) {
            dependGraph.dump();
            panic("Dependency graph %i not empty!", dest_reg);
        }

        dependGraph.setInst(dest_reg, new_inst);

        // Mark the scoreboard to say it's not yet ready.
        regScoreboard[dest_reg] = false;
    }
}

template <class Impl>
void
InstructionQueue<Impl>::addIfReady(DynInstPtr &inst)
{
    // If the instruction now has all of its source registers
    // available, then add it to the list of ready instructions.
    if (inst->readyToIssue()) {

        //Add the instruction to the proper ready list.
        if (inst->isMemRef()) {

            DPRINTF(IQ, "Checking if memory instruction can issue.\n");

            // Message to the mem dependence unit that this instruction has
            // its registers ready.
            memDepUnit[inst->threadNumber].regsReady(inst);

            return;
        }

        OpClass op_class = inst->opClass();

        DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
                "the ready list, PC %s opclass:%i [sn:%lli].\n",
                inst->pcState(), op_class, inst->seqNum);

        readyInsts[op_class].push(inst);

        // Will need to reorder the list if either a queue is not on the list,
        // or it has an older instruction than last time.
        if (!queueOnList[op_class]) {
            addToOrderList(op_class);
        } else if (readyInsts[op_class].top()->seqNum <
                   (*readyIt[op_class]).oldestInst) {
            listOrder.erase(readyIt[op_class]);
            addToOrderList(op_class);
        }
    }
}

template <class Impl>
int
InstructionQueue<Impl>::countInsts()
{
#if 0
    //ksewell: This works but definitely could use a cleaner write
    //with a more intuitive way of counting. Right now it's
    //just brute force ....
    // Change the #if if you want to use this method.
    int total_insts = 0;

    for (ThreadID tid = 0; tid < numThreads; ++tid) {
        ListIt count_it = instList[tid].begin();

        while (count_it != instList[tid].end()) {
            if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
                if (!(*count_it)->isIssued()) {
                    ++total_insts;
                } else if ((*count_it)->isMemRef() &&
                           !(*count_it)->memOpDone) {
                    // Loads that have not been marked as executed still count
                    // towards the total instructions.
                    ++total_insts;
                }
            }

            ++count_it;
        }
    }

    return total_insts;
#else
    return numEntries - freeEntries;
#endif
}

template <class Impl>
void
InstructionQueue<Impl>::dumpLists()
{
    for (int i = 0; i < Num_OpClasses; ++i) {
        cprintf("Ready list %i size: %i\n", i, readyInsts[i].size());

        cprintf("\n");
    }

    cprintf("Non speculative list size: %i\n", nonSpecInsts.size());

    NonSpecMapIt non_spec_it = nonSpecInsts.begin();
    NonSpecMapIt non_spec_end_it = nonSpecInsts.end();

    cprintf("Non speculative list: ");

    while (non_spec_it != non_spec_end_it) {
        cprintf("%s [sn:%lli]", (*non_spec_it).second->pcState(),
                (*non_spec_it).second->seqNum);
        ++non_spec_it;
    }

    cprintf("\n");

    ListOrderIt list_order_it = listOrder.begin();
    ListOrderIt list_order_end_it = listOrder.end();
    int i = 1;

    cprintf("List order: ");

    while (list_order_it != list_order_end_it) {
        cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
                (*list_order_it).oldestInst);

        ++list_order_it;
        ++i;
    }

    cprintf("\n");
}


template <class Impl>
void
InstructionQueue<Impl>::dumpInsts()
{
    for (ThreadID tid = 0; tid < numThreads; ++tid) {
        int num = 0;
        int valid_num = 0;
        ListIt inst_list_it = instList[tid].begin();

        while (inst_list_it != instList[tid].end()) {
            cprintf("Instruction:%i\n", num);
            if (!(*inst_list_it)->isSquashed()) {
                if (!(*inst_list_it)->isIssued()) {
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                } else if ((*inst_list_it)->isMemRef() &&
                           !(*inst_list_it)->memOpDone()) {
                    // Loads that have not been marked as executed
                    // still count towards the total instructions.
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                }
            }

            cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n"
                    "Issued:%i\nSquashed:%i\n",
                    (*inst_list_it)->pcState(),
                    (*inst_list_it)->seqNum,
                    (*inst_list_it)->threadNumber,
                    (*inst_list_it)->isIssued(),
                    (*inst_list_it)->isSquashed());

            if ((*inst_list_it)->isMemRef()) {
                cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone());
            }

            cprintf("\n");

            inst_list_it++;
            ++num;
        }
    }

    cprintf("Insts to Execute list:\n");

    int num = 0;
    int valid_num = 0;
    ListIt inst_list_it = instsToExecute.begin();

    while (inst_list_it != instsToExecute.end())
    {
        cprintf("Instruction:%i\n",
                num);
        if (!(*inst_list_it)->isSquashed()) {
            if (!(*inst_list_it)->isIssued()) {
                ++valid_num;
                cprintf("Count:%i\n", valid_num);
            } else if ((*inst_list_it)->isMemRef() &&
                       !(*inst_list_it)->memOpDone()) {
                // Loads that have not been marked as executed
                // still count towards the total instructions.
                ++valid_num;
                cprintf("Count:%i\n", valid_num);
            }
        }

        cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n"
                "Issued:%i\nSquashed:%i\n",
                (*inst_list_it)->pcState(),
                (*inst_list_it)->seqNum,
                (*inst_list_it)->threadNumber,
                (*inst_list_it)->isIssued(),
                (*inst_list_it)->isSquashed());

        if ((*inst_list_it)->isMemRef()) {
            cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone());
        }

        cprintf("\n");

        inst_list_it++;
        ++num;
    }
}

#endif //__CPU_O3_INST_QUEUE_IMPL_HH__