// iew_impl.hh revision 4329
/*
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 */

// @todo: Fix the instantaneous communication among all the stages within
// iew.  There's a clear delay between issue and execute, yet backwards
// communication happens simultaneously.
#include <queue>

#include "base/timebuf.hh"
#include "cpu/o3/fu_pool.hh"
#include "cpu/o3/iew.hh"

// Construct the IEW (Issue/Execute/Writeback) stage: wires up the
// issue-to-execute time buffer, hooks the instruction queue into it, and
// initializes per-thread dispatch state from the CPU parameters.
template<class Impl>
DefaultIEW<Impl>::DefaultIEW(O3CPU *_cpu, Params *params)
    : issueToExecQueue(params->backComSize, params->forwardComSize),
      cpu(_cpu),
      instQueue(_cpu, this, params),
      ldstQueue(_cpu, this, params),
      fuPool(params->fuPool),
      commitToIEWDelay(params->commitToIEWDelay),
      renameToIEWDelay(params->renameToIEWDelay),
      issueToExecuteDelay(params->issueToExecuteDelay),
      dispatchWidth(params->dispatchWidth),
      issueWidth(params->issueWidth),
      wbOutstanding(0),
      wbWidth(params->wbWidth),
      numThreads(params->numberOfThreads),
      switchedOut(false)
{
    _status = Active;
    exeStatus = Running;
    wbStatus = Idle;

    // Setup wire to read instructions coming from issue.
    fromIssue = issueToExecQueue.getWire(-issueToExecuteDelay);

    // Instruction queue needs the queue between issue and execute.
    instQueue.setIssueToExecuteQueue(&issueToExecQueue);

    for (int i=0; i < numThreads; i++) {
        dispatchStatus[i] = Running;
        stalls[i].commit = false;
        fetchRedirect[i] = false;
        bdelayDoneSeqNum[i] = 0;
    }

    // Upper bound on instructions in flight to writeback: one group of
    // wbWidth ports for each cycle of writeback depth.
    wbMax = wbWidth * params->wbDepth;

    updateLSQNextCycle = false;

    ableToIssue = true;

    // Skid buffer must absorb everything rename can send while the
    // block signal propagates back (3x the rename-to-IEW pipe) plus one
    // issue group.
    skidBufferMax = (3 * (renameToIEWDelay * params->renameWidth)) + issueWidth;
}

// Name of this stage, used for stats registration and DPRINTF output.
template <class Impl>
std::string
DefaultIEW<Impl>::name() const
{
    return cpu->name() + ".iew";
}

// Register all statistics for the IEW stage (and its IQ / LSQ members).
template <class Impl>
void
DefaultIEW<Impl>::regStats()
{
    using namespace Stats;

    instQueue.regStats();
    ldstQueue.regStats();

    iewIdleCycles
        .name(name() + ".iewIdleCycles")
        .desc("Number of cycles IEW is idle");

    iewSquashCycles
        .name(name() + ".iewSquashCycles")
        .desc("Number of cycles IEW is squashing");

    iewBlockCycles
        .name(name() + ".iewBlockCycles")
        .desc("Number of cycles IEW is blocking");

    iewUnblockCycles
        .name(name() + ".iewUnblockCycles")
        .desc("Number of cycles IEW is unblocking");

    iewDispatchedInsts
        .name(name() + ".iewDispatchedInsts")
        .desc("Number of instructions dispatched to IQ");

    iewDispSquashedInsts
        .name(name() + ".iewDispSquashedInsts")
        .desc("Number of squashed instructions skipped by dispatch");

    iewDispLoadInsts
        .name(name() + ".iewDispLoadInsts")
        .desc("Number of dispatched load instructions");

    iewDispStoreInsts
        .name(name() + ".iewDispStoreInsts")
        .desc("Number of dispatched store instructions");

    iewDispNonSpecInsts
        .name(name() + ".iewDispNonSpecInsts")
        .desc("Number of dispatched non-speculative instructions");

    iewIQFullEvents
        .name(name() + ".iewIQFullEvents")
        .desc("Number of times the IQ has become full, causing a stall");

    iewLSQFullEvents
        .name(name() + ".iewLSQFullEvents")
        .desc("Number of times the LSQ has become full, causing a stall");

    memOrderViolationEvents
        .name(name() + ".memOrderViolationEvents")
        .desc("Number of memory order violations");

    predictedTakenIncorrect
        .name(name() + ".predictedTakenIncorrect")
        .desc("Number of branches that were predicted taken incorrectly");

    predictedNotTakenIncorrect
        .name(name() + ".predictedNotTakenIncorrect")
        .desc("Number of branches that were predicted not taken incorrectly");

    branchMispredicts
        .name(name() + ".branchMispredicts")
        .desc("Number of branch mispredicts detected at execute");

    // Formula: total mispredicts is the sum of the two directional counts.
    branchMispredicts = predictedTakenIncorrect + predictedNotTakenIncorrect;

    iewExecutedInsts
        .name(name() + ".iewExecutedInsts")
        .desc("Number of executed instructions");

    iewExecLoadInsts
        .init(cpu->number_of_threads)
        .name(name() + ".iewExecLoadInsts")
        .desc("Number of load instructions executed")
        .flags(total);

    iewExecSquashedInsts
        .name(name() + ".iewExecSquashedInsts")
        .desc("Number of squashed instructions skipped in execute");

    iewExecutedSwp
        .init(cpu->number_of_threads)
        .name(name() + ".EXEC:swp")
        .desc("number of swp insts executed")
        .flags(total);

    iewExecutedNop
        .init(cpu->number_of_threads)
        .name(name() + ".EXEC:nop")
        .desc("number of nop insts executed")
        .flags(total);

    iewExecutedRefs
        .init(cpu->number_of_threads)
        .name(name() + ".EXEC:refs")
        .desc("number of memory reference insts executed")
        .flags(total);

    iewExecutedBranches
        .init(cpu->number_of_threads)
        .name(name() + ".EXEC:branches")
        .desc("Number of branches executed")
        .flags(total);

    iewExecStoreInsts
        .name(name() + ".EXEC:stores")
        .desc("Number of stores executed")
        .flags(total);
    // Stores executed = memory refs executed minus loads executed.
    iewExecStoreInsts = iewExecutedRefs - iewExecLoadInsts;

    iewExecRate
        .name(name() + ".EXEC:rate")
        .desc("Inst execution rate")
        .flags(total);

    iewExecRate = iewExecutedInsts / cpu->numCycles;

    iewInstsToCommit
        .init(cpu->number_of_threads)
        .name(name() + ".WB:sent")
        .desc("cumulative count of insts sent to commit")
        .flags(total);

    writebackCount
        .init(cpu->number_of_threads)
        .name(name() + ".WB:count")
        .desc("cumulative count of insts written-back")
        .flags(total);

    producerInst
        .init(cpu->number_of_threads)
        .name(name() + ".WB:producers")
        .desc("num instructions producing a value")
        .flags(total);

    consumerInst
        .init(cpu->number_of_threads)
        .name(name() + ".WB:consumers")
        .desc("num instructions consuming a value")
        .flags(total);

    wbPenalized
        .init(cpu->number_of_threads)
        .name(name() + ".WB:penalized")
        .desc("number of instrctions required to write to 'other' IQ")
        .flags(total);

    wbPenalizedRate
        .name(name() + ".WB:penalized_rate")
        .desc ("fraction of instructions written-back that wrote to 'other' IQ")
        .flags(total);

    wbPenalizedRate = wbPenalized / writebackCount;

    wbFanout
        .name(name() + ".WB:fanout")
        .desc("average fanout of values written-back")
        .flags(total);

    wbFanout = producerInst / consumerInst;

    wbRate
        .name(name() + ".WB:rate")
        .desc("insts written-back per cycle")
        .flags(total);
    wbRate = writebackCount / cpu->numCycles;
}

// Initialize the stage: advertise initial free IQ/LSQ entries to rename
// and mark this stage active with the CPU's activity tracker.
template<class Impl>
void
DefaultIEW<Impl>::initStage()
{
    for (int tid=0; tid < numThreads; tid++) {
        toRename->iewInfo[tid].usedIQ = true;
        toRename->iewInfo[tid].freeIQEntries =
            instQueue.numFreeEntries(tid);

        toRename->iewInfo[tid].usedLSQ = true;
        toRename->iewInfo[tid].freeLSQEntries =
            ldstQueue.numFreeEntries(tid);
    }

    cpu->activateStage(O3CPU::IEWIdx);
}

// Connect the main backwards time buffer shared by all stages.
template<class Impl>
void
DefaultIEW<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
{
    timeBuffer = tb_ptr;

    // Setup wire to read information from time buffer, from commit.
    fromCommit = timeBuffer->getWire(-commitToIEWDelay);

    // Setup wire to write information back to previous stages.
    toRename = timeBuffer->getWire(0);

    toFetch = timeBuffer->getWire(0);

    // Instruction queue also needs main time buffer.
    instQueue.setTimeBuffer(tb_ptr);
}

// Connect the queue carrying instructions from rename to IEW.
template<class Impl>
void
DefaultIEW<Impl>::setRenameQueue(TimeBuffer<RenameStruct> *rq_ptr)
{
    renameQueue = rq_ptr;

    // Setup wire to read information from rename queue.
    fromRename = renameQueue->getWire(-renameToIEWDelay);
}

// Connect the queue carrying completed instructions from IEW to commit.
template<class Impl>
void
DefaultIEW<Impl>::setIEWQueue(TimeBuffer<IEWStruct> *iq_ptr)
{
    iewQueue = iq_ptr;

    // Setup wire to write instructions to commit.
    toCommit = iewQueue->getWire(0);
}

// Record the list of active threads; propagate it to the IQ and LSQ.
template<class Impl>
void
DefaultIEW<Impl>::setActiveThreads(std::list<unsigned> *at_ptr)
{
    activeThreads = at_ptr;

    ldstQueue.setActiveThreads(at_ptr);
    instQueue.setActiveThreads(at_ptr);
}

// Record the register scoreboard used to track ready registers.
template<class Impl>
void
DefaultIEW<Impl>::setScoreboard(Scoreboard *sb_ptr)
{
    scoreboard = sb_ptr;
}

// Drain handshake: IEW has no special drain state, so signal immediately.
template <class Impl>
bool
DefaultIEW<Impl>::drain()
{
    // IEW is ready to drain at any time.
    cpu->signalDrained();
    return true;
}

// Resume after a drain; nothing to restore for this stage.
template <class Impl>
void
DefaultIEW<Impl>::resume()
{
}

// Switch this CPU's IEW stage out, clearing any buffered instructions.
template <class Impl>
void
DefaultIEW<Impl>::switchOut()
{
    // Clear any state.
    switchedOut = true;
    // NOTE(review): these asserts only check thread 0 even though the
    // loop below drains all threads — presumably a debugging leftover;
    // confirm whether all threads are expected to be empty here.
    assert(insts[0].empty());
    assert(skidBuffer[0].empty());

    instQueue.switchOut();
    ldstQueue.switchOut();
    fuPool->switchOut();

    for (int i = 0; i < numThreads; i++) {
        while (!insts[i].empty())
            insts[i].pop();
        while (!skidBuffer[i].empty())
            skidBuffer[i].pop();
    }
}

// Take over from another CPU's IEW stage: reset all per-thread state and
// flush the issue-to-execute queue.
template <class Impl>
void
DefaultIEW<Impl>::takeOverFrom()
{
    // Reset all state.
    _status = Active;
    exeStatus = Running;
    wbStatus = Idle;
    switchedOut = false;

    instQueue.takeOverFrom();
    ldstQueue.takeOverFrom();
    fuPool->takeOverFrom();

    initStage();
    cpu->activityThisCycle();

    for (int i=0; i < numThreads; i++) {
        dispatchStatus[i] = Running;
        stalls[i].commit = false;
        fetchRedirect[i] = false;
    }

    updateLSQNextCycle = false;

    // Advance the issue-to-execute queue through its full length so no
    // stale instructions remain visible.
    for (int i = 0; i < issueToExecQueue.getSize(); ++i) {
        issueToExecQueue.advance();
    }
}

// Squash all instructions in the given thread: tell the IQ and LSQ to
// squash, and drain the skid buffer of anything younger than the squash
// point, crediting rename for each discarded instruction.
template<class Impl>
void
DefaultIEW<Impl>::squash(unsigned tid)
{
    DPRINTF(IEW, "[tid:%i]: Squashing all instructions.\n",
            tid);

    // Tell the IQ to start squashing.
    instQueue.squash(tid);

    // Tell the LDSTQ to start squashing.
#if ISA_HAS_DELAY_SLOT
    ldstQueue.squash(fromCommit->commitInfo[tid].bdelayDoneSeqNum, tid);
#else
    ldstQueue.squash(fromCommit->commitInfo[tid].doneSeqNum, tid);
#endif
    updatedQueues = true;

    // Clear the skid buffer in case it has any data in it.
    DPRINTF(IEW, "[tid:%i]: Removing skidbuffer instructions until [sn:%i].\n",
            tid, fromCommit->commitInfo[tid].bdelayDoneSeqNum);

    while (!skidBuffer[tid].empty()) {
#if ISA_HAS_DELAY_SLOT
        if (skidBuffer[tid].front()->seqNum <=
            fromCommit->commitInfo[tid].bdelayDoneSeqNum) {
            // NOTE(review): the arguments below appear swapped relative to
            // the "[tid:%i] ... [sn:%i]" format string — confirm.
            DPRINTF(IEW, "[tid:%i]: Cannot remove skidbuffer instructions "
                    "that occur before delay slot [sn:%i].\n",
                    fromCommit->commitInfo[tid].bdelayDoneSeqNum,
                    tid);
            break;
        } else {
            DPRINTF(IEW, "[tid:%i]: Removing instruction [sn:%i] from "
                    "skidBuffer.\n", tid, skidBuffer[tid].front()->seqNum);
        }
#endif
        if (skidBuffer[tid].front()->isLoad() ||
            skidBuffer[tid].front()->isStore() ) {
            toRename->iewInfo[tid].dispatchedToLSQ++;
        }

        toRename->iewInfo[tid].dispatched++;

        skidBuffer[tid].pop();
    }

    bdelayDoneSeqNum[tid] = fromCommit->commitInfo[tid].bdelayDoneSeqNum;

    emptyRenameInsts(tid);
}

// Signal commit to squash from a mispredicted branch, supplying the
// corrected target PC/NPC (delay-slot aware on ISAs that have one).
template<class Impl>
void
DefaultIEW<Impl>::squashDueToBranch(DynInstPtr &inst, unsigned tid)
{
    DPRINTF(IEW, "[tid:%i]: Squashing from a specific instruction, PC: %#x "
            "[sn:%i].\n", tid, inst->readPC(), inst->seqNum);

    toCommit->squash[tid] = true;
    toCommit->squashedSeqNum[tid] = inst->seqNum;
    toCommit->mispredPC[tid] = inst->readPC();
    toCommit->branchMispredict[tid] = true;

    int instSize = sizeof(TheISA::MachInst);
#if ISA_HAS_DELAY_SLOT
    // A branch is "taken" unless execution just fell through to the next
    // sequential pair of instructions.
    bool branch_taken =
        !(inst->readNextPC() + instSize == inst->readNextNPC() &&
          (inst->readNextPC() == inst->readPC() + instSize ||
           inst->readNextPC() == inst->readPC() + 2 * instSize));
    DPRINTF(Sparc, "Branch taken = %s [sn:%i]\n",
            branch_taken ? "true": "false", inst->seqNum);

    toCommit->branchTaken[tid] = branch_taken;

    bool squashDelaySlot = true;
//        (inst->readNextPC() != inst->readPC() + sizeof(TheISA::MachInst));
    DPRINTF(Sparc, "Squash delay slot = %s [sn:%i]\n",
            squashDelaySlot ? "true": "false", inst->seqNum);
    toCommit->squashDelaySlot[tid] = squashDelaySlot;
    //If we're squashing the delay slot, we need to pick back up at NextPC.
    //Otherwise, NextPC isn't being squashed, so we should pick back up at
    //NextNPC.
    if (squashDelaySlot) {
        toCommit->nextPC[tid] = inst->readNextPC();
        toCommit->nextNPC[tid] = inst->readNextNPC();
    } else {
        toCommit->nextPC[tid] = inst->readNextNPC();
        toCommit->nextNPC[tid] = inst->readNextNPC() + instSize;
    }
#else
    toCommit->branchTaken[tid] = inst->readNextPC() !=
        (inst->readPC() + sizeof(TheISA::MachInst));
    toCommit->nextPC[tid] = inst->readNextPC();
    toCommit->nextNPC[tid] = inst->readNextPC() + instSize;
#endif

    toCommit->includeSquashInst[tid] = false;

    wroteToTimeBuffer = true;
}

// Signal commit to squash because a memory ordering violation was
// detected at this instruction; restart after it.
template<class Impl>
void
DefaultIEW<Impl>::squashDueToMemOrder(DynInstPtr &inst, unsigned tid)
{
    DPRINTF(IEW, "[tid:%i]: Squashing from a specific instruction, "
            "PC: %#x [sn:%i].\n", tid, inst->readPC(), inst->seqNum);

    toCommit->squash[tid] = true;
    toCommit->squashedSeqNum[tid] = inst->seqNum;
    toCommit->nextPC[tid] = inst->readNextPC();
#if ISA_HAS_DELAY_SLOT
    toCommit->nextNPC[tid] = inst->readNextNPC();
#else
    toCommit->nextNPC[tid] = inst->readNextPC() + sizeof(TheISA::MachInst);
#endif
    toCommit->branchMispredict[tid] = false;

    toCommit->includeSquashInst[tid] = false;

    wroteToTimeBuffer = true;
}

// Signal commit to squash a load (and everything younger) because memory
// was blocked; the load itself is re-fetched, so it is included in the
// squash.
template<class Impl>
void
DefaultIEW<Impl>::squashDueToMemBlocked(DynInstPtr &inst, unsigned tid)
{
    DPRINTF(IEW, "[tid:%i]: Memory blocked, squashing load and younger insts, "
            "PC: %#x [sn:%i].\n", tid, inst->readPC(), inst->seqNum);

    toCommit->squash[tid] = true;
    toCommit->squashedSeqNum[tid] = inst->seqNum;
    toCommit->nextPC[tid] = inst->readPC();
#if ISA_HAS_DELAY_SLOT
    toCommit->nextNPC[tid] = inst->readNextPC();
#else
    toCommit->nextNPC[tid] = inst->readPC() + sizeof(TheISA::MachInst);
#endif
    toCommit->branchMispredict[tid] = false;

    // Must include the broadcasted SN in the squash.
    toCommit->includeSquashInst[tid] = true;

    ldstQueue.setLoadBlockedHandled(tid);

    wroteToTimeBuffer = true;
}

// Block the given thread: notify rename (unless already blocked or
// unblocking), buffer the current inputs, and change status.
template<class Impl>
void
DefaultIEW<Impl>::block(unsigned tid)
{
    DPRINTF(IEW, "[tid:%u]: Blocking.\n", tid);

    if (dispatchStatus[tid] != Blocked &&
        dispatchStatus[tid] != Unblocking) {
        toRename->iewBlock[tid] = true;
        wroteToTimeBuffer = true;
    }

    // Add the current inputs to the skid buffer so they can be
    // reprocessed when this stage unblocks.
    skidInsert(tid);

    dispatchStatus[tid] = Blocked;
}

// Finish unblocking the given thread: once its skid buffer is empty,
// tell rename it may resume sending instructions.
template<class Impl>
void
DefaultIEW<Impl>::unblock(unsigned tid)
{
    DPRINTF(IEW, "[tid:%i]: Reading instructions out of the skid "
            "buffer %u.\n",tid, tid);

    // If the skid bufffer is empty, signal back to previous stages to unblock.
    // Also switch status to running.
    if (skidBuffer[tid].empty()) {
        toRename->iewUnblock[tid] = true;
        wroteToTimeBuffer = true;
        DPRINTF(IEW, "[tid:%i]: Done unblocking.\n",tid);
        dispatchStatus[tid] = Running;
    }
}

// Forward to the IQ: wake any instructions dependent on this one.
template<class Impl>
void
DefaultIEW<Impl>::wakeDependents(DynInstPtr &inst)
{
    instQueue.wakeDependents(inst);
}

// Forward to the IQ: reschedule a memory instruction for later replay.
template<class Impl>
void
DefaultIEW<Impl>::rescheduleMemInst(DynInstPtr &inst)
{
    instQueue.rescheduleMemInst(inst);
}

// Forward to the IQ: replay a previously rescheduled memory instruction.
template<class Impl>
void
DefaultIEW<Impl>::replayMemInst(DynInstPtr &inst)
{
    instQueue.replayMemInst(inst);
}

// Place a finished instruction into the next free writeback slot of the
// IEW-to-commit queue, searching forward across cycles if the current
// cycle's writeback ports are all taken.
template<class Impl>
void
DefaultIEW<Impl>::instToCommit(DynInstPtr &inst)
{
    // This function should not be called after writebackInsts in a
    // single cycle.  That will cause problems with an instruction
    // being added to the queue to commit without being processed by
    // writebackInsts prior to being sent to commit.

    // First check the time slot that this instruction will write
    // to.  If there are free write ports at the time, then go ahead
    // and write the instruction to that time.  If there are not,
    // keep looking back to see where's the first time there's a
    // free slot.
    while ((*iewQueue)[wbCycle].insts[wbNumInst]) {
        ++wbNumInst;
        if (wbNumInst == wbWidth) {
            ++wbCycle;
            wbNumInst = 0;
        }

        assert((wbCycle * wbWidth + wbNumInst) <= wbMax);
    }

    DPRINTF(IEW, "Current wb cycle: %i, width: %i, numInst: %i\nwbActual:%i\n",
            wbCycle, wbWidth, wbNumInst, wbCycle * wbWidth + wbNumInst);
    // Add finished instruction to queue to commit.
    (*iewQueue)[wbCycle].insts[wbNumInst] = inst;
    (*iewQueue)[wbCycle].size++;
}

// Count the non-squashed instructions arriving from rename this cycle.
template <class Impl>
unsigned
DefaultIEW<Impl>::validInstsFromRename()
{
    unsigned inst_count = 0;

    for (int i=0; i<fromRename->size; i++) {
        if (!fromRename->insts[i]->isSquashed())
            inst_count++;
    }

    return inst_count;
}

// Move all buffered incoming instructions for a thread into its skid
// buffer (used while the stage is blocked).
template<class Impl>
void
DefaultIEW<Impl>::skidInsert(unsigned tid)
{
    DynInstPtr inst = NULL;

    while (!insts[tid].empty()) {
        inst = insts[tid].front();

        insts[tid].pop();

        // NOTE(review): this traces under the Decode flag rather than IEW —
        // presumably a copy/paste leftover; confirm.
        DPRINTF(Decode,"[tid:%i]: Inserting [sn:%lli] PC:%#x into "
                "dispatch skidBuffer %i\n",tid, inst->seqNum,
                inst->readPC(),tid);

        skidBuffer[tid].push(inst);
    }

    assert(skidBuffer[tid].size() <= skidBufferMax &&
           "Skidbuffer Exceeded Max Size");
}

// Return the largest skid-buffer occupancy across all active threads.
template<class Impl>
int
DefaultIEW<Impl>::skidCount()
{
    int max=0;

    std::list<unsigned>::iterator threads = activeThreads->begin();
    std::list<unsigned>::iterator end = activeThreads->end();

    while (threads != end) {
        unsigned tid = *threads++;
        unsigned thread_count = skidBuffer[tid].size();
        if (max < thread_count)
            max = thread_count;
    }

    return max;
}

// True if every active thread's skid buffer is empty.
template<class Impl>
bool
DefaultIEW<Impl>::skidsEmpty()
{
    std::list<unsigned>::iterator threads = activeThreads->begin();
    std::list<unsigned>::iterator end = activeThreads->end();

    while (threads != end) {
        unsigned tid = *threads++;

        if (!skidBuffer[tid].empty())
            return false;
    }

    return true;
}

// Update the stage's overall Active/Inactive status based on IQ
// readiness, pending store writebacks, and unblocking threads; notify
// the CPU's activity tracker on transitions.
template <class Impl>
void
DefaultIEW<Impl>::updateStatus()
{
    bool any_unblocking = false;

    std::list<unsigned>::iterator threads = activeThreads->begin();
    std::list<unsigned>::iterator end = activeThreads->end();

    while (threads != end) {
        unsigned tid = *threads++;

        if (dispatchStatus[tid] == Unblocking) {
            any_unblocking = true;
            break;
        }
    }

    // If there are no ready instructions waiting to be scheduled by the IQ,
    // and there's no stores waiting to write back, and dispatch is not
    // unblocking, then there is no internal activity for the IEW stage.
    if (_status == Active && !instQueue.hasReadyInsts() &&
        !ldstQueue.willWB() && !any_unblocking) {
        DPRINTF(IEW, "IEW switching to idle\n");

        deactivateStage();

        _status = Inactive;
    } else if (_status == Inactive && (instQueue.hasReadyInsts() ||
                                       ldstQueue.willWB() ||
                                       any_unblocking)) {
        // Otherwise there is internal activity.  Set to active.
        DPRINTF(IEW, "IEW switching to active\n");

        activateStage();

        _status = Active;
    }
}

// Reset IQ and LSQ entry bookkeeping (e.g. after thread count changes).
template <class Impl>
void
DefaultIEW<Impl>::resetEntries()
{
    instQueue.resetEntries();
    ldstQueue.resetEntries();
}

// Latch block/unblock signals from commit into the per-thread stall state.
template <class Impl>
void
DefaultIEW<Impl>::readStallSignals(unsigned tid)
{
    if (fromCommit->commitBlock[tid]) {
        stalls[tid].commit = true;
    }

    if (fromCommit->commitUnblock[tid]) {
        assert(stalls[tid].commit);
        stalls[tid].commit = false;
    }
}

// True if the given thread must stall this cycle: commit stall, full IQ,
// full LSQ, or an LSQ-detected stall condition.
template <class Impl>
bool
DefaultIEW<Impl>::checkStall(unsigned tid)
{
    bool ret_val(false);

    if (stalls[tid].commit) {
        DPRINTF(IEW,"[tid:%i]: Stall from Commit stage detected.\n",tid);
        ret_val = true;
    } else if (instQueue.isFull(tid)) {
        DPRINTF(IEW,"[tid:%i]: Stall: IQ  is full.\n",tid);
        ret_val = true;
    } else if (ldstQueue.isFull(tid)) {
        DPRINTF(IEW,"[tid:%i]: Stall: LSQ is full\n",tid);

        if (ldstQueue.numLoads(tid) > 0 ) {

            DPRINTF(IEW,"[tid:%i]: LSQ oldest load: [sn:%i] \n",
                    tid,ldstQueue.getLoadHeadSeqNum(tid));
        }

        if (ldstQueue.numStores(tid) > 0) {

            DPRINTF(IEW,"[tid:%i]: LSQ oldest store: [sn:%i] \n",
                    tid,ldstQueue.getStoreHeadSeqNum(tid));
        }

        ret_val = true;
    } else if (ldstQueue.isStalled(tid)) {
        DPRINTF(IEW,"[tid:%i]: Stall: LSQ stall detected.\n",tid);
        ret_val = true;
    }

    return ret_val;
}

// Per-cycle status update for one thread: handle squash signals from
// commit, stall conditions, and transitions between Blocked, Unblocking,
// Squashing, and Running.
template <class Impl>
void
DefaultIEW<Impl>::checkSignalsAndUpdate(unsigned tid)
{
    // Check if there's a squash signal, squash if there is
    // Check stall signals, block if there is.
    // If status was Blocked
    //     if so then go to unblocking
    // If status was Squashing
    //     check if squashing is not high.  Switch to running this cycle.

    readStallSignals(tid);

    if (fromCommit->commitInfo[tid].squash) {
        squash(tid);

        if (dispatchStatus[tid] == Blocked ||
            dispatchStatus[tid] == Unblocking) {
            toRename->iewUnblock[tid] = true;
            wroteToTimeBuffer = true;
        }

        dispatchStatus[tid] = Squashing;

        fetchRedirect[tid] = false;
        return;
    }

    if (fromCommit->commitInfo[tid].robSquashing) {
        DPRINTF(IEW, "[tid:%i]: ROB is still squashing.\n", tid);

        dispatchStatus[tid] = Squashing;

        emptyRenameInsts(tid);
        wroteToTimeBuffer = true;
        return;
    }

    if (checkStall(tid)) {
        block(tid);
        dispatchStatus[tid] = Blocked;
        return;
    }

    if (dispatchStatus[tid] == Blocked) {
        // Status from previous cycle was blocked, but there are no more stall
        // conditions.  Switch over to unblocking.
        DPRINTF(IEW, "[tid:%i]: Done blocking, switching to unblocking.\n",
                tid);

        dispatchStatus[tid] = Unblocking;

        unblock(tid);

        return;
    }

    if (dispatchStatus[tid] == Squashing) {
        // Switch status to running if rename isn't being told to block or
        // squash this cycle.
        DPRINTF(IEW, "[tid:%i]: Done squashing, switching to running.\n",
                tid);

        dispatchStatus[tid] = Running;

        return;
    }
}

// Distribute the instructions arriving from rename into per-thread
// queues based on each instruction's thread number.
template <class Impl>
void
DefaultIEW<Impl>::sortInsts()
{
    int insts_from_rename = fromRename->size;
#ifdef DEBUG
#if !ISA_HAS_DELAY_SLOT
    for (int i = 0; i < numThreads; i++)
        assert(insts[i].empty());
#endif
#endif
    for (int i = 0; i < insts_from_rename; ++i) {
        insts[fromRename->insts[i]->threadNumber].push(fromRename->insts[i]);
    }
}

// Discard buffered rename instructions younger than the squash point,
// crediting rename's dispatched counts for each one removed.
template <class Impl>
void
DefaultIEW<Impl>::emptyRenameInsts(unsigned tid)
{
    DPRINTF(IEW, "[tid:%i]: Removing incoming rename instructions until "
            "[sn:%i].\n", tid, bdelayDoneSeqNum[tid]);

    while (!insts[tid].empty()) {
#if ISA_HAS_DELAY_SLOT
        if (insts[tid].front()->seqNum <= bdelayDoneSeqNum[tid]) {
            DPRINTF(IEW, "[tid:%i]: Done removing, cannot remove instruction"
                    " that occurs at or before delay slot [sn:%i].\n",
                    tid, bdelayDoneSeqNum[tid]);
            break;
        } else {
            DPRINTF(IEW, "[tid:%i]: Removing incoming rename instruction "
                    "[sn:%i].\n", tid, insts[tid].front()->seqNum);
        }
#endif

        if (insts[tid].front()->isLoad() ||
            insts[tid].front()->isStore() ) {
            toRename->iewInfo[tid].dispatchedToLSQ++;
        }

        toRename->iewInfo[tid].dispatched++;

        insts[tid].pop();
    }
}

// Wake the CPU (e.g. when a writeback event completes while idle).
template <class Impl>
void
DefaultIEW<Impl>::wakeCPU()
{
    cpu->wakeCPU();
}

// Report activity to the CPU so it does not deschedule itself.
template <class Impl>
void
DefaultIEW<Impl>::activityThisCycle()
{
    DPRINTF(Activity, "Activity this cycle.\n");
    cpu->activityThisCycle();
}

// Mark this stage active in the CPU's activity tracking.
template <class Impl>
inline void
DefaultIEW<Impl>::activateStage()
{
    DPRINTF(Activity, "Activating stage.\n");
    cpu->activateStage(O3CPU::IEWIdx);
}

// Mark this stage inactive in the CPU's activity tracking.
template <class Impl>
inline void
DefaultIEW<Impl>::deactivateStage()
{
    DPRINTF(Activity, "Deactivating stage.\n");
    cpu->deactivateStage(O3CPU::IEWIdx);
}

// Top-level dispatch for one thread: account blocked/squashing cycles,
// then either dispatch directly (Running/Idle) or drain the skid buffer
// while Unblocking.
template<class Impl>
void
DefaultIEW<Impl>::dispatch(unsigned tid)
{
    // If status is Running or idle,
    //     call dispatchInsts()
    // If status is Unblocking,
    //     buffer any instructions coming from rename
    //     continue trying to empty skid buffer
    //     check if stall conditions have passed

    if (dispatchStatus[tid] == Blocked) {
        ++iewBlockCycles;

    } else if (dispatchStatus[tid] == Squashing) {
        ++iewSquashCycles;
    }

    // Dispatch should try to dispatch as many instructions as its bandwidth
    // will allow, as long as it is not currently blocked.
    if (dispatchStatus[tid] == Running ||
        dispatchStatus[tid] == Idle) {
        DPRINTF(IEW, "[tid:%i] Not blocked, so attempting to run "
                "dispatch.\n", tid);

        dispatchInsts(tid);
    } else if (dispatchStatus[tid] == Unblocking) {
        // Make sure that the skid buffer has something in it if the
        // status is unblocking.
        assert(!skidsEmpty());

        // If the status was unblocking, then instructions from the skid
        // buffer were used.  Remove those instructions and handle
        // the rest of unblocking.
        dispatchInsts(tid);

        ++iewUnblockCycles;

        if (validInstsFromRename() && dispatchedAllInsts) {
            // Add the current inputs to the skid buffer so they can be
            // reprocessed when this stage unblocks.
            skidInsert(tid);
        }

        unblock(tid);
    }
}

// Dispatch up to dispatchWidth instructions from the skid buffer (when
// unblocking) or from the rename queue into the IQ and LSQ, handling
// squashed, memory, barrier, nop, and non-speculative instructions
// specially, and blocking if the IQ or LSQ fills.
template <class Impl>
void
DefaultIEW<Impl>::dispatchInsts(unsigned tid)
{
    dispatchedAllInsts = true;

    // Obtain instructions from skid buffer if unblocking, or queue from rename
    // otherwise.
    std::queue<DynInstPtr> &insts_to_dispatch =
        dispatchStatus[tid] == Unblocking ?
        skidBuffer[tid] : insts[tid];

    int insts_to_add = insts_to_dispatch.size();

    DynInstPtr inst;
    bool add_to_iq = false;
    int dis_num_inst = 0;

    // Loop through the instructions, putting them in the instruction
    // queue.
    for ( ; dis_num_inst < insts_to_add &&
              dis_num_inst < dispatchWidth;
          ++dis_num_inst)
    {
        inst = insts_to_dispatch.front();

        if (dispatchStatus[tid] == Unblocking) {
            DPRINTF(IEW, "[tid:%i]: Issue: Examining instruction from skid "
                    "buffer\n", tid);
        }

        // Make sure there's a valid instruction there.
        assert(inst);

        DPRINTF(IEW, "[tid:%i]: Issue: Adding PC %#x [sn:%lli] [tid:%i] to "
                "IQ.\n",
                tid, inst->readPC(), inst->seqNum, inst->threadNumber);

        // Be sure to mark these instructions as ready so that the
        // commit stage can go ahead and execute them, and mark
        // them as issued so the IQ doesn't reprocess them.

        // Check for squashed instructions.
        if (inst->isSquashed()) {
            DPRINTF(IEW, "[tid:%i]: Issue: Squashed instruction encountered, "
                    "not adding to IQ.\n", tid);

            ++iewDispSquashedInsts;

            insts_to_dispatch.pop();

            //Tell Rename That An Instruction has been processed
            if (inst->isLoad() || inst->isStore()) {
                toRename->iewInfo[tid].dispatchedToLSQ++;
            }
            toRename->iewInfo[tid].dispatched++;

            continue;
        }

        // Check for full conditions.
        if (instQueue.isFull(tid)) {
            DPRINTF(IEW, "[tid:%i]: Issue: IQ has become full.\n", tid);

            // Call function to start blocking.
            block(tid);

            // Set unblock to false. Special case where we are using
            // skidbuffer (unblocking) instructions but then we still
            // get full in the IQ.
            toRename->iewUnblock[tid] = false;

            dispatchedAllInsts = false;

            ++iewIQFullEvents;
            break;
        } else if (ldstQueue.isFull(tid)) {
            DPRINTF(IEW, "[tid:%i]: Issue: LSQ has become full.\n",tid);

            // Call function to start blocking.
            block(tid);

            // Set unblock to false. Special case where we are using
            // skidbuffer (unblocking) instructions but then we still
            // get full in the IQ.
            toRename->iewUnblock[tid] = false;

            dispatchedAllInsts = false;

            ++iewLSQFullEvents;
            break;
        }

        // Otherwise issue the instruction just fine.
        if (inst->isLoad()) {
            DPRINTF(IEW, "[tid:%i]: Issue: Memory instruction "
                    "encountered, adding to LSQ.\n", tid);

            // Reserve a spot in the load store queue for this
            // memory access.
            ldstQueue.insertLoad(inst);

            ++iewDispLoadInsts;

            add_to_iq = true;

            toRename->iewInfo[tid].dispatchedToLSQ++;
        } else if (inst->isStore()) {
            DPRINTF(IEW, "[tid:%i]: Issue: Memory instruction "
                    "encountered, adding to LSQ.\n", tid);

            ldstQueue.insertStore(inst);

            ++iewDispStoreInsts;

            if (inst->isStoreConditional()) {
                // Store conditionals need to be set as "canCommit()"
                // so that commit can process them when they reach the
                // head of commit.
                // @todo: This is somewhat specific to Alpha.
                inst->setCanCommit();
                instQueue.insertNonSpec(inst);
                add_to_iq = false;

                ++iewDispNonSpecInsts;
            } else {
                add_to_iq = true;
            }

            toRename->iewInfo[tid].dispatchedToLSQ++;
        } else if (inst->isMemBarrier() || inst->isWriteBarrier()) {
            // Same as non-speculative stores.
            inst->setCanCommit();
            instQueue.insertBarrier(inst);
            add_to_iq = false;
        } else if (inst->isNop()) {
            DPRINTF(IEW, "[tid:%i]: Issue: Nop instruction encountered, "
                    "skipping.\n", tid);

            inst->setIssued();
            inst->setExecuted();
            inst->setCanCommit();

            instQueue.recordProducer(inst);

            iewExecutedNop[tid]++;

            add_to_iq = false;
        } else if (inst->isExecuted()) {
            assert(0 && "Instruction shouldn't be executed.\n");
            DPRINTF(IEW, "Issue: Executed branch encountered, "
                    "skipping.\n");

            inst->setIssued();
            inst->setCanCommit();

            instQueue.recordProducer(inst);

            add_to_iq = false;
        } else {
            add_to_iq = true;
        }
        if (inst->isNonSpeculative()) {
            DPRINTF(IEW, "[tid:%i]: Issue: Nonspeculative instruction "
                    "encountered, skipping.\n", tid);

            // Same as non-speculative stores.
            inst->setCanCommit();

            // Specifically insert it as nonspeculative.
            instQueue.insertNonSpec(inst);

            ++iewDispNonSpecInsts;

            add_to_iq = false;
        }

        // If the instruction queue is not full, then add the
        // instruction.
        if (add_to_iq) {
            instQueue.insert(inst);
        }

        insts_to_dispatch.pop();

        toRename->iewInfo[tid].dispatched++;

        ++iewDispatchedInsts;
    }

    if (!insts_to_dispatch.empty()) {
        DPRINTF(IEW,"[tid:%i]: Issue: Bandwidth Full. Blocking.\n", tid);
        block(tid);
        toRename->iewUnblock[tid] = false;
    }

    if (dispatchStatus[tid] == Idle && dis_num_inst) {
        dispatchStatus[tid] = Running;

        updatedQueues = true;
    }

    dis_num_inst = 0;
}

// Debug helper: print all instructions currently available from issue
// to standard output.
template <class Impl>
void
DefaultIEW<Impl>::printAvailableInsts()
{
    int inst = 0;

    std::cout << "Available Instructions: ";

    while (fromIssue->insts[inst]) {

        if (inst%3==0) std::cout << "\n\t";

        std::cout << "PC: " << fromIssue->insts[inst]->readPC()
             << " TN: " << fromIssue->insts[inst]->threadNumber
             << " SN: " << fromIssue->insts[inst]->seqNum << " | ";

        inst++;

    }

    std::cout << "\n";
}

// Execute (and hand off for writeback) the instructions delivered by
// issue this cycle, skipping squashed instructions and routing memory
// references through the LSQ.
template <class Impl>
void
DefaultIEW<Impl>::executeInsts()
{
    wbNumInst = 0;
    wbCycle = 0;

    std::list<unsigned>::iterator threads = activeThreads->begin();
    std::list<unsigned>::iterator end = activeThreads->end();

    while (threads != end) {
        unsigned tid = *threads++;
        fetchRedirect[tid] = false;
    }

    // Uncomment this if you want to see all available instructions.
//    printAvailableInsts();

    // Execute/writeback any instructions that are available.
    int insts_to_execute = fromIssue->size;
    int inst_num = 0;
    for (; inst_num < insts_to_execute;
          ++inst_num) {

        DPRINTF(IEW, "Execute: Executing instructions from IQ.\n");

        DynInstPtr inst = instQueue.getInstToExecute();

        DPRINTF(IEW, "Execute: Processing PC %#x, [tid:%i] [sn:%i].\n",
                inst->readPC(), inst->threadNumber,inst->seqNum);

        // Check if the instruction is squashed; if so then skip it
        if (inst->isSquashed()) {
            DPRINTF(IEW, "Execute: Instruction was squashed.\n");

            // Consider this instruction executed so that commit can go
            // ahead and retire the instruction.
            inst->setExecuted();

            // Not sure if I should set this here or just let commit try to
            // commit any squashed instructions.  I like the latter a bit more.
            inst->setCanCommit();

            ++iewExecSquashedInsts;

            decrWb(inst->seqNum);
            continue;
        }

        Fault fault = NoFault;

        // Execute instruction.
        // Note that if the instruction faults, it will be handled
        // at the commit stage.
        if (inst->isMemRef() &&
            (!inst->isDataPrefetch() && !inst->isInstPrefetch())) {
            DPRINTF(IEW, "Execute: Calculating address for memory "
                    "reference.\n");

            // Tell the LDSTQ to execute this instruction (if it is a load).
            if (inst->isLoad()) {
                // Loads will mark themselves as executed, and their writeback
                // event adds the instruction to the queue to commit
                fault = ldstQueue.executeLoad(inst);
            } else if (inst->isStore()) {
                fault = ldstQueue.executeStore(inst);

                // If the store had a fault then it may not have a mem req
                if (!inst->isStoreConditional() && fault == NoFault) {
                    inst->setExecuted();

                    instToCommit(inst);
                } else if (fault != NoFault) {
                    // If the instruction faulted, then we need to send it along to commit
                    // without the instruction completing.
                    DPRINTF(IEW, "Store has fault %s!  [sn:%lli]\n",
                            fault->name(), inst->seqNum);

                    // Send this instruction to commit, also make sure iew stage
                    // realizes there is activity.
                    inst->setExecuted();

                    instToCommit(inst);
                    activityThisCycle();
                }

                // Store conditionals will mark themselves as
                // executed, and their writeback event will add the
                // instruction to the queue to commit.
1320 } else { 1321 panic("Unexpected memory type!\n"); 1322 } 1323 1324 } else { 1325 inst->execute(); 1326 1327 inst->setExecuted(); 1328 1329 instToCommit(inst); 1330 } 1331 1332 updateExeInstStats(inst); 1333 1334 // Check if branch prediction was correct, if not then we need 1335 // to tell commit to squash in flight instructions. Only 1336 // handle this if there hasn't already been something that 1337 // redirects fetch in this group of instructions. 1338 1339 // This probably needs to prioritize the redirects if a different 1340 // scheduler is used. Currently the scheduler schedules the oldest 1341 // instruction first, so the branch resolution order will be correct. 1342 unsigned tid = inst->threadNumber; 1343 1344 if (!fetchRedirect[tid] || 1345 toCommit->squashedSeqNum[tid] > inst->seqNum) { 1346 1347 if (inst->mispredicted()) { 1348 fetchRedirect[tid] = true; 1349 1350 DPRINTF(IEW, "Execute: Branch mispredict detected.\n"); 1351 DPRINTF(IEW, "Predicted target was %#x, %#x.\n", 1352 inst->readPredPC(), inst->readPredNPC()); 1353 DPRINTF(IEW, "Execute: Redirecting fetch to PC: %#x," 1354 " NPC: %#x.\n", inst->readNextPC(), 1355 inst->readNextNPC()); 1356 // If incorrect, then signal the ROB that it must be squashed. 1357 squashDueToBranch(inst, tid); 1358 1359 if (inst->readPredTaken()) { 1360 predictedTakenIncorrect++; 1361 } else { 1362 predictedNotTakenIncorrect++; 1363 } 1364 } else if (ldstQueue.violation(tid)) { 1365 assert(inst->isMemRef()); 1366 // If there was an ordering violation, then get the 1367 // DynInst that caused the violation. Note that this 1368 // clears the violation signal. 1369 DynInstPtr violator; 1370 violator = ldstQueue.getMemDepViolator(tid); 1371 1372 DPRINTF(IEW, "LDSTQ detected a violation. Violator PC: " 1373 "%#x, inst PC: %#x. 
Addr is: %#x.\n", 1374 violator->readPC(), inst->readPC(), inst->physEffAddr); 1375 1376 // Ensure the violating instruction is older than 1377 // current squash 1378/* if (fetchRedirect[tid] && 1379 violator->seqNum >= toCommit->squashedSeqNum[tid] + 1) 1380 continue; 1381*/ 1382 fetchRedirect[tid] = true; 1383 1384 // Tell the instruction queue that a violation has occured. 1385 instQueue.violation(inst, violator); 1386 1387 // Squash. 1388 squashDueToMemOrder(inst,tid); 1389 1390 ++memOrderViolationEvents; 1391 } else if (ldstQueue.loadBlocked(tid) && 1392 !ldstQueue.isLoadBlockedHandled(tid)) { 1393 fetchRedirect[tid] = true; 1394 1395 DPRINTF(IEW, "Load operation couldn't execute because the " 1396 "memory system is blocked. PC: %#x [sn:%lli]\n", 1397 inst->readPC(), inst->seqNum); 1398 1399 squashDueToMemBlocked(inst, tid); 1400 } 1401 } else { 1402 // Reset any state associated with redirects that will not 1403 // be used. 1404 if (ldstQueue.violation(tid)) { 1405 assert(inst->isMemRef()); 1406 1407 DynInstPtr violator = ldstQueue.getMemDepViolator(tid); 1408 1409 DPRINTF(IEW, "LDSTQ detected a violation. Violator PC: " 1410 "%#x, inst PC: %#x. Addr is: %#x.\n", 1411 violator->readPC(), inst->readPC(), inst->physEffAddr); 1412 DPRINTF(IEW, "Violation will not be handled because " 1413 "already squashing\n"); 1414 1415 ++memOrderViolationEvents; 1416 } 1417 if (ldstQueue.loadBlocked(tid) && 1418 !ldstQueue.isLoadBlockedHandled(tid)) { 1419 DPRINTF(IEW, "Load operation couldn't execute because the " 1420 "memory system is blocked. PC: %#x [sn:%lli]\n", 1421 inst->readPC(), inst->seqNum); 1422 DPRINTF(IEW, "Blocked load will not be handled because " 1423 "already squashing\n"); 1424 1425 ldstQueue.setLoadBlockedHandled(tid); 1426 } 1427 1428 } 1429 } 1430 1431 // Update and record activity if we processed any instructions. 
1432 if (inst_num) { 1433 if (exeStatus == Idle) { 1434 exeStatus = Running; 1435 } 1436 1437 updatedQueues = true; 1438 1439 cpu->activityThisCycle(); 1440 } 1441 1442 // Need to reset this in case a writeback event needs to write into the 1443 // iew queue. That way the writeback event will write into the correct 1444 // spot in the queue. 1445 wbNumInst = 0; 1446} 1447 1448template <class Impl> 1449void 1450DefaultIEW<Impl>::writebackInsts() 1451{ 1452 // Loop through the head of the time buffer and wake any 1453 // dependents. These instructions are about to write back. Also 1454 // mark scoreboard that this instruction is finally complete. 1455 // Either have IEW have direct access to scoreboard, or have this 1456 // as part of backwards communication. 1457 for (int inst_num = 0; inst_num < wbWidth && 1458 toCommit->insts[inst_num]; inst_num++) { 1459 DynInstPtr inst = toCommit->insts[inst_num]; 1460 int tid = inst->threadNumber; 1461 1462 DPRINTF(IEW, "Sending instructions to commit, [sn:%lli] PC %#x.\n", 1463 inst->seqNum, inst->readPC()); 1464 1465 iewInstsToCommit[tid]++; 1466 1467 // Some instructions will be sent to commit without having 1468 // executed because they need commit to handle them. 1469 // E.g. Uncached loads have not actually executed when they 1470 // are first sent to commit. Instead commit must tell the LSQ 1471 // when it's ready to execute the uncached load. 
1472 if (!inst->isSquashed() && inst->isExecuted() && inst->getFault() == NoFault) { 1473 int dependents = instQueue.wakeDependents(inst); 1474 1475 for (int i = 0; i < inst->numDestRegs(); i++) { 1476 //mark as Ready 1477 DPRINTF(IEW,"Setting Destination Register %i\n", 1478 inst->renamedDestRegIdx(i)); 1479 scoreboard->setReg(inst->renamedDestRegIdx(i)); 1480 } 1481 1482 if (dependents) { 1483 producerInst[tid]++; 1484 consumerInst[tid]+= dependents; 1485 } 1486 writebackCount[tid]++; 1487 } 1488 1489 decrWb(inst->seqNum); 1490 } 1491} 1492 1493template<class Impl> 1494void 1495DefaultIEW<Impl>::tick() 1496{ 1497 wbNumInst = 0; 1498 wbCycle = 0; 1499 1500 wroteToTimeBuffer = false; 1501 updatedQueues = false; 1502 1503 sortInsts(); 1504 1505 // Free function units marked as being freed this cycle. 1506 fuPool->processFreeUnits(); 1507 1508 std::list<unsigned>::iterator threads = activeThreads->begin(); 1509 std::list<unsigned>::iterator end = activeThreads->end(); 1510 1511 // Check stall and squash signals, dispatch any instructions. 1512 while (threads != end) { 1513 unsigned tid = *threads++; 1514 1515 DPRINTF(IEW,"Issue: Processing [tid:%i]\n",tid); 1516 1517 checkSignalsAndUpdate(tid); 1518 dispatch(tid); 1519 } 1520 1521 if (exeStatus != Squashing) { 1522 executeInsts(); 1523 1524 writebackInsts(); 1525 1526 // Have the instruction queue try to schedule any ready instructions. 1527 // (In actuality, this scheduling is for instructions that will 1528 // be executed next cycle.) 1529 instQueue.scheduleReadyInsts(); 1530 1531 // Also should advance its own time buffers if the stage ran. 1532 // Not the best place for it, but this works (hopefully). 
1533 issueToExecQueue.advance(); 1534 } 1535 1536 bool broadcast_free_entries = false; 1537 1538 if (updatedQueues || exeStatus == Running || updateLSQNextCycle) { 1539 exeStatus = Idle; 1540 updateLSQNextCycle = false; 1541 1542 broadcast_free_entries = true; 1543 } 1544 1545 // Writeback any stores using any leftover bandwidth. 1546 ldstQueue.writebackStores(); 1547 1548 // Check the committed load/store signals to see if there's a load 1549 // or store to commit. Also check if it's being told to execute a 1550 // nonspeculative instruction. 1551 // This is pretty inefficient... 1552 1553 threads = activeThreads->begin(); 1554 while (threads != end) { 1555 unsigned tid = (*threads++); 1556 1557 DPRINTF(IEW,"Processing [tid:%i]\n",tid); 1558 1559 // Update structures based on instructions committed. 1560 if (fromCommit->commitInfo[tid].doneSeqNum != 0 && 1561 !fromCommit->commitInfo[tid].squash && 1562 !fromCommit->commitInfo[tid].robSquashing) { 1563 1564 ldstQueue.commitStores(fromCommit->commitInfo[tid].doneSeqNum,tid); 1565 1566 ldstQueue.commitLoads(fromCommit->commitInfo[tid].doneSeqNum,tid); 1567 1568 updateLSQNextCycle = true; 1569 instQueue.commit(fromCommit->commitInfo[tid].doneSeqNum,tid); 1570 } 1571 1572 if (fromCommit->commitInfo[tid].nonSpecSeqNum != 0) { 1573 1574 //DPRINTF(IEW,"NonspecInst from thread %i",tid); 1575 if (fromCommit->commitInfo[tid].uncached) { 1576 instQueue.replayMemInst(fromCommit->commitInfo[tid].uncachedLoad); 1577 fromCommit->commitInfo[tid].uncachedLoad->setAtCommit(); 1578 } else { 1579 instQueue.scheduleNonSpec( 1580 fromCommit->commitInfo[tid].nonSpecSeqNum); 1581 } 1582 } 1583 1584 if (broadcast_free_entries) { 1585 toFetch->iewInfo[tid].iqCount = 1586 instQueue.getCount(tid); 1587 toFetch->iewInfo[tid].ldstqCount = 1588 ldstQueue.getCount(tid); 1589 1590 toRename->iewInfo[tid].usedIQ = true; 1591 toRename->iewInfo[tid].freeIQEntries = 1592 instQueue.numFreeEntries(); 1593 toRename->iewInfo[tid].usedLSQ = true; 1594 
toRename->iewInfo[tid].freeLSQEntries = 1595 ldstQueue.numFreeEntries(tid); 1596 1597 wroteToTimeBuffer = true; 1598 } 1599 1600 DPRINTF(IEW, "[tid:%i], Dispatch dispatched %i instructions.\n", 1601 tid, toRename->iewInfo[tid].dispatched); 1602 } 1603 1604 DPRINTF(IEW, "IQ has %i free entries (Can schedule: %i). " 1605 "LSQ has %i free entries.\n", 1606 instQueue.numFreeEntries(), instQueue.hasReadyInsts(), 1607 ldstQueue.numFreeEntries()); 1608 1609 updateStatus(); 1610 1611 if (wroteToTimeBuffer) { 1612 DPRINTF(Activity, "Activity this cycle.\n"); 1613 cpu->activityThisCycle(); 1614 } 1615} 1616 1617template <class Impl> 1618void 1619DefaultIEW<Impl>::updateExeInstStats(DynInstPtr &inst) 1620{ 1621 int thread_number = inst->threadNumber; 1622 1623 // 1624 // Pick off the software prefetches 1625 // 1626#ifdef TARGET_ALPHA 1627 if (inst->isDataPrefetch()) 1628 iewExecutedSwp[thread_number]++; 1629 else 1630 iewIewExecutedcutedInsts++; 1631#else 1632 iewExecutedInsts++; 1633#endif 1634 1635 // 1636 // Control operations 1637 // 1638 if (inst->isControl()) 1639 iewExecutedBranches[thread_number]++; 1640 1641 // 1642 // Memory operations 1643 // 1644 if (inst->isMemRef()) { 1645 iewExecutedRefs[thread_number]++; 1646 1647 if (inst->isLoad()) { 1648 iewExecLoadInsts[thread_number]++; 1649 } 1650 } 1651} 1652