/*
 * Copyright (c) 2010-2013, 2018 ARM Limited
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 */

#ifndef __CPU_O3_IEW_IMPL_IMPL_HH__
#define __CPU_O3_IEW_IMPL_IMPL_HH__

// @todo: Fix the instantaneous communication among all the stages within
// iew.  There's a clear delay between issue and execute, yet backwards
// communication happens simultaneously.

#include <queue>

#include "arch/utility.hh"
#include "config/the_isa.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/o3/fu_pool.hh"
#include "cpu/o3/iew.hh"
#include "cpu/timebuf.hh"
#include "debug/Activity.hh"
#include "debug/Drain.hh"
#include "debug/IEW.hh"
#include "debug/O3PipeView.hh"
#include "params/DerivO3CPU.hh"

using namespace std;

template<class Impl>
DefaultIEW<Impl>::DefaultIEW(O3CPU *_cpu, DerivO3CPUParams *params)
    : issueToExecQueue(params->backComSize, params->forwardComSize),
      cpu(_cpu),
      instQueue(_cpu, this, params),
      ldstQueue(_cpu, this, params),
      fuPool(params->fuPool),
      commitToIEWDelay(params->commitToIEWDelay),
      renameToIEWDelay(params->renameToIEWDelay),
      issueToExecuteDelay(params->issueToExecuteDelay),
      dispatchWidth(params->dispatchWidth),
      issueWidth(params->issueWidth),
      wbNumInst(0),
      wbCycle(0),
      wbWidth(params->wbWidth),
      numThreads(params->numThreads)
{
    if (dispatchWidth > Impl::MaxWidth)
        fatal("dispatchWidth (%d) is larger than compiled limit (%d),\n"
              "\tincrease MaxWidth in src/cpu/o3/impl.hh\n",
              dispatchWidth, static_cast<int>(Impl::MaxWidth));
    if (issueWidth > Impl::MaxWidth)
        fatal("issueWidth (%d) is larger than compiled limit (%d),\n"
              "\tincrease MaxWidth in src/cpu/o3/impl.hh\n",
              issueWidth, static_cast<int>(Impl::MaxWidth));
    if (wbWidth > Impl::MaxWidth)
        fatal("wbWidth (%d) is larger than compiled limit (%d),\n"
              "\tincrease MaxWidth in src/cpu/o3/impl.hh\n",
              wbWidth, static_cast<int>(Impl::MaxWidth));

    _status = Active;
    exeStatus = Running;
    wbStatus = Idle;

    // Setup wire to read instructions coming from issue.
    fromIssue = issueToExecQueue.getWire(-issueToExecuteDelay);

    // Instruction queue needs the queue between issue and execute.
    instQueue.setIssueToExecuteQueue(&issueToExecQueue);

    for (ThreadID tid = 0; tid < Impl::MaxThreads; tid++) {
        dispatchStatus[tid] = Running;
        fetchRedirect[tid] = false;
    }

    updateLSQNextCycle = false;

    skidBufferMax = (renameToIEWDelay + 1) * params->renameWidth;
}

template <class Impl>
std::string
DefaultIEW<Impl>::name() const
{
    return cpu->name() + ".iew";
}

template <class Impl>
void
DefaultIEW<Impl>::regProbePoints()
{
    ppDispatch = new ProbePointArg<DynInstPtr>(cpu->getProbeManager(), "Dispatch");
    ppMispredict = new ProbePointArg<DynInstPtr>(cpu->getProbeManager(), "Mispredict");
    /**
     * Probe point with dynamic instruction as the argument used to probe when
     * an instruction starts to execute.
     */
    ppExecute = new ProbePointArg<DynInstPtr>(cpu->getProbeManager(),
                                              "Execute");
    /**
     * Probe point with dynamic instruction as the argument used to probe when
     * an instruction execution completes and it is marked ready to commit.
     */
    ppToCommit = new ProbePointArg<DynInstPtr>(cpu->getProbeManager(),
                                               "ToCommit");
}

template <class Impl>
void
DefaultIEW<Impl>::regStats()
{
    using namespace Stats;

    instQueue.regStats();
    ldstQueue.regStats();

    iewIdleCycles
        .name(name() + ".iewIdleCycles")
        .desc("Number of cycles IEW is idle");

    iewSquashCycles
        .name(name() + ".iewSquashCycles")
        .desc("Number of cycles IEW is squashing");

    iewBlockCycles
        .name(name() + ".iewBlockCycles")
        .desc("Number of cycles IEW is blocking");

    iewUnblockCycles
        .name(name() + ".iewUnblockCycles")
        .desc("Number of cycles IEW is unblocking");

    iewDispatchedInsts
        .name(name() + ".iewDispatchedInsts")
        .desc("Number of instructions dispatched to IQ");

    iewDispSquashedInsts
        .name(name() + ".iewDispSquashedInsts")
        .desc("Number of squashed instructions skipped by dispatch");

    iewDispLoadInsts
        .name(name() + ".iewDispLoadInsts")
        .desc("Number of dispatched load instructions");

    iewDispStoreInsts
        .name(name() + ".iewDispStoreInsts")
        .desc("Number of dispatched store instructions");

    iewDispNonSpecInsts
        .name(name() + ".iewDispNonSpecInsts")
        .desc("Number of dispatched non-speculative instructions");

    iewIQFullEvents
        .name(name() + ".iewIQFullEvents")
        .desc("Number of times the IQ has become full, causing a stall");

    iewLSQFullEvents
        .name(name() + ".iewLSQFullEvents")
        .desc("Number of times the LSQ has become full, causing a stall");

    memOrderViolationEvents
        .name(name() + ".memOrderViolationEvents")
        .desc("Number of memory order violations");

    predictedTakenIncorrect
        .name(name() + ".predictedTakenIncorrect")
        .desc("Number of branches that were predicted taken incorrectly");

    predictedNotTakenIncorrect
        .name(name() + ".predictedNotTakenIncorrect")
        .desc("Number of branches that were predicted not taken incorrectly");

    branchMispredicts
        .name(name() + ".branchMispredicts")
        .desc("Number of branch mispredicts detected at execute");

    branchMispredicts = predictedTakenIncorrect + predictedNotTakenIncorrect;

    iewExecutedInsts
        .name(name() + ".iewExecutedInsts")
        .desc("Number of executed instructions");

    iewExecLoadInsts
        .init(cpu->numThreads)
        .name(name() + ".iewExecLoadInsts")
        .desc("Number of load instructions executed")
        .flags(total);

    iewExecSquashedInsts
        .name(name() + ".iewExecSquashedInsts")
        .desc("Number of squashed instructions skipped in execute");

    iewExecutedSwp
        .init(cpu->numThreads)
        .name(name() + ".exec_swp")
        .desc("number of swp insts executed")
        .flags(total);

    iewExecutedNop
        .init(cpu->numThreads)
        .name(name() + ".exec_nop")
        .desc("number of nop insts executed")
        .flags(total);

    iewExecutedRefs
        .init(cpu->numThreads)
        .name(name() + ".exec_refs")
        .desc("number of memory reference insts executed")
        .flags(total);

    iewExecutedBranches
        .init(cpu->numThreads)
        .name(name() + ".exec_branches")
        .desc("Number of branches executed")
        .flags(total);

    iewExecStoreInsts
        .name(name() + ".exec_stores")
        .desc("Number of stores executed")
        .flags(total);
    iewExecStoreInsts = iewExecutedRefs - iewExecLoadInsts;

    iewExecRate
        .name(name() + ".exec_rate")
        .desc("Inst execution rate")
        .flags(total);

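    // The assignments below define formula stats: the expression is recorded
    // here and evaluated from its operand stats when statistics are dumped,
    // rather than being updated every cycle.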
    iewExecRate = iewExecutedInsts / cpu->numCycles;

    iewInstsToCommit
        .init(cpu->numThreads)
        .name(name() + ".wb_sent")
        .desc("cumulative count of insts sent to commit")
        .flags(total);

    writebackCount
        .init(cpu->numThreads)
        .name(name() + ".wb_count")
        .desc("cumulative count of insts written-back")
        .flags(total);

    producerInst
        .init(cpu->numThreads)
        .name(name() + ".wb_producers")
        .desc("num instructions producing a value")
        .flags(total);

    consumerInst
        .init(cpu->numThreads)
        .name(name() + ".wb_consumers")
        .desc("num instructions consuming a value")
        .flags(total);

    wbFanout
        .name(name() + ".wb_fanout")
        .desc("average fanout of values written-back")
        .flags(total);

    wbFanout = producerInst / consumerInst;

    wbRate
        .name(name() + ".wb_rate")
        .desc("insts written-back per cycle")
        .flags(total);
    wbRate = writebackCount / cpu->numCycles;
}

template<class Impl>
void
DefaultIEW<Impl>::startupStage()
{
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        toRename->iewInfo[tid].usedIQ = true;
        toRename->iewInfo[tid].freeIQEntries =
            instQueue.numFreeEntries(tid);

        toRename->iewInfo[tid].usedLSQ = true;
        toRename->iewInfo[tid].freeLQEntries = ldstQueue.numFreeLoadEntries(tid);
        toRename->iewInfo[tid].freeSQEntries = ldstQueue.numFreeStoreEntries(tid);
    }

    // Initialize the checker's dcache port here
    if (cpu->checker) {
        cpu->checker->setDcachePort(&cpu->getDataPort());
    }

    cpu->activateStage(O3CPU::IEWIdx);
}

template<class Impl>
void
DefaultIEW<Impl>::clearStates(ThreadID tid)
{
    toRename->iewInfo[tid].usedIQ = true;
    toRename->iewInfo[tid].freeIQEntries =
        instQueue.numFreeEntries(tid);

    toRename->iewInfo[tid].usedLSQ = true;
    toRename->iewInfo[tid].freeLQEntries = ldstQueue.numFreeLoadEntries(tid);
    toRename->iewInfo[tid].freeSQEntries = ldstQueue.numFreeStoreEntries(tid);
}

template<class Impl>
void
DefaultIEW<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
{
    timeBuffer = tb_ptr;

    // Setup wire to read information from time buffer, from commit.
    fromCommit = timeBuffer->getWire(-commitToIEWDelay);

    // Setup wire to write information back to previous stages.
    toRename = timeBuffer->getWire(0);

    toFetch = timeBuffer->getWire(0);

    // Instruction queue also needs main time buffer.
    instQueue.setTimeBuffer(tb_ptr);
}

template<class Impl>
void
DefaultIEW<Impl>::setRenameQueue(TimeBuffer<RenameStruct> *rq_ptr)
{
    renameQueue = rq_ptr;

    // Setup wire to read information from rename queue.
    fromRename = renameQueue->getWire(-renameToIEWDelay);
}

template<class Impl>
void
DefaultIEW<Impl>::setIEWQueue(TimeBuffer<IEWStruct> *iq_ptr)
{
    iewQueue = iq_ptr;

    // Setup wire to write instructions to commit.
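    // Writing at wire offset 0 places entries in the current slot of the
    // IEW-to-commit time buffer; commit reads this queue at a negative offset
    // equal to its configured delay, so the data becomes visible to commit
    // only after the buffer has advanced that many cycles.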
    toCommit = iewQueue->getWire(0);
}

template<class Impl>
void
DefaultIEW<Impl>::setActiveThreads(list<ThreadID> *at_ptr)
{
    activeThreads = at_ptr;

    ldstQueue.setActiveThreads(at_ptr);
    instQueue.setActiveThreads(at_ptr);
}

template<class Impl>
void
DefaultIEW<Impl>::setScoreboard(Scoreboard *sb_ptr)
{
    scoreboard = sb_ptr;
}

template <class Impl>
bool
DefaultIEW<Impl>::isDrained() const
{
    bool drained = ldstQueue.isDrained() && instQueue.isDrained();

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (!insts[tid].empty()) {
            DPRINTF(Drain, "%i: Insts not empty.\n", tid);
            drained = false;
        }
        if (!skidBuffer[tid].empty()) {
            DPRINTF(Drain, "%i: Skid buffer not empty.\n", tid);
            drained = false;
        }
        drained = drained && dispatchStatus[tid] == Running;
    }

    // Also check the FU pool as instructions are "stored" in FU
    // completion events until they are done and not accounted for
    // above
    if (drained && !fuPool->isDrained()) {
        DPRINTF(Drain, "FU pool still busy.\n");
        drained = false;
    }

    return drained;
}

template <class Impl>
void
DefaultIEW<Impl>::drainSanityCheck() const
{
    assert(isDrained());

    instQueue.drainSanityCheck();
    ldstQueue.drainSanityCheck();
}

template <class Impl>
void
DefaultIEW<Impl>::takeOverFrom()
{
    // Reset all state.
    _status = Active;
    exeStatus = Running;
    wbStatus = Idle;

    instQueue.takeOverFrom();
    ldstQueue.takeOverFrom();
    fuPool->takeOverFrom();

    startupStage();
    cpu->activityThisCycle();

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        dispatchStatus[tid] = Running;
        fetchRedirect[tid] = false;
    }

    updateLSQNextCycle = false;

    for (int i = 0; i < issueToExecQueue.getSize(); ++i) {
        issueToExecQueue.advance();
    }
}

template<class Impl>
void
DefaultIEW<Impl>::squash(ThreadID tid)
{
    DPRINTF(IEW, "[tid:%i] Squashing all instructions.\n", tid);

    // Tell the IQ to start squashing.
    instQueue.squash(tid);

    // Tell the LDSTQ to start squashing.
    ldstQueue.squash(fromCommit->commitInfo[tid].doneSeqNum, tid);
    updatedQueues = true;

    // Clear the skid buffer in case it has any data in it.
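    // The per-thread counts updated below are read by rename, which uses
    // them to track how many of its in-flight instructions (and LQ/SQ
    // reservations) have now been accounted for by IEW.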
    DPRINTF(IEW,
            "Removing skidbuffer instructions until "
            "[sn:%llu] [tid:%i]\n",
            fromCommit->commitInfo[tid].doneSeqNum, tid);

    while (!skidBuffer[tid].empty()) {
        if (skidBuffer[tid].front()->isLoad()) {
            toRename->iewInfo[tid].dispatchedToLQ++;
        }
        if (skidBuffer[tid].front()->isStore() ||
            skidBuffer[tid].front()->isAtomic()) {
            toRename->iewInfo[tid].dispatchedToSQ++;
        }

        toRename->iewInfo[tid].dispatched++;

        skidBuffer[tid].pop();
    }

    emptyRenameInsts(tid);
}

template<class Impl>
void
DefaultIEW<Impl>::squashDueToBranch(const DynInstPtr& inst, ThreadID tid)
{
    DPRINTF(IEW, "[tid:%i] [sn:%llu] Squashing from a specific instruction,"
            " PC: %s "
            "\n", tid, inst->seqNum, inst->pcState());

    if (!toCommit->squash[tid] ||
        inst->seqNum < toCommit->squashedSeqNum[tid]) {
        toCommit->squash[tid] = true;
        toCommit->squashedSeqNum[tid] = inst->seqNum;
        toCommit->branchTaken[tid] = inst->pcState().branching();

        TheISA::PCState pc = inst->pcState();
        TheISA::advancePC(pc, inst->staticInst);

        toCommit->pc[tid] = pc;
        toCommit->mispredictInst[tid] = inst;
        toCommit->includeSquashInst[tid] = false;

        wroteToTimeBuffer = true;
    }

}

template<class Impl>
void
DefaultIEW<Impl>::squashDueToMemOrder(const DynInstPtr& inst, ThreadID tid)
{
    DPRINTF(IEW, "[tid:%i] Memory violation, squashing violator and younger "
            "insts, PC: %s [sn:%llu].\n", tid, inst->pcState(), inst->seqNum);
    // Need to include inst->seqNum in the following comparison to cover the
    // corner case when a branch misprediction and a memory violation for the
    // same instruction (e.g. load PC) are detected in the same cycle.  In this
    // case the memory violator should take precedence over the branch
    // misprediction because it requires the violator itself to be included in
    // the squash.
    if (!toCommit->squash[tid] ||
        inst->seqNum <= toCommit->squashedSeqNum[tid]) {
        toCommit->squash[tid] = true;

        toCommit->squashedSeqNum[tid] = inst->seqNum;
        toCommit->pc[tid] = inst->pcState();
        toCommit->mispredictInst[tid] = NULL;

        // Must include the memory violator in the squash.
        toCommit->includeSquashInst[tid] = true;

        wroteToTimeBuffer = true;
    }
}

template<class Impl>
void
DefaultIEW<Impl>::block(ThreadID tid)
{
    DPRINTF(IEW, "[tid:%i] Blocking.\n", tid);

    if (dispatchStatus[tid] != Blocked &&
        dispatchStatus[tid] != Unblocking) {
        toRename->iewBlock[tid] = true;
        wroteToTimeBuffer = true;
    }

    // Add the current inputs to the skid buffer so they can be
    // reprocessed when this stage unblocks.
    skidInsert(tid);

    dispatchStatus[tid] = Blocked;
}

template<class Impl>
void
DefaultIEW<Impl>::unblock(ThreadID tid)
{
    DPRINTF(IEW, "[tid:%i] Reading instructions out of the skid "
            "buffer %u.\n", tid, tid);

    // If the skid buffer is empty, signal back to previous stages to unblock.
    // Also switch status to running.
    if (skidBuffer[tid].empty()) {
        toRename->iewUnblock[tid] = true;
        wroteToTimeBuffer = true;
        DPRINTF(IEW, "[tid:%i] Done unblocking.\n", tid);
        dispatchStatus[tid] = Running;
    }
}

template<class Impl>
void
DefaultIEW<Impl>::wakeDependents(const DynInstPtr& inst)
{
    instQueue.wakeDependents(inst);
}

template<class Impl>
void
DefaultIEW<Impl>::rescheduleMemInst(const DynInstPtr& inst)
{
    instQueue.rescheduleMemInst(inst);
}

template<class Impl>
void
DefaultIEW<Impl>::replayMemInst(const DynInstPtr& inst)
{
    instQueue.replayMemInst(inst);
}

template<class Impl>
void
DefaultIEW<Impl>::blockMemInst(const DynInstPtr& inst)
{
    instQueue.blockMemInst(inst);
}

template<class Impl>
void
DefaultIEW<Impl>::cacheUnblocked()
{
    instQueue.cacheUnblocked();
}

template<class Impl>
void
DefaultIEW<Impl>::instToCommit(const DynInstPtr& inst)
{
    // This function should not be called after writebackInsts in a
    // single cycle.  That will cause problems with an instruction
    // being added to the queue to commit without being processed by
    // writebackInsts prior to being sent to commit.

    // First check the time slot that this instruction will write
    // to.  If there are free write ports at that time, then go ahead
    // and write the instruction to that time.  If there are not,
    // keep looking forward to find the first cycle that has a free
    // slot.
    while ((*iewQueue)[wbCycle].insts[wbNumInst]) {
        ++wbNumInst;
        if (wbNumInst == wbWidth) {
            ++wbCycle;
            wbNumInst = 0;
        }
    }

    DPRINTF(IEW, "Current wb cycle: %i, width: %i, numInst: %i\nwbActual:%i\n",
            wbCycle, wbWidth, wbNumInst, wbCycle * wbWidth + wbNumInst);
    // Add finished instruction to queue to commit.
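    // If wbNumInst wrapped above, wbCycle was advanced and the instruction
    // lands in a later slot of the IEW-to-commit queue, modeling a limit of
    // wbWidth writebacks per cycle.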
    (*iewQueue)[wbCycle].insts[wbNumInst] = inst;
    (*iewQueue)[wbCycle].size++;
}

template <class Impl>
unsigned
DefaultIEW<Impl>::validInstsFromRename()
{
    unsigned inst_count = 0;

    for (int i = 0; i < fromRename->size; i++) {
        if (!fromRename->insts[i]->isSquashed())
            inst_count++;
    }

    return inst_count;
}

template<class Impl>
void
DefaultIEW<Impl>::skidInsert(ThreadID tid)
{
    DynInstPtr inst = NULL;

    while (!insts[tid].empty()) {
        inst = insts[tid].front();

        insts[tid].pop();

        DPRINTF(IEW, "[tid:%i] Inserting [sn:%lli] PC:%s into "
                "dispatch skidBuffer %i\n", tid, inst->seqNum,
                inst->pcState(), tid);

        skidBuffer[tid].push(inst);
    }

    assert(skidBuffer[tid].size() <= skidBufferMax &&
           "Skidbuffer Exceeded Max Size");
}

template<class Impl>
int
DefaultIEW<Impl>::skidCount()
{
    int max = 0;

    list<ThreadID>::iterator threads = activeThreads->begin();
    list<ThreadID>::iterator end = activeThreads->end();

    while (threads != end) {
        ThreadID tid = *threads++;
        unsigned thread_count = skidBuffer[tid].size();
        if (max < thread_count)
            max = thread_count;
    }

    return max;
}

template<class Impl>
bool
DefaultIEW<Impl>::skidsEmpty()
{
    list<ThreadID>::iterator threads = activeThreads->begin();
    list<ThreadID>::iterator end = activeThreads->end();

    while (threads != end) {
        ThreadID tid = *threads++;

        if (!skidBuffer[tid].empty())
            return false;
    }

    return true;
}

template <class Impl>
void
DefaultIEW<Impl>::updateStatus()
{
    bool any_unblocking = false;

    list<ThreadID>::iterator threads = activeThreads->begin();
    list<ThreadID>::iterator end = activeThreads->end();

    while (threads != end) {
        ThreadID tid = *threads++;

        if (dispatchStatus[tid] == Unblocking) {
            any_unblocking = true;
            break;
        }
    }

    // If there are no ready instructions waiting to be scheduled by the IQ,
    // and there are no stores waiting to write back, and dispatch is not
    // unblocking, then there is no internal activity for the IEW stage.
    instQueue.intInstQueueReads++;
    if (_status == Active && !instQueue.hasReadyInsts() &&
        !ldstQueue.willWB() && !any_unblocking) {
        DPRINTF(IEW, "IEW switching to idle\n");

        deactivateStage();

        _status = Inactive;
    } else if (_status == Inactive && (instQueue.hasReadyInsts() ||
                                       ldstQueue.willWB() ||
                                       any_unblocking)) {
        // Otherwise there is internal activity.  Set to active.
        DPRINTF(IEW, "IEW switching to active\n");

        activateStage();

        _status = Active;
    }
}

template <class Impl>
bool
DefaultIEW<Impl>::checkStall(ThreadID tid)
{
    bool ret_val(false);

    if (fromCommit->commitInfo[tid].robSquashing) {
        DPRINTF(IEW, "[tid:%i] Stall from Commit stage detected.\n", tid);
        ret_val = true;
    } else if (instQueue.isFull(tid)) {
        DPRINTF(IEW, "[tid:%i] Stall: IQ is full.\n", tid);
        ret_val = true;
    }

    return ret_val;
}

template <class Impl>
void
DefaultIEW<Impl>::checkSignalsAndUpdate(ThreadID tid)
{
    // Check if there's a squash signal; squash if there is one.
    // Check stall signals; block if there is one.
    // If the status was Blocked
    //     and the stall conditions have cleared, switch to Unblocking.
    // If the status was Squashing
    //     and squashing has finished, switch to Running this cycle.

    if (fromCommit->commitInfo[tid].squash) {
        squash(tid);

        if (dispatchStatus[tid] == Blocked ||
            dispatchStatus[tid] == Unblocking) {
            toRename->iewUnblock[tid] = true;
            wroteToTimeBuffer = true;
        }

        dispatchStatus[tid] = Squashing;
        fetchRedirect[tid] = false;
        return;
    }

    if (fromCommit->commitInfo[tid].robSquashing) {
        DPRINTF(IEW, "[tid:%i] ROB is still squashing.\n", tid);

        dispatchStatus[tid] = Squashing;
        emptyRenameInsts(tid);
        wroteToTimeBuffer = true;
    }

    if (checkStall(tid)) {
        block(tid);
        dispatchStatus[tid] = Blocked;
        return;
    }

    if (dispatchStatus[tid] == Blocked) {
        // Status from previous cycle was blocked, but there are no more stall
        // conditions.  Switch over to unblocking.
        DPRINTF(IEW, "[tid:%i] Done blocking, switching to unblocking.\n",
                tid);

        dispatchStatus[tid] = Unblocking;

        unblock(tid);

        return;
    }

    if (dispatchStatus[tid] == Squashing) {
        // Switch status to running if rename isn't being told to block or
        // squash this cycle.
        DPRINTF(IEW, "[tid:%i] Done squashing, switching to running.\n",
                tid);

        dispatchStatus[tid] = Running;

        return;
    }
}

template <class Impl>
void
DefaultIEW<Impl>::sortInsts()
{
    int insts_from_rename = fromRename->size;
#ifdef DEBUG
    for (ThreadID tid = 0; tid < numThreads; tid++)
        assert(insts[tid].empty());
#endif
    for (int i = 0; i < insts_from_rename; ++i) {
        insts[fromRename->insts[i]->threadNumber].push(fromRename->insts[i]);
    }
}

template <class Impl>
void
DefaultIEW<Impl>::emptyRenameInsts(ThreadID tid)
{
    DPRINTF(IEW, "[tid:%i] Removing incoming rename instructions\n", tid);

    while (!insts[tid].empty()) {

        if (insts[tid].front()->isLoad()) {
            toRename->iewInfo[tid].dispatchedToLQ++;
        }
        if (insts[tid].front()->isStore() ||
            insts[tid].front()->isAtomic()) {
            toRename->iewInfo[tid].dispatchedToSQ++;
        }

        toRename->iewInfo[tid].dispatched++;

        insts[tid].pop();
    }
}

template <class Impl>
void
DefaultIEW<Impl>::wakeCPU()
{
    cpu->wakeCPU();
}

template <class Impl>
void
DefaultIEW<Impl>::activityThisCycle()
{
    DPRINTF(Activity, "Activity this cycle.\n");
    cpu->activityThisCycle();
}

template <class Impl>
inline void
DefaultIEW<Impl>::activateStage()
{
    DPRINTF(Activity, "Activating stage.\n");
    cpu->activateStage(O3CPU::IEWIdx);
}

template <class Impl>
inline void
DefaultIEW<Impl>::deactivateStage()
{
    DPRINTF(Activity, "Deactivating stage.\n");
    cpu->deactivateStage(O3CPU::IEWIdx);
}

template<class Impl>
void
DefaultIEW<Impl>::dispatch(ThreadID tid)
{
    // If status is Running or Idle,
    //     call dispatchInsts().
    // If status is Unblocking,
    //     buffer any instructions coming from rename,
    //     continue trying to empty the skid buffer,
    //     check if stall conditions have passed.

    if (dispatchStatus[tid] == Blocked) {
        ++iewBlockCycles;

    } else if (dispatchStatus[tid] == Squashing) {
        ++iewSquashCycles;
    }

    // Dispatch should try to dispatch as many instructions as its bandwidth
    // will allow, as long as it is not currently blocked.
    if (dispatchStatus[tid] == Running ||
        dispatchStatus[tid] == Idle) {
        DPRINTF(IEW, "[tid:%i] Not blocked, so attempting to run "
                "dispatch.\n", tid);

        dispatchInsts(tid);
    } else if (dispatchStatus[tid] == Unblocking) {
        // Make sure that the skid buffer has something in it if the
        // status is unblocking.
        assert(!skidsEmpty());

        // If the status was unblocking, then instructions from the skid
        // buffer were used.  Remove those instructions and handle
        // the rest of unblocking.
        dispatchInsts(tid);

        ++iewUnblockCycles;

        if (validInstsFromRename()) {
            // Add the current inputs to the skid buffer so they can be
            // reprocessed when this stage unblocks.
            skidInsert(tid);
        }

        unblock(tid);
    }
}

template <class Impl>
void
DefaultIEW<Impl>::dispatchInsts(ThreadID tid)
{
    // Obtain instructions from skid buffer if unblocking, or queue from rename
    // otherwise.
    std::queue<DynInstPtr> &insts_to_dispatch =
        dispatchStatus[tid] == Unblocking ?
        skidBuffer[tid] : insts[tid];

    int insts_to_add = insts_to_dispatch.size();

    DynInstPtr inst;
    bool add_to_iq = false;
    int dis_num_inst = 0;

    // Loop through the instructions, putting them in the instruction
    // queue.
    for ( ; dis_num_inst < insts_to_add &&
              dis_num_inst < dispatchWidth;
          ++dis_num_inst)
    {
        inst = insts_to_dispatch.front();

        if (dispatchStatus[tid] == Unblocking) {
            DPRINTF(IEW, "[tid:%i] Issue: Examining instruction from skid "
                    "buffer\n", tid);
        }

        // Make sure there's a valid instruction there.
        assert(inst);

        DPRINTF(IEW, "[tid:%i] Issue: Adding PC %s [sn:%lli] [tid:%i] to "
                "IQ.\n",
                tid, inst->pcState(), inst->seqNum, inst->threadNumber);

        // Be sure to mark these instructions as ready so that the
        // commit stage can go ahead and execute them, and mark
        // them as issued so the IQ doesn't reprocess them.

        // Check for squashed instructions.
        if (inst->isSquashed()) {
            DPRINTF(IEW, "[tid:%i] Issue: Squashed instruction encountered, "
                    "not adding to IQ.\n", tid);

            ++iewDispSquashedInsts;

            insts_to_dispatch.pop();

            // Tell rename that an instruction has been processed.
            if (inst->isLoad()) {
                toRename->iewInfo[tid].dispatchedToLQ++;
            }
            if (inst->isStore() || inst->isAtomic()) {
                toRename->iewInfo[tid].dispatchedToSQ++;
            }

            toRename->iewInfo[tid].dispatched++;

            continue;
        }

        // Check for full conditions.
        if (instQueue.isFull(tid)) {
            DPRINTF(IEW, "[tid:%i] Issue: IQ has become full.\n", tid);

            // Call function to start blocking.
            block(tid);

            // Set unblock to false.  Special case where we are using
            // skidbuffer (unblocking) instructions but then we still
            // get full in the IQ.
            toRename->iewUnblock[tid] = false;

            ++iewIQFullEvents;
            break;
        }

        // Check LSQ if inst is LD/ST
        if ((inst->isAtomic() && ldstQueue.sqFull(tid)) ||
            (inst->isLoad() && ldstQueue.lqFull(tid)) ||
            (inst->isStore() && ldstQueue.sqFull(tid))) {
            DPRINTF(IEW, "[tid:%i] Issue: %s has become full.\n", tid,
                    inst->isLoad() ? "LQ" : "SQ");

            // Call function to start blocking.
            block(tid);

            // Set unblock to false.  Special case where we are using
            // skidbuffer (unblocking) instructions but then we still
            // get full in the LSQ.
            toRename->iewUnblock[tid] = false;

            ++iewLSQFullEvents;
            break;
        }

        // Otherwise issue the instruction just fine.
        if (inst->isAtomic()) {
            DPRINTF(IEW, "[tid:%i] Issue: Memory instruction "
                    "encountered, adding to LSQ.\n", tid);

            ldstQueue.insertStore(inst);

            ++iewDispStoreInsts;

            // AMOs need to be set as "canCommit()"
            // so that commit can process them when they reach the
            // head of commit.
            inst->setCanCommit();
            instQueue.insertNonSpec(inst);
            add_to_iq = false;

            ++iewDispNonSpecInsts;

            toRename->iewInfo[tid].dispatchedToSQ++;
        } else if (inst->isLoad()) {
            DPRINTF(IEW, "[tid:%i] Issue: Memory instruction "
                    "encountered, adding to LSQ.\n", tid);

            // Reserve a spot in the load store queue for this
            // memory access.
            ldstQueue.insertLoad(inst);

            ++iewDispLoadInsts;

            add_to_iq = true;

            toRename->iewInfo[tid].dispatchedToLQ++;
        } else if (inst->isStore()) {
            DPRINTF(IEW, "[tid:%i] Issue: Memory instruction "
                    "encountered, adding to LSQ.\n", tid);

            ldstQueue.insertStore(inst);

            ++iewDispStoreInsts;

            if (inst->isStoreConditional()) {
                // Store conditionals need to be set as "canCommit()"
                // so that commit can process them when they reach the
                // head of commit.
                // @todo: This is somewhat specific to Alpha.
                inst->setCanCommit();
                instQueue.insertNonSpec(inst);
                add_to_iq = false;

                ++iewDispNonSpecInsts;
            } else {
                add_to_iq = true;
            }

            toRename->iewInfo[tid].dispatchedToSQ++;
        } else if (inst->isMemBarrier() || inst->isWriteBarrier()) {
            // Same as non-speculative stores.
            inst->setCanCommit();
            instQueue.insertBarrier(inst);
            add_to_iq = false;
        } else if (inst->isNop()) {
            DPRINTF(IEW, "[tid:%i] Issue: Nop instruction encountered, "
                    "skipping.\n", tid);

            inst->setIssued();
            inst->setExecuted();
            inst->setCanCommit();

            instQueue.recordProducer(inst);

            iewExecutedNop[tid]++;

            add_to_iq = false;
        } else {
            assert(!inst->isExecuted());
            add_to_iq = true;
        }

        if (add_to_iq && inst->isNonSpeculative()) {
            DPRINTF(IEW, "[tid:%i] Issue: Nonspeculative instruction "
                    "encountered, skipping.\n", tid);

            // Same as non-speculative stores.
            inst->setCanCommit();

            // Specifically insert it as nonspeculative.
            instQueue.insertNonSpec(inst);

            ++iewDispNonSpecInsts;

            add_to_iq = false;
        }

        // If the instruction queue is not full, then add the
        // instruction.
        if (add_to_iq) {
            instQueue.insert(inst);
        }

        insts_to_dispatch.pop();

        toRename->iewInfo[tid].dispatched++;

        ++iewDispatchedInsts;

#if TRACING_ON
        inst->dispatchTick = curTick() - inst->fetchTick;
#endif
        ppDispatch->notify(inst);
    }

    if (!insts_to_dispatch.empty()) {
        DPRINTF(IEW, "[tid:%i] Issue: Bandwidth Full. Blocking.\n", tid);
        block(tid);
        toRename->iewUnblock[tid] = false;
    }

    if (dispatchStatus[tid] == Idle && dis_num_inst) {
        dispatchStatus[tid] = Running;

        updatedQueues = true;
    }

    dis_num_inst = 0;
}

template <class Impl>
void
DefaultIEW<Impl>::printAvailableInsts()
{
    int inst = 0;

    std::cout << "Available Instructions: ";

    while (fromIssue->insts[inst]) {

        if (inst % 3 == 0) std::cout << "\n\t";

        std::cout << "PC: " << fromIssue->insts[inst]->pcState()
             << " TN: " << fromIssue->insts[inst]->threadNumber
             << " SN: " << fromIssue->insts[inst]->seqNum << " | ";

        inst++;

    }

    std::cout << "\n";
}

template <class Impl>
void
DefaultIEW<Impl>::executeInsts()
{
    wbNumInst = 0;
    wbCycle = 0;

    list<ThreadID>::iterator threads = activeThreads->begin();
    list<ThreadID>::iterator end = activeThreads->end();

    while (threads != end) {
        ThreadID tid = *threads++;
        fetchRedirect[tid] = false;
    }

    // Uncomment this if you want to see all available instructions.
    // @todo This doesn't actually work anymore, we should fix it.
//    printAvailableInsts();

    // Execute/writeback any instructions that are available.
    int insts_to_execute = fromIssue->size;
    int inst_num = 0;
    for (; inst_num < insts_to_execute;
          ++inst_num) {

        DPRINTF(IEW, "Execute: Executing instructions from IQ.\n");

        DynInstPtr inst = instQueue.getInstToExecute();

        DPRINTF(IEW, "Execute: Processing PC %s, [tid:%i] [sn:%llu].\n",
                inst->pcState(), inst->threadNumber, inst->seqNum);

        // Notify potential listeners that this instruction has started
        // executing
        ppExecute->notify(inst);

        // Check if the instruction is squashed; if so then skip it
        if (inst->isSquashed()) {
            DPRINTF(IEW, "Execute: Instruction was squashed. PC: %s, [tid:%i]"
                    " [sn:%llu]\n", inst->pcState(), inst->threadNumber,
                    inst->seqNum);

            // Consider this instruction executed so that commit can go
            // ahead and retire the instruction.
            inst->setExecuted();

            // Not sure if I should set this here or just let commit try to
            // commit any squashed instructions.  I like the latter a bit more.
            inst->setCanCommit();

            ++iewExecSquashedInsts;

            continue;
        }

        Fault fault = NoFault;

        // Execute instruction.
        // Note that if the instruction faults, it will be handled
        // at the commit stage.
        if (inst->isMemRef()) {
            DPRINTF(IEW, "Execute: Calculating address for memory "
                    "reference.\n");

            // Tell the LDSTQ to execute this instruction (if it is a load).
            if (inst->isAtomic()) {
                // AMOs are treated like store requests
                fault = ldstQueue.executeStore(inst);

                if (inst->isTranslationDelayed() &&
                    fault == NoFault) {
                    // A hw page table walk is currently going on; the
                    // instruction must be deferred.
                    DPRINTF(IEW, "Execute: Delayed translation, deferring "
                            "store.\n");
                    instQueue.deferMemInst(inst);
                    continue;
                }
            } else if (inst->isLoad()) {
                // Loads will mark themselves as executed, and their writeback
                // event adds the instruction to the queue to commit
                fault = ldstQueue.executeLoad(inst);

                if (inst->isTranslationDelayed() &&
                    fault == NoFault) {
                    // A hw page table walk is currently going on; the
                    // instruction must be deferred.
                    DPRINTF(IEW, "Execute: Delayed translation, deferring "
                            "load.\n");
                    instQueue.deferMemInst(inst);
                    continue;
                }

                if (inst->isDataPrefetch() || inst->isInstPrefetch()) {
                    inst->fault = NoFault;
                }
            } else if (inst->isStore()) {
                fault = ldstQueue.executeStore(inst);

                if (inst->isTranslationDelayed() &&
                    fault == NoFault) {
                    // A hw page table walk is currently going on; the
                    // instruction must be deferred.
                    DPRINTF(IEW, "Execute: Delayed translation, deferring "
                            "store.\n");
                    instQueue.deferMemInst(inst);
                    continue;
                }

                // If the store had a fault then it may not have a mem req
                if (fault != NoFault || !inst->readPredicate() ||
                    !inst->isStoreConditional()) {
                    // If the instruction faulted, then we need to send it
                    // along to commit without the instruction completing.
                    // Send this instruction to commit, also make sure iew
                    // stage realizes there is activity.
                    inst->setExecuted();
                    instToCommit(inst);
                    activityThisCycle();
                }

                // Store conditionals will mark themselves as
                // executed, and their writeback event will add the
                // instruction to the queue to commit.
            } else {
                panic("Unexpected memory type!\n");
            }

        } else {
            // If the instruction has already faulted, then skip executing it.
            // Such case can happen when it faulted during ITLB translation.
            // If we execute the instruction (even if it's a nop) the fault
            // will be replaced and we will lose it.
            if (inst->getFault() == NoFault) {
                inst->execute();
                if (!inst->readPredicate())
                    inst->forwardOldRegs();
            }

            inst->setExecuted();

            instToCommit(inst);
        }

        updateExeInstStats(inst);

        // Check if branch prediction was correct; if not then we need
        // to tell commit to squash in-flight instructions.  Only
        // handle this if there hasn't already been something that
        // redirects fetch in this group of instructions.

        // This probably needs to prioritize the redirects if a different
        // scheduler is used.  Currently the scheduler schedules the oldest
        // instruction first, so the branch resolution order will be correct.
        ThreadID tid = inst->threadNumber;

        if (!fetchRedirect[tid] ||
            !toCommit->squash[tid] ||
            toCommit->squashedSeqNum[tid] > inst->seqNum) {

            // Prevent testing for misprediction on load instructions,
            // that have not been executed.
            bool loadNotExecuted = !inst->isExecuted() && inst->isLoad();

            if (inst->mispredicted() && !loadNotExecuted) {
                fetchRedirect[tid] = true;

                DPRINTF(IEW, "[tid:%i] [sn:%llu] Execute: "
                        "Branch mispredict detected.\n",
                        tid, inst->seqNum);
                DPRINTF(IEW, "[tid:%i] [sn:%llu] "
                        "Predicted target was PC: %s\n",
                        tid, inst->seqNum, inst->readPredTarg());
                DPRINTF(IEW, "[tid:%i] [sn:%llu] Execute: "
                        "Redirecting fetch to PC: %s\n",
                        tid, inst->seqNum, inst->pcState());
                // If incorrect, then signal the ROB that it must be squashed.
                squashDueToBranch(inst, tid);

                ppMispredict->notify(inst);

                if (inst->readPredTaken()) {
                    predictedTakenIncorrect++;
                } else {
                    predictedNotTakenIncorrect++;
                }
            } else if (ldstQueue.violation(tid)) {
                assert(inst->isMemRef());
                // If there was an ordering violation, then get the
                // DynInst that caused the violation.  Note that this
                // clears the violation signal.
                DynInstPtr violator;
                violator = ldstQueue.getMemDepViolator(tid);

                DPRINTF(IEW, "LDSTQ detected a violation. Violator PC: %s "
                        "[sn:%lli], inst PC: %s [sn:%lli]. Addr is: %#x.\n",
                        violator->pcState(), violator->seqNum,
                        inst->pcState(), inst->seqNum, inst->physEffAddr);

                fetchRedirect[tid] = true;

                // Tell the instruction queue that a violation has occurred.
                instQueue.violation(inst, violator);

                // Squash.
                squashDueToMemOrder(violator, tid);

                ++memOrderViolationEvents;
            }
        } else {
            // Reset any state associated with redirects that will not
            // be used.
            if (ldstQueue.violation(tid)) {
                assert(inst->isMemRef());

                DynInstPtr violator = ldstQueue.getMemDepViolator(tid);

                DPRINTF(IEW, "LDSTQ detected a violation. Violator PC: "
                        "%s, inst PC: %s. Addr is: %#x.\n",
                        violator->pcState(), inst->pcState(),
                        inst->physEffAddr);
                DPRINTF(IEW, "Violation will not be handled because "
                        "already squashing\n");

                ++memOrderViolationEvents;
            }
        }
    }

    // Update and record activity if we processed any instructions.
    if (inst_num) {
        if (exeStatus == Idle) {
            exeStatus = Running;
        }

        updatedQueues = true;

        cpu->activityThisCycle();
    }

    // Need to reset this in case a writeback event needs to write into the
    // iew queue.  That way the writeback event will write into the correct
    // spot in the queue.
    wbNumInst = 0;

}

template <class Impl>
void
DefaultIEW<Impl>::writebackInsts()
{
    // Loop through the head of the time buffer and wake any
    // dependents.  These instructions are about to write back.  Also
    // mark scoreboard that this instruction is finally complete.
    // Either have IEW have direct access to scoreboard, or have this
    // as part of backwards communication.
    for (int inst_num = 0; inst_num < wbWidth &&
             toCommit->insts[inst_num]; inst_num++) {
        DynInstPtr inst = toCommit->insts[inst_num];
        ThreadID tid = inst->threadNumber;

        DPRINTF(IEW, "Sending instructions to commit, [sn:%lli] PC %s.\n",
                inst->seqNum, inst->pcState());

        iewInstsToCommit[tid]++;
        // Notify potential listeners that execution is complete for this
        // instruction.
        ppToCommit->notify(inst);

        // Some instructions will be sent to commit without having
        // executed because they need commit to handle them.
        // E.g. strictly ordered loads have not actually executed when they
        // are first sent to commit.  Instead commit must tell the LSQ
        // when it's ready to execute the strictly ordered load.
        if (!inst->isSquashed() && inst->isExecuted() &&
            inst->getFault() == NoFault) {
            int dependents = instQueue.wakeDependents(inst);

            for (int i = 0; i < inst->numDestRegs(); i++) {
                // Mark the destination register as ready.
                DPRINTF(IEW, "Setting Destination Register %i (%s)\n",
                        inst->renamedDestRegIdx(i)->index(),
                        inst->renamedDestRegIdx(i)->className());
                scoreboard->setReg(inst->renamedDestRegIdx(i));
            }

            if (dependents) {
                producerInst[tid]++;
                consumerInst[tid] += dependents;
            }
            writebackCount[tid]++;
        }
    }
}

template<class Impl>
void
DefaultIEW<Impl>::tick()
{
    wbNumInst = 0;
    wbCycle = 0;

    wroteToTimeBuffer = false;
    updatedQueues = false;

    ldstQueue.tick();

    sortInsts();

    // Free function units marked as being freed this cycle.
    fuPool->processFreeUnits();

    list<ThreadID>::iterator threads = activeThreads->begin();
    list<ThreadID>::iterator end = activeThreads->end();

    // Check stall and squash signals, dispatch any instructions.
    while (threads != end) {
        ThreadID tid = *threads++;

        DPRINTF(IEW, "Issue: Processing [tid:%i]\n", tid);

        checkSignalsAndUpdate(tid);
        dispatch(tid);
    }

    if (exeStatus != Squashing) {
        executeInsts();

        writebackInsts();

        // Have the instruction queue try to schedule any ready instructions.
        // (In actuality, this scheduling is for instructions that will
        // be executed next cycle.)
        instQueue.scheduleReadyInsts();

        // Also should advance its own time buffers if the stage ran.
        // Not the best place for it, but this works (hopefully).
        issueToExecQueue.advance();
    }

    bool broadcast_free_entries = false;

    if (updatedQueues || exeStatus == Running || updateLSQNextCycle) {
        exeStatus = Idle;
        updateLSQNextCycle = false;

        broadcast_free_entries = true;
    }

    // Writeback any stores using any leftover bandwidth.
    ldstQueue.writebackStores();

    // Check the committed load/store signals to see if there's a load
    // or store to commit.  Also check if it's being told to execute a
    // nonspeculative instruction.
    // This is pretty inefficient...

    threads = activeThreads->begin();
    while (threads != end) {
        ThreadID tid = (*threads++);

        DPRINTF(IEW, "Processing [tid:%i]\n", tid);

        // Update structures based on instructions committed.
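        // doneSeqNum is the youngest instruction committed by this thread
        // this cycle; loads, stores, and IQ entries up to and including that
        // sequence number can now be released.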
        if (fromCommit->commitInfo[tid].doneSeqNum != 0 &&
            !fromCommit->commitInfo[tid].squash &&
            !fromCommit->commitInfo[tid].robSquashing) {

            ldstQueue.commitStores(fromCommit->commitInfo[tid].doneSeqNum, tid);

            ldstQueue.commitLoads(fromCommit->commitInfo[tid].doneSeqNum, tid);

            updateLSQNextCycle = true;
            instQueue.commit(fromCommit->commitInfo[tid].doneSeqNum, tid);
        }

        if (fromCommit->commitInfo[tid].nonSpecSeqNum != 0) {

            //DPRINTF(IEW,"NonspecInst from thread %i",tid);
            if (fromCommit->commitInfo[tid].strictlyOrdered) {
                instQueue.replayMemInst(
                    fromCommit->commitInfo[tid].strictlyOrderedLoad);
                fromCommit->commitInfo[tid].strictlyOrderedLoad->setAtCommit();
            } else {
                instQueue.scheduleNonSpec(
                    fromCommit->commitInfo[tid].nonSpecSeqNum);
            }
        }

        if (broadcast_free_entries) {
            toFetch->iewInfo[tid].iqCount =
                instQueue.getCount(tid);
            toFetch->iewInfo[tid].ldstqCount =
                ldstQueue.getCount(tid);

            toRename->iewInfo[tid].usedIQ = true;
            toRename->iewInfo[tid].freeIQEntries =
                instQueue.numFreeEntries(tid);
            toRename->iewInfo[tid].usedLSQ = true;

            toRename->iewInfo[tid].freeLQEntries =
                ldstQueue.numFreeLoadEntries(tid);
            toRename->iewInfo[tid].freeSQEntries =
                ldstQueue.numFreeStoreEntries(tid);

            wroteToTimeBuffer = true;
        }

        DPRINTF(IEW, "[tid:%i], Dispatch dispatched %i instructions.\n",
                tid, toRename->iewInfo[tid].dispatched);
    }

    DPRINTF(IEW, "IQ has %i free entries (Can schedule: %i). "
            "LQ has %i free entries. SQ has %i free entries.\n",
            instQueue.numFreeEntries(), instQueue.hasReadyInsts(),
            ldstQueue.numFreeLoadEntries(), ldstQueue.numFreeStoreEntries());

    updateStatus();

    if (wroteToTimeBuffer) {
        DPRINTF(Activity, "Activity this cycle.\n");
        cpu->activityThisCycle();
    }
}

template <class Impl>
void
DefaultIEW<Impl>::updateExeInstStats(const DynInstPtr& inst)
{
    ThreadID tid = inst->threadNumber;

    iewExecutedInsts++;

#if TRACING_ON
    if (DTRACE(O3PipeView)) {
        inst->completeTick = curTick() - inst->fetchTick;
    }
#endif

    //
    //  Control operations
    //
    if (inst->isControl())
        iewExecutedBranches[tid]++;

    //
    //  Memory operations
    //
    if (inst->isMemRef()) {
        iewExecutedRefs[tid]++;

        if (inst->isLoad()) {
            iewExecLoadInsts[tid]++;
        }
    }
}

template <class Impl>
void
DefaultIEW<Impl>::checkMisprediction(const DynInstPtr& inst)
{
    ThreadID tid = inst->threadNumber;

    if (!fetchRedirect[tid] ||
        !toCommit->squash[tid] ||
        toCommit->squashedSeqNum[tid] > inst->seqNum) {

        if (inst->mispredicted()) {
            fetchRedirect[tid] = true;

            DPRINTF(IEW, "[tid:%i] [sn:%llu] Execute: "
                    "Branch mispredict detected.\n",
                    tid, inst->seqNum);
            DPRINTF(IEW, "[tid:%i] [sn:%llu] Predicted target "
                    "was PC:%#x, NPC:%#x\n",
                    tid, inst->seqNum,
                    inst->predInstAddr(), inst->predNextInstAddr());
            DPRINTF(IEW, "[tid:%i] [sn:%llu] Execute: "
                    "Redirecting fetch to PC: %#x, "
                    "NPC: %#x.\n",
                    tid, inst->seqNum,
                    inst->nextInstAddr(),
                    inst->nextInstAddr());
            // If incorrect, then signal the ROB that it must be squashed.
            squashDueToBranch(inst, tid);

            if (inst->readPredTaken()) {
                predictedTakenIncorrect++;
            } else {
                predictedNotTakenIncorrect++;
            }
        }
    }
}

#endif // __CPU_O3_IEW_IMPL_IMPL_HH__