351 scoreboard = sb_ptr; 352} 353 354template <class Impl> 355bool 356DefaultIEW<Impl>::drain() 357{ 358 // IEW is ready to drain at any time. 359 cpu->signalDrained(); 360 return true; 361} 362 363template <class Impl> 364void 365DefaultIEW<Impl>::resume() 366{ 367} 368 369template <class Impl> 370void 371DefaultIEW<Impl>::switchOut() 372{ 373 // Clear any state. 374 switchedOut = true; 375 assert(insts[0].empty()); 376 assert(skidBuffer[0].empty()); 377 378 instQueue.switchOut(); 379 ldstQueue.switchOut(); 380 fuPool->switchOut(); 381 382 for (int i = 0; i < numThreads; i++) { 383 while (!insts[i].empty()) 384 insts[i].pop(); 385 while (!skidBuffer[i].empty()) 386 skidBuffer[i].pop(); 387 } 388} 389 390template <class Impl> 391void 392DefaultIEW<Impl>::takeOverFrom() 393{ 394 // Reset all state. 395 _status = Active; 396 exeStatus = Running; 397 wbStatus = Idle; 398 switchedOut = false; 399 400 instQueue.takeOverFrom(); 401 ldstQueue.takeOverFrom(); 402 fuPool->takeOverFrom(); 403 404 initStage(); 405 cpu->activityThisCycle(); 406 407 for (int i=0; i < numThreads; i++) { 408 dispatchStatus[i] = Running; 409 stalls[i].commit = false; 410 fetchRedirect[i] = false; 411 } 412 413 updateLSQNextCycle = false; 414 415 for (int i = 0; i < issueToExecQueue.getSize(); ++i) { 416 issueToExecQueue.advance(); 417 } 418} 419 420template<class Impl> 421void 422DefaultIEW<Impl>::squash(unsigned tid) 423{ 424 DPRINTF(IEW, "[tid:%i]: Squashing all instructions.\n", 425 tid); 426 427 // Tell the IQ to start squashing. 428 instQueue.squash(tid); 429 430 // Tell the LDSTQ to start squashing. 431#if ISA_HAS_DELAY_SLOT 432 ldstQueue.squash(fromCommit->commitInfo[tid].bdelayDoneSeqNum, tid); 433#else 434 ldstQueue.squash(fromCommit->commitInfo[tid].doneSeqNum, tid); 435#endif 436 updatedQueues = true; 437 438 // Clear the skid buffer in case it has any data in it. 
439 DPRINTF(IEW, "[tid:%i]: Removing skidbuffer instructions until [sn:%i].\n", 440 tid, fromCommit->commitInfo[tid].bdelayDoneSeqNum); 441 442 while (!skidBuffer[tid].empty()) { 443#if ISA_HAS_DELAY_SLOT 444 if (skidBuffer[tid].front()->seqNum <= 445 fromCommit->commitInfo[tid].bdelayDoneSeqNum) { 446 DPRINTF(IEW, "[tid:%i]: Cannot remove skidbuffer instructions " 447 "that occur before delay slot [sn:%i].\n", 448 fromCommit->commitInfo[tid].bdelayDoneSeqNum, 449 tid); 450 break; 451 } else { 452 DPRINTF(IEW, "[tid:%i]: Removing instruction [sn:%i] from " 453 "skidBuffer.\n", tid, skidBuffer[tid].front()->seqNum); 454 } 455#endif 456 if (skidBuffer[tid].front()->isLoad() || 457 skidBuffer[tid].front()->isStore() ) { 458 toRename->iewInfo[tid].dispatchedToLSQ++; 459 } 460 461 toRename->iewInfo[tid].dispatched++; 462 463 skidBuffer[tid].pop(); 464 } 465 466 bdelayDoneSeqNum[tid] = fromCommit->commitInfo[tid].bdelayDoneSeqNum; 467 468 emptyRenameInsts(tid); 469} 470 471template<class Impl> 472void 473DefaultIEW<Impl>::squashDueToBranch(DynInstPtr &inst, unsigned tid) 474{ 475 DPRINTF(IEW, "[tid:%i]: Squashing from a specific instruction, PC: %#x " 476 "[sn:%i].\n", tid, inst->readPC(), inst->seqNum); 477 478 toCommit->squash[tid] = true; 479 toCommit->squashedSeqNum[tid] = inst->seqNum; 480 toCommit->mispredPC[tid] = inst->readPC(); 481 toCommit->branchMispredict[tid] = true; 482 483 int instSize = sizeof(TheISA::MachInst); 484#if ISA_HAS_DELAY_SLOT 485 bool branch_taken = 486 !(inst->readNextPC() + instSize == inst->readNextNPC() && 487 (inst->readNextPC() == inst->readPC() + instSize || 488 inst->readNextPC() == inst->readPC() + 2 * instSize)); 489 DPRINTF(Sparc, "Branch taken = %s [sn:%i]\n", 490 branch_taken ? 
"true": "false", inst->seqNum); 491 492 toCommit->branchTaken[tid] = branch_taken; 493 494 bool squashDelaySlot = true; 495// (inst->readNextPC() != inst->readPC() + sizeof(TheISA::MachInst)); 496 DPRINTF(Sparc, "Squash delay slot = %s [sn:%i]\n", 497 squashDelaySlot ? "true": "false", inst->seqNum); 498 toCommit->squashDelaySlot[tid] = squashDelaySlot; 499 //If we're squashing the delay slot, we need to pick back up at NextPC. 500 //Otherwise, NextPC isn't being squashed, so we should pick back up at 501 //NextNPC. 502 if (squashDelaySlot) { 503 toCommit->nextPC[tid] = inst->readNextPC(); 504 toCommit->nextNPC[tid] = inst->readNextNPC(); 505 } else { 506 toCommit->nextPC[tid] = inst->readNextNPC(); 507 toCommit->nextNPC[tid] = inst->readNextNPC() + instSize; 508 } 509#else 510 toCommit->branchTaken[tid] = inst->readNextPC() != 511 (inst->readPC() + sizeof(TheISA::MachInst)); 512 toCommit->nextPC[tid] = inst->readNextPC(); 513 toCommit->nextNPC[tid] = inst->readNextPC() + instSize; 514#endif 515 516 toCommit->includeSquashInst[tid] = false; 517 518 wroteToTimeBuffer = true; 519} 520 521template<class Impl> 522void 523DefaultIEW<Impl>::squashDueToMemOrder(DynInstPtr &inst, unsigned tid) 524{ 525 DPRINTF(IEW, "[tid:%i]: Squashing from a specific instruction, " 526 "PC: %#x [sn:%i].\n", tid, inst->readPC(), inst->seqNum); 527 528 toCommit->squash[tid] = true; 529 toCommit->squashedSeqNum[tid] = inst->seqNum; 530 toCommit->nextPC[tid] = inst->readNextPC(); 531#if ISA_HAS_DELAY_SLOT 532 toCommit->nextNPC[tid] = inst->readNextNPC(); 533#else 534 toCommit->nextNPC[tid] = inst->readNextPC() + sizeof(TheISA::MachInst); 535#endif 536 toCommit->branchMispredict[tid] = false; 537 538 toCommit->includeSquashInst[tid] = false; 539 540 wroteToTimeBuffer = true; 541} 542 543template<class Impl> 544void 545DefaultIEW<Impl>::squashDueToMemBlocked(DynInstPtr &inst, unsigned tid) 546{ 547 DPRINTF(IEW, "[tid:%i]: Memory blocked, squashing load and younger insts, " 548 "PC: %#x 
[sn:%i].\n", tid, inst->readPC(), inst->seqNum); 549 550 toCommit->squash[tid] = true; 551 toCommit->squashedSeqNum[tid] = inst->seqNum; 552 toCommit->nextPC[tid] = inst->readPC(); 553#if ISA_HAS_DELAY_SLOT 554 toCommit->nextNPC[tid] = inst->readNextPC(); 555#else 556 toCommit->nextNPC[tid] = inst->readPC() + sizeof(TheISA::MachInst); 557#endif 558 toCommit->branchMispredict[tid] = false; 559 560 // Must include the broadcasted SN in the squash. 561 toCommit->includeSquashInst[tid] = true; 562 563 ldstQueue.setLoadBlockedHandled(tid); 564 565 wroteToTimeBuffer = true; 566} 567 568template<class Impl> 569void 570DefaultIEW<Impl>::block(unsigned tid) 571{ 572 DPRINTF(IEW, "[tid:%u]: Blocking.\n", tid); 573 574 if (dispatchStatus[tid] != Blocked && 575 dispatchStatus[tid] != Unblocking) { 576 toRename->iewBlock[tid] = true; 577 wroteToTimeBuffer = true; 578 } 579 580 // Add the current inputs to the skid buffer so they can be 581 // reprocessed when this stage unblocks. 582 skidInsert(tid); 583 584 dispatchStatus[tid] = Blocked; 585} 586 587template<class Impl> 588void 589DefaultIEW<Impl>::unblock(unsigned tid) 590{ 591 DPRINTF(IEW, "[tid:%i]: Reading instructions out of the skid " 592 "buffer %u.\n",tid, tid); 593 594 // If the skid bufffer is empty, signal back to previous stages to unblock. 595 // Also switch status to running. 
596 if (skidBuffer[tid].empty()) { 597 toRename->iewUnblock[tid] = true; 598 wroteToTimeBuffer = true; 599 DPRINTF(IEW, "[tid:%i]: Done unblocking.\n",tid); 600 dispatchStatus[tid] = Running; 601 } 602} 603 604template<class Impl> 605void 606DefaultIEW<Impl>::wakeDependents(DynInstPtr &inst) 607{ 608 instQueue.wakeDependents(inst); 609} 610 611template<class Impl> 612void 613DefaultIEW<Impl>::rescheduleMemInst(DynInstPtr &inst) 614{ 615 instQueue.rescheduleMemInst(inst); 616} 617 618template<class Impl> 619void 620DefaultIEW<Impl>::replayMemInst(DynInstPtr &inst) 621{ 622 instQueue.replayMemInst(inst); 623} 624 625template<class Impl> 626void 627DefaultIEW<Impl>::instToCommit(DynInstPtr &inst) 628{ 629 // This function should not be called after writebackInsts in a 630 // single cycle. That will cause problems with an instruction 631 // being added to the queue to commit without being processed by 632 // writebackInsts prior to being sent to commit. 633 634 // First check the time slot that this instruction will write 635 // to. If there are free write ports at the time, then go ahead 636 // and write the instruction to that time. If there are not, 637 // keep looking back to see where's the first time there's a 638 // free slot. 639 while ((*iewQueue)[wbCycle].insts[wbNumInst]) { 640 ++wbNumInst; 641 if (wbNumInst == wbWidth) { 642 ++wbCycle; 643 wbNumInst = 0; 644 } 645 646 assert((wbCycle * wbWidth + wbNumInst) <= wbMax); 647 } 648 649 DPRINTF(IEW, "Current wb cycle: %i, width: %i, numInst: %i\nwbActual:%i\n", 650 wbCycle, wbWidth, wbNumInst, wbCycle * wbWidth + wbNumInst); 651 // Add finished instruction to queue to commit. 
652 (*iewQueue)[wbCycle].insts[wbNumInst] = inst; 653 (*iewQueue)[wbCycle].size++; 654} 655 656template <class Impl> 657unsigned 658DefaultIEW<Impl>::validInstsFromRename() 659{ 660 unsigned inst_count = 0; 661 662 for (int i=0; i<fromRename->size; i++) { 663 if (!fromRename->insts[i]->isSquashed()) 664 inst_count++; 665 } 666 667 return inst_count; 668} 669 670template<class Impl> 671void 672DefaultIEW<Impl>::skidInsert(unsigned tid) 673{ 674 DynInstPtr inst = NULL; 675 676 while (!insts[tid].empty()) { 677 inst = insts[tid].front(); 678 679 insts[tid].pop(); 680 681 DPRINTF(Decode,"[tid:%i]: Inserting [sn:%lli] PC:%#x into " 682 "dispatch skidBuffer %i\n",tid, inst->seqNum, 683 inst->readPC(),tid); 684 685 skidBuffer[tid].push(inst); 686 } 687 688 assert(skidBuffer[tid].size() <= skidBufferMax && 689 "Skidbuffer Exceeded Max Size"); 690} 691 692template<class Impl> 693int 694DefaultIEW<Impl>::skidCount() 695{ 696 int max=0; 697 698 std::list<unsigned>::iterator threads = activeThreads->begin(); 699 std::list<unsigned>::iterator end = activeThreads->end(); 700 701 while (threads != end) { 702 unsigned tid = *threads++; 703 unsigned thread_count = skidBuffer[tid].size(); 704 if (max < thread_count) 705 max = thread_count; 706 } 707 708 return max; 709} 710 711template<class Impl> 712bool 713DefaultIEW<Impl>::skidsEmpty() 714{ 715 std::list<unsigned>::iterator threads = activeThreads->begin(); 716 std::list<unsigned>::iterator end = activeThreads->end(); 717 718 while (threads != end) { 719 unsigned tid = *threads++; 720 721 if (!skidBuffer[tid].empty()) 722 return false; 723 } 724 725 return true; 726} 727 728template <class Impl> 729void 730DefaultIEW<Impl>::updateStatus() 731{ 732 bool any_unblocking = false; 733 734 std::list<unsigned>::iterator threads = activeThreads->begin(); 735 std::list<unsigned>::iterator end = activeThreads->end(); 736 737 while (threads != end) { 738 unsigned tid = *threads++; 739 740 if (dispatchStatus[tid] == Unblocking) { 741 
any_unblocking = true; 742 break; 743 } 744 } 745 746 // If there are no ready instructions waiting to be scheduled by the IQ, 747 // and there's no stores waiting to write back, and dispatch is not 748 // unblocking, then there is no internal activity for the IEW stage. 749 if (_status == Active && !instQueue.hasReadyInsts() && 750 !ldstQueue.willWB() && !any_unblocking) { 751 DPRINTF(IEW, "IEW switching to idle\n"); 752 753 deactivateStage(); 754 755 _status = Inactive; 756 } else if (_status == Inactive && (instQueue.hasReadyInsts() || 757 ldstQueue.willWB() || 758 any_unblocking)) { 759 // Otherwise there is internal activity. Set to active. 760 DPRINTF(IEW, "IEW switching to active\n"); 761 762 activateStage(); 763 764 _status = Active; 765 } 766} 767 768template <class Impl> 769void 770DefaultIEW<Impl>::resetEntries() 771{ 772 instQueue.resetEntries(); 773 ldstQueue.resetEntries(); 774} 775 776template <class Impl> 777void 778DefaultIEW<Impl>::readStallSignals(unsigned tid) 779{ 780 if (fromCommit->commitBlock[tid]) { 781 stalls[tid].commit = true; 782 } 783 784 if (fromCommit->commitUnblock[tid]) { 785 assert(stalls[tid].commit); 786 stalls[tid].commit = false; 787 } 788} 789 790template <class Impl> 791bool 792DefaultIEW<Impl>::checkStall(unsigned tid) 793{ 794 bool ret_val(false); 795 796 if (stalls[tid].commit) { 797 DPRINTF(IEW,"[tid:%i]: Stall from Commit stage detected.\n",tid); 798 ret_val = true; 799 } else if (instQueue.isFull(tid)) { 800 DPRINTF(IEW,"[tid:%i]: Stall: IQ is full.\n",tid); 801 ret_val = true; 802 } else if (ldstQueue.isFull(tid)) { 803 DPRINTF(IEW,"[tid:%i]: Stall: LSQ is full\n",tid); 804 805 if (ldstQueue.numLoads(tid) > 0 ) { 806 807 DPRINTF(IEW,"[tid:%i]: LSQ oldest load: [sn:%i] \n", 808 tid,ldstQueue.getLoadHeadSeqNum(tid)); 809 } 810 811 if (ldstQueue.numStores(tid) > 0) { 812 813 DPRINTF(IEW,"[tid:%i]: LSQ oldest store: [sn:%i] \n", 814 tid,ldstQueue.getStoreHeadSeqNum(tid)); 815 } 816 817 ret_val = true; 818 } else if 
(ldstQueue.isStalled(tid)) { 819 DPRINTF(IEW,"[tid:%i]: Stall: LSQ stall detected.\n",tid); 820 ret_val = true; 821 } 822 823 return ret_val; 824} 825 826template <class Impl> 827void 828DefaultIEW<Impl>::checkSignalsAndUpdate(unsigned tid) 829{ 830 // Check if there's a squash signal, squash if there is 831 // Check stall signals, block if there is. 832 // If status was Blocked 833 // if so then go to unblocking 834 // If status was Squashing 835 // check if squashing is not high. Switch to running this cycle. 836 837 readStallSignals(tid); 838 839 if (fromCommit->commitInfo[tid].squash) { 840 squash(tid); 841 842 if (dispatchStatus[tid] == Blocked || 843 dispatchStatus[tid] == Unblocking) { 844 toRename->iewUnblock[tid] = true; 845 wroteToTimeBuffer = true; 846 } 847 848 dispatchStatus[tid] = Squashing; 849 850 fetchRedirect[tid] = false; 851 return; 852 } 853 854 if (fromCommit->commitInfo[tid].robSquashing) { 855 DPRINTF(IEW, "[tid:%i]: ROB is still squashing.\n", tid); 856 857 dispatchStatus[tid] = Squashing; 858 859 emptyRenameInsts(tid); 860 wroteToTimeBuffer = true; 861 return; 862 } 863 864 if (checkStall(tid)) { 865 block(tid); 866 dispatchStatus[tid] = Blocked; 867 return; 868 } 869 870 if (dispatchStatus[tid] == Blocked) { 871 // Status from previous cycle was blocked, but there are no more stall 872 // conditions. Switch over to unblocking. 873 DPRINTF(IEW, "[tid:%i]: Done blocking, switching to unblocking.\n", 874 tid); 875 876 dispatchStatus[tid] = Unblocking; 877 878 unblock(tid); 879 880 return; 881 } 882 883 if (dispatchStatus[tid] == Squashing) { 884 // Switch status to running if rename isn't being told to block or 885 // squash this cycle. 
886 DPRINTF(IEW, "[tid:%i]: Done squashing, switching to running.\n", 887 tid); 888 889 dispatchStatus[tid] = Running; 890 891 return; 892 } 893} 894 895template <class Impl> 896void 897DefaultIEW<Impl>::sortInsts() 898{ 899 int insts_from_rename = fromRename->size; 900#ifdef DEBUG 901#if !ISA_HAS_DELAY_SLOT 902 for (int i = 0; i < numThreads; i++) 903 assert(insts[i].empty()); 904#endif 905#endif 906 for (int i = 0; i < insts_from_rename; ++i) { 907 insts[fromRename->insts[i]->threadNumber].push(fromRename->insts[i]); 908 } 909} 910 911template <class Impl> 912void 913DefaultIEW<Impl>::emptyRenameInsts(unsigned tid) 914{ 915 DPRINTF(IEW, "[tid:%i]: Removing incoming rename instructions until " 916 "[sn:%i].\n", tid, bdelayDoneSeqNum[tid]); 917 918 while (!insts[tid].empty()) { 919#if ISA_HAS_DELAY_SLOT 920 if (insts[tid].front()->seqNum <= bdelayDoneSeqNum[tid]) { 921 DPRINTF(IEW, "[tid:%i]: Done removing, cannot remove instruction" 922 " that occurs at or before delay slot [sn:%i].\n", 923 tid, bdelayDoneSeqNum[tid]); 924 break; 925 } else { 926 DPRINTF(IEW, "[tid:%i]: Removing incoming rename instruction " 927 "[sn:%i].\n", tid, insts[tid].front()->seqNum); 928 } 929#endif 930 931 if (insts[tid].front()->isLoad() || 932 insts[tid].front()->isStore() ) { 933 toRename->iewInfo[tid].dispatchedToLSQ++; 934 } 935 936 toRename->iewInfo[tid].dispatched++; 937 938 insts[tid].pop(); 939 } 940} 941 942template <class Impl> 943void 944DefaultIEW<Impl>::wakeCPU() 945{ 946 cpu->wakeCPU(); 947} 948 949template <class Impl> 950void 951DefaultIEW<Impl>::activityThisCycle() 952{ 953 DPRINTF(Activity, "Activity this cycle.\n"); 954 cpu->activityThisCycle(); 955} 956 957template <class Impl> 958inline void 959DefaultIEW<Impl>::activateStage() 960{ 961 DPRINTF(Activity, "Activating stage.\n"); 962 cpu->activateStage(O3CPU::IEWIdx); 963} 964 965template <class Impl> 966inline void 967DefaultIEW<Impl>::deactivateStage() 968{ 969 DPRINTF(Activity, "Deactivating stage.\n"); 970 
cpu->deactivateStage(O3CPU::IEWIdx); 971} 972 973template<class Impl> 974void 975DefaultIEW<Impl>::dispatch(unsigned tid) 976{ 977 // If status is Running or idle, 978 // call dispatchInsts() 979 // If status is Unblocking, 980 // buffer any instructions coming from rename 981 // continue trying to empty skid buffer 982 // check if stall conditions have passed 983 984 if (dispatchStatus[tid] == Blocked) { 985 ++iewBlockCycles; 986 987 } else if (dispatchStatus[tid] == Squashing) { 988 ++iewSquashCycles; 989 } 990 991 // Dispatch should try to dispatch as many instructions as its bandwidth 992 // will allow, as long as it is not currently blocked. 993 if (dispatchStatus[tid] == Running || 994 dispatchStatus[tid] == Idle) { 995 DPRINTF(IEW, "[tid:%i] Not blocked, so attempting to run " 996 "dispatch.\n", tid); 997 998 dispatchInsts(tid); 999 } else if (dispatchStatus[tid] == Unblocking) { 1000 // Make sure that the skid buffer has something in it if the 1001 // status is unblocking. 1002 assert(!skidsEmpty()); 1003 1004 // If the status was unblocking, then instructions from the skid 1005 // buffer were used. Remove those instructions and handle 1006 // the rest of unblocking. 1007 dispatchInsts(tid); 1008 1009 ++iewUnblockCycles; 1010 1011 if (validInstsFromRename() && dispatchedAllInsts) { 1012 // Add the current inputs to the skid buffer so they can be 1013 // reprocessed when this stage unblocks. 1014 skidInsert(tid); 1015 } 1016 1017 unblock(tid); 1018 } 1019} 1020 1021template <class Impl> 1022void 1023DefaultIEW<Impl>::dispatchInsts(unsigned tid) 1024{ 1025 dispatchedAllInsts = true; 1026 1027 // Obtain instructions from skid buffer if unblocking, or queue from rename 1028 // otherwise. 1029 std::queue<DynInstPtr> &insts_to_dispatch = 1030 dispatchStatus[tid] == Unblocking ? 
1031 skidBuffer[tid] : insts[tid]; 1032 1033 int insts_to_add = insts_to_dispatch.size(); 1034 1035 DynInstPtr inst; 1036 bool add_to_iq = false; 1037 int dis_num_inst = 0; 1038 1039 // Loop through the instructions, putting them in the instruction 1040 // queue. 1041 for ( ; dis_num_inst < insts_to_add && 1042 dis_num_inst < dispatchWidth; 1043 ++dis_num_inst) 1044 { 1045 inst = insts_to_dispatch.front(); 1046 1047 if (dispatchStatus[tid] == Unblocking) { 1048 DPRINTF(IEW, "[tid:%i]: Issue: Examining instruction from skid " 1049 "buffer\n", tid); 1050 } 1051 1052 // Make sure there's a valid instruction there. 1053 assert(inst); 1054 1055 DPRINTF(IEW, "[tid:%i]: Issue: Adding PC %#x [sn:%lli] [tid:%i] to " 1056 "IQ.\n", 1057 tid, inst->readPC(), inst->seqNum, inst->threadNumber); 1058 1059 // Be sure to mark these instructions as ready so that the 1060 // commit stage can go ahead and execute them, and mark 1061 // them as issued so the IQ doesn't reprocess them. 1062 1063 // Check for squashed instructions. 1064 if (inst->isSquashed()) { 1065 DPRINTF(IEW, "[tid:%i]: Issue: Squashed instruction encountered, " 1066 "not adding to IQ.\n", tid); 1067 1068 ++iewDispSquashedInsts; 1069 1070 insts_to_dispatch.pop(); 1071 1072 //Tell Rename That An Instruction has been processed 1073 if (inst->isLoad() || inst->isStore()) { 1074 toRename->iewInfo[tid].dispatchedToLSQ++; 1075 } 1076 toRename->iewInfo[tid].dispatched++; 1077 1078 continue; 1079 } 1080 1081 // Check for full conditions. 1082 if (instQueue.isFull(tid)) { 1083 DPRINTF(IEW, "[tid:%i]: Issue: IQ has become full.\n", tid); 1084 1085 // Call function to start blocking. 1086 block(tid); 1087 1088 // Set unblock to false. Special case where we are using 1089 // skidbuffer (unblocking) instructions but then we still 1090 // get full in the IQ. 
1091 toRename->iewUnblock[tid] = false; 1092 1093 dispatchedAllInsts = false; 1094 1095 ++iewIQFullEvents; 1096 break; 1097 } else if (ldstQueue.isFull(tid)) { 1098 DPRINTF(IEW, "[tid:%i]: Issue: LSQ has become full.\n",tid); 1099 1100 // Call function to start blocking. 1101 block(tid); 1102 1103 // Set unblock to false. Special case where we are using 1104 // skidbuffer (unblocking) instructions but then we still 1105 // get full in the IQ. 1106 toRename->iewUnblock[tid] = false; 1107 1108 dispatchedAllInsts = false; 1109 1110 ++iewLSQFullEvents; 1111 break; 1112 } 1113 1114 // Otherwise issue the instruction just fine. 1115 if (inst->isLoad()) { 1116 DPRINTF(IEW, "[tid:%i]: Issue: Memory instruction " 1117 "encountered, adding to LSQ.\n", tid); 1118 1119 // Reserve a spot in the load store queue for this 1120 // memory access. 1121 ldstQueue.insertLoad(inst); 1122 1123 ++iewDispLoadInsts; 1124 1125 add_to_iq = true; 1126 1127 toRename->iewInfo[tid].dispatchedToLSQ++; 1128 } else if (inst->isStore()) { 1129 DPRINTF(IEW, "[tid:%i]: Issue: Memory instruction " 1130 "encountered, adding to LSQ.\n", tid); 1131 1132 ldstQueue.insertStore(inst); 1133 1134 ++iewDispStoreInsts; 1135 1136 if (inst->isStoreConditional()) { 1137 // Store conditionals need to be set as "canCommit()" 1138 // so that commit can process them when they reach the 1139 // head of commit. 1140 // @todo: This is somewhat specific to Alpha. 1141 inst->setCanCommit(); 1142 instQueue.insertNonSpec(inst); 1143 add_to_iq = false; 1144 1145 ++iewDispNonSpecInsts; 1146 } else { 1147 add_to_iq = true; 1148 } 1149 1150 toRename->iewInfo[tid].dispatchedToLSQ++; 1151 } else if (inst->isMemBarrier() || inst->isWriteBarrier()) { 1152 // Same as non-speculative stores. 
1153 inst->setCanCommit(); 1154 instQueue.insertBarrier(inst); 1155 add_to_iq = false; 1156 } else if (inst->isNop()) { 1157 DPRINTF(IEW, "[tid:%i]: Issue: Nop instruction encountered, " 1158 "skipping.\n", tid); 1159 1160 inst->setIssued(); 1161 inst->setExecuted(); 1162 inst->setCanCommit(); 1163 1164 instQueue.recordProducer(inst); 1165 1166 iewExecutedNop[tid]++; 1167 1168 add_to_iq = false; 1169 } else if (inst->isExecuted()) { 1170 assert(0 && "Instruction shouldn't be executed.\n"); 1171 DPRINTF(IEW, "Issue: Executed branch encountered, " 1172 "skipping.\n"); 1173 1174 inst->setIssued(); 1175 inst->setCanCommit(); 1176 1177 instQueue.recordProducer(inst); 1178 1179 add_to_iq = false; 1180 } else { 1181 add_to_iq = true; 1182 } 1183 if (inst->isNonSpeculative()) { 1184 DPRINTF(IEW, "[tid:%i]: Issue: Nonspeculative instruction " 1185 "encountered, skipping.\n", tid); 1186 1187 // Same as non-speculative stores. 1188 inst->setCanCommit(); 1189 1190 // Specifically insert it as nonspeculative. 1191 instQueue.insertNonSpec(inst); 1192 1193 ++iewDispNonSpecInsts; 1194 1195 add_to_iq = false; 1196 } 1197 1198 // If the instruction queue is not full, then add the 1199 // instruction. 1200 if (add_to_iq) { 1201 instQueue.insert(inst); 1202 } 1203 1204 insts_to_dispatch.pop(); 1205 1206 toRename->iewInfo[tid].dispatched++; 1207 1208 ++iewDispatchedInsts; 1209 } 1210 1211 if (!insts_to_dispatch.empty()) { 1212 DPRINTF(IEW,"[tid:%i]: Issue: Bandwidth Full. 
Blocking.\n", tid); 1213 block(tid); 1214 toRename->iewUnblock[tid] = false; 1215 } 1216 1217 if (dispatchStatus[tid] == Idle && dis_num_inst) { 1218 dispatchStatus[tid] = Running; 1219 1220 updatedQueues = true; 1221 } 1222 1223 dis_num_inst = 0; 1224} 1225 1226template <class Impl> 1227void 1228DefaultIEW<Impl>::printAvailableInsts() 1229{ 1230 int inst = 0; 1231 1232 std::cout << "Available Instructions: "; 1233 1234 while (fromIssue->insts[inst]) { 1235 1236 if (inst%3==0) std::cout << "\n\t"; 1237 1238 std::cout << "PC: " << fromIssue->insts[inst]->readPC() 1239 << " TN: " << fromIssue->insts[inst]->threadNumber 1240 << " SN: " << fromIssue->insts[inst]->seqNum << " | "; 1241 1242 inst++; 1243 1244 } 1245 1246 std::cout << "\n"; 1247} 1248 1249template <class Impl> 1250void 1251DefaultIEW<Impl>::executeInsts() 1252{ 1253 wbNumInst = 0; 1254 wbCycle = 0; 1255 1256 std::list<unsigned>::iterator threads = activeThreads->begin(); 1257 std::list<unsigned>::iterator end = activeThreads->end(); 1258 1259 while (threads != end) { 1260 unsigned tid = *threads++; 1261 fetchRedirect[tid] = false; 1262 } 1263 1264 // Uncomment this if you want to see all available instructions. 1265// printAvailableInsts(); 1266 1267 // Execute/writeback any instructions that are available. 1268 int insts_to_execute = fromIssue->size; 1269 int inst_num = 0; 1270 for (; inst_num < insts_to_execute; 1271 ++inst_num) { 1272 1273 DPRINTF(IEW, "Execute: Executing instructions from IQ.\n"); 1274 1275 DynInstPtr inst = instQueue.getInstToExecute(); 1276 1277 DPRINTF(IEW, "Execute: Processing PC %#x, [tid:%i] [sn:%i].\n", 1278 inst->readPC(), inst->threadNumber,inst->seqNum); 1279 1280 // Check if the instruction is squashed; if so then skip it 1281 if (inst->isSquashed()) { 1282 DPRINTF(IEW, "Execute: Instruction was squashed.\n"); 1283 1284 // Consider this instruction executed so that commit can go 1285 // ahead and retire the instruction. 
1286 inst->setExecuted(); 1287 1288 // Not sure if I should set this here or just let commit try to 1289 // commit any squashed instructions. I like the latter a bit more. 1290 inst->setCanCommit(); 1291 1292 ++iewExecSquashedInsts; 1293 1294 decrWb(inst->seqNum); 1295 continue; 1296 } 1297 1298 Fault fault = NoFault; 1299 1300 // Execute instruction. 1301 // Note that if the instruction faults, it will be handled 1302 // at the commit stage. 1303 if (inst->isMemRef() && 1304 (!inst->isDataPrefetch() && !inst->isInstPrefetch())) { 1305 DPRINTF(IEW, "Execute: Calculating address for memory " 1306 "reference.\n"); 1307 1308 // Tell the LDSTQ to execute this instruction (if it is a load). 1309 if (inst->isLoad()) { 1310 // Loads will mark themselves as executed, and their writeback 1311 // event adds the instruction to the queue to commit 1312 fault = ldstQueue.executeLoad(inst); 1313 } else if (inst->isStore()) { 1314 fault = ldstQueue.executeStore(inst); 1315 1316 // If the store had a fault then it may not have a mem req 1317 if (!inst->isStoreConditional() && fault == NoFault) { 1318 inst->setExecuted(); 1319 1320 instToCommit(inst); 1321 } else if (fault != NoFault) { 1322 // If the instruction faulted, then we need to send it along to commit 1323 // without the instruction completing. 1324 DPRINTF(IEW, "Store has fault %s! [sn:%lli]\n", 1325 fault->name(), inst->seqNum); 1326 1327 // Send this instruction to commit, also make sure iew stage 1328 // realizes there is activity. 1329 inst->setExecuted(); 1330 1331 instToCommit(inst); 1332 activityThisCycle(); 1333 } 1334 1335 // Store conditionals will mark themselves as 1336 // executed, and their writeback event will add the 1337 // instruction to the queue to commit. 
1338 } else { 1339 panic("Unexpected memory type!\n"); 1340 } 1341 1342 } else { 1343 inst->execute(); 1344 1345 inst->setExecuted(); 1346 1347 instToCommit(inst); 1348 } 1349 1350 updateExeInstStats(inst); 1351 1352 // Check if branch prediction was correct, if not then we need 1353 // to tell commit to squash in flight instructions. Only 1354 // handle this if there hasn't already been something that 1355 // redirects fetch in this group of instructions. 1356 1357 // This probably needs to prioritize the redirects if a different 1358 // scheduler is used. Currently the scheduler schedules the oldest 1359 // instruction first, so the branch resolution order will be correct. 1360 unsigned tid = inst->threadNumber; 1361 1362 if (!fetchRedirect[tid] || 1363 toCommit->squashedSeqNum[tid] > inst->seqNum) { 1364 1365 if (inst->mispredicted()) { 1366 fetchRedirect[tid] = true; 1367 1368 DPRINTF(IEW, "Execute: Branch mispredict detected.\n"); 1369 DPRINTF(IEW, "Predicted target was %#x, %#x.\n", 1370 inst->readPredPC(), inst->readPredNPC()); 1371 DPRINTF(IEW, "Execute: Redirecting fetch to PC: %#x," 1372 " NPC: %#x.\n", inst->readNextPC(), 1373 inst->readNextNPC()); 1374 // If incorrect, then signal the ROB that it must be squashed. 1375 squashDueToBranch(inst, tid); 1376 1377 if (inst->readPredTaken()) { 1378 predictedTakenIncorrect++; 1379 } else { 1380 predictedNotTakenIncorrect++; 1381 } 1382 } else if (ldstQueue.violation(tid)) { 1383 assert(inst->isMemRef()); 1384 // If there was an ordering violation, then get the 1385 // DynInst that caused the violation. Note that this 1386 // clears the violation signal. 1387 DynInstPtr violator; 1388 violator = ldstQueue.getMemDepViolator(tid); 1389 1390 DPRINTF(IEW, "LDSTQ detected a violation. Violator PC: " 1391 "%#x, inst PC: %#x. 
Addr is: %#x.\n", 1392 violator->readPC(), inst->readPC(), inst->physEffAddr); 1393 1394 // Ensure the violating instruction is older than 1395 // current squash 1396/* if (fetchRedirect[tid] && 1397 violator->seqNum >= toCommit->squashedSeqNum[tid] + 1) 1398 continue; 1399*/ 1400 fetchRedirect[tid] = true; 1401 1402 // Tell the instruction queue that a violation has occured. 1403 instQueue.violation(inst, violator); 1404 1405 // Squash. 1406 squashDueToMemOrder(inst,tid); 1407 1408 ++memOrderViolationEvents; 1409 } else if (ldstQueue.loadBlocked(tid) && 1410 !ldstQueue.isLoadBlockedHandled(tid)) { 1411 fetchRedirect[tid] = true; 1412 1413 DPRINTF(IEW, "Load operation couldn't execute because the " 1414 "memory system is blocked. PC: %#x [sn:%lli]\n", 1415 inst->readPC(), inst->seqNum); 1416 1417 squashDueToMemBlocked(inst, tid); 1418 } 1419 } else { 1420 // Reset any state associated with redirects that will not 1421 // be used. 1422 if (ldstQueue.violation(tid)) { 1423 assert(inst->isMemRef()); 1424 1425 DynInstPtr violator = ldstQueue.getMemDepViolator(tid); 1426 1427 DPRINTF(IEW, "LDSTQ detected a violation. Violator PC: " 1428 "%#x, inst PC: %#x. Addr is: %#x.\n", 1429 violator->readPC(), inst->readPC(), inst->physEffAddr); 1430 DPRINTF(IEW, "Violation will not be handled because " 1431 "already squashing\n"); 1432 1433 ++memOrderViolationEvents; 1434 } 1435 if (ldstQueue.loadBlocked(tid) && 1436 !ldstQueue.isLoadBlockedHandled(tid)) { 1437 DPRINTF(IEW, "Load operation couldn't execute because the " 1438 "memory system is blocked. PC: %#x [sn:%lli]\n", 1439 inst->readPC(), inst->seqNum); 1440 DPRINTF(IEW, "Blocked load will not be handled because " 1441 "already squashing\n"); 1442 1443 ldstQueue.setLoadBlockedHandled(tid); 1444 } 1445 1446 } 1447 } 1448 1449 // Update and record activity if we processed any instructions. 
    // (tail of executeInsts) Update and record activity if this stage
    // processed any instructions this cycle.
    if (inst_num) {
        if (exeStatus == Idle) {
            exeStatus = Running;
        }

        updatedQueues = true;

        cpu->activityThisCycle();
    }

    // Need to reset this in case a writeback event needs to write into the
    // iew queue. That way the writeback event will write into the correct
    // spot in the queue.
    wbNumInst = 0;
}

// Wakes dependents of completing instructions and marks their destination
// registers ready in the scoreboard, for everything queued toward commit
// this cycle.
template <class Impl>
void
DefaultIEW<Impl>::writebackInsts()
{
    // Loop through the head of the time buffer and wake any
    // dependents. These instructions are about to write back. Also
    // mark scoreboard that this instruction is finally complete.
    // Either have IEW have direct access to scoreboard, or have this
    // as part of backwards communication.
    for (int inst_num = 0; inst_num < wbWidth &&
             toCommit->insts[inst_num]; inst_num++) {
        DynInstPtr inst = toCommit->insts[inst_num];
        int tid = inst->threadNumber;

        DPRINTF(IEW, "Sending instructions to commit, [sn:%lli] PC %#x.\n",
                inst->seqNum, inst->readPC());

        iewInstsToCommit[tid]++;

        // Some instructions will be sent to commit without having
        // executed because they need commit to handle them.
        // E.g. Uncached loads have not actually executed when they
        // are first sent to commit. Instead commit must tell the LSQ
        // when it's ready to execute the uncached load.
        if (!inst->isSquashed() && inst->isExecuted() && inst->getFault() == NoFault) {
            int dependents = instQueue.wakeDependents(inst);

            for (int i = 0; i < inst->numDestRegs(); i++) {
                //mark as Ready
                DPRINTF(IEW,"Setting Destination Register %i\n",
                        inst->renamedDestRegIdx(i));
                scoreboard->setReg(inst->renamedDestRegIdx(i));
            }

            if (dependents) {
                producerInst[tid]++;
                consumerInst[tid]+= dependents;
            }
            writebackCount[tid]++;
        }

        // Free a writeback slot even for squashed/faulted instructions.
        decrWb(inst->seqNum);
    }
}

// Main per-cycle entry point for the IEW stage: handles dispatch, execute,
// and writeback, then communicates queue occupancy back to earlier stages.
template<class Impl>
void
DefaultIEW<Impl>::tick()
{
    wbNumInst = 0;
    wbCycle = 0;

    wroteToTimeBuffer = false;
    updatedQueues = false;

    sortInsts();

    // Free function units marked as being freed this cycle.
    fuPool->processFreeUnits();

    std::list<unsigned>::iterator threads = activeThreads->begin();
    std::list<unsigned>::iterator end = activeThreads->end();

    // Check stall and squash signals, dispatch any instructions.
    while (threads != end) {
        unsigned tid = *threads++;

        DPRINTF(IEW,"Issue: Processing [tid:%i]\n",tid);

        checkSignalsAndUpdate(tid);
        dispatch(tid);
    }

    if (exeStatus != Squashing) {
        executeInsts();

        writebackInsts();

        // Have the instruction queue try to schedule any ready instructions.
        // (In actuality, this scheduling is for instructions that will
        // be executed next cycle.)
        instQueue.scheduleReadyInsts();

        // Also should advance its own time buffers if the stage ran.
        // Not the best place for it, but this works (hopefully).
        issueToExecQueue.advance();
    }

    bool broadcast_free_entries = false;

    if (updatedQueues || exeStatus == Running || updateLSQNextCycle) {
        exeStatus = Idle;
        updateLSQNextCycle = false;

        broadcast_free_entries = true;
    }

    // Writeback any stores using any leftover bandwidth.
    ldstQueue.writebackStores();

    // Check the committed load/store signals to see if there's a load
    // or store to commit. Also check if it's being told to execute a
    // nonspeculative instruction.
    // This is pretty inefficient...

    threads = activeThreads->begin();
    while (threads != end) {
        unsigned tid = (*threads++);

        DPRINTF(IEW,"Processing [tid:%i]\n",tid);

        // Update structures based on instructions committed.
        if (fromCommit->commitInfo[tid].doneSeqNum != 0 &&
            !fromCommit->commitInfo[tid].squash &&
            !fromCommit->commitInfo[tid].robSquashing) {

            ldstQueue.commitStores(fromCommit->commitInfo[tid].doneSeqNum,tid);

            ldstQueue.commitLoads(fromCommit->commitInfo[tid].doneSeqNum,tid);

            updateLSQNextCycle = true;
            instQueue.commit(fromCommit->commitInfo[tid].doneSeqNum,tid);
        }

        if (fromCommit->commitInfo[tid].nonSpecSeqNum != 0) {

            //DPRINTF(IEW,"NonspecInst from thread %i",tid);
            if (fromCommit->commitInfo[tid].uncached) {
                instQueue.replayMemInst(fromCommit->commitInfo[tid].uncachedLoad);
                fromCommit->commitInfo[tid].uncachedLoad->setAtCommit();
            } else {
                instQueue.scheduleNonSpec(
                    fromCommit->commitInfo[tid].nonSpecSeqNum);
            }
        }

        if (broadcast_free_entries) {
            toFetch->iewInfo[tid].iqCount =
                instQueue.getCount(tid);
            toFetch->iewInfo[tid].ldstqCount =
                ldstQueue.getCount(tid);

            toRename->iewInfo[tid].usedIQ = true;
            toRename->iewInfo[tid].freeIQEntries =
                instQueue.numFreeEntries();
            toRename->iewInfo[tid].usedLSQ = true;
            toRename->iewInfo[tid].freeLSQEntries =
                ldstQueue.numFreeEntries(tid);

            wroteToTimeBuffer = true;
        }

        DPRINTF(IEW, "[tid:%i], Dispatch dispatched %i instructions.\n",
                tid, toRename->iewInfo[tid].dispatched);
    }

    DPRINTF(IEW, "IQ has %i free entries (Can schedule: %i). "
            "LSQ has %i free entries.\n",
            instQueue.numFreeEntries(), instQueue.hasReadyInsts(),
            ldstQueue.numFreeEntries());

    updateStatus();

    if (wroteToTimeBuffer) {
        DPRINTF(Activity, "Activity this cycle.\n");
        cpu->activityThisCycle();
    }
}

// Updates per-thread execution statistics for a just-executed instruction.
template <class Impl>
void
DefaultIEW<Impl>::updateExeInstStats(DynInstPtr &inst)
{
    int thread_number = inst->threadNumber;

    //
    //  Pick off the software prefetches
    //
#ifdef TARGET_ALPHA
    if (inst->isDataPrefetch())
        iewExecutedSwp[thread_number]++;
    else
        // NOTE(review): "iewIewExecutedcutedInsts" looks like a garbled
        // version of the #else branch's iewExecutedInsts — confirm against
        // the stat member actually declared in iew.hh before renaming.
        iewIewExecutedcutedInsts++;
#else
    iewExecutedInsts++;
#endif

    //
    //  Control operations
    //
    if (inst->isControl())
        iewExecutedBranches[thread_number]++;

    //
    //  Memory operations
    //
    if (inst->isMemRef()) {
        iewExecutedRefs[thread_number]++;

        if (inst->isLoad()) {
            iewExecLoadInsts[thread_number]++;
        }
    }
}
| 346 scoreboard = sb_ptr; 347} 348 349template <class Impl> 350bool 351DefaultIEW<Impl>::drain() 352{ 353 // IEW is ready to drain at any time. 354 cpu->signalDrained(); 355 return true; 356} 357 358template <class Impl> 359void 360DefaultIEW<Impl>::resume() 361{ 362} 363 364template <class Impl> 365void 366DefaultIEW<Impl>::switchOut() 367{ 368 // Clear any state. 369 switchedOut = true; 370 assert(insts[0].empty()); 371 assert(skidBuffer[0].empty()); 372 373 instQueue.switchOut(); 374 ldstQueue.switchOut(); 375 fuPool->switchOut(); 376 377 for (int i = 0; i < numThreads; i++) { 378 while (!insts[i].empty()) 379 insts[i].pop(); 380 while (!skidBuffer[i].empty()) 381 skidBuffer[i].pop(); 382 } 383} 384 385template <class Impl> 386void 387DefaultIEW<Impl>::takeOverFrom() 388{ 389 // Reset all state. 390 _status = Active; 391 exeStatus = Running; 392 wbStatus = Idle; 393 switchedOut = false; 394 395 instQueue.takeOverFrom(); 396 ldstQueue.takeOverFrom(); 397 fuPool->takeOverFrom(); 398 399 initStage(); 400 cpu->activityThisCycle(); 401 402 for (int i=0; i < numThreads; i++) { 403 dispatchStatus[i] = Running; 404 stalls[i].commit = false; 405 fetchRedirect[i] = false; 406 } 407 408 updateLSQNextCycle = false; 409 410 for (int i = 0; i < issueToExecQueue.getSize(); ++i) { 411 issueToExecQueue.advance(); 412 } 413} 414 415template<class Impl> 416void 417DefaultIEW<Impl>::squash(unsigned tid) 418{ 419 DPRINTF(IEW, "[tid:%i]: Squashing all instructions.\n", 420 tid); 421 422 // Tell the IQ to start squashing. 423 instQueue.squash(tid); 424 425 // Tell the LDSTQ to start squashing. 426#if ISA_HAS_DELAY_SLOT 427 ldstQueue.squash(fromCommit->commitInfo[tid].bdelayDoneSeqNum, tid); 428#else 429 ldstQueue.squash(fromCommit->commitInfo[tid].doneSeqNum, tid); 430#endif 431 updatedQueues = true; 432 433 // Clear the skid buffer in case it has any data in it. 
434 DPRINTF(IEW, "[tid:%i]: Removing skidbuffer instructions until [sn:%i].\n", 435 tid, fromCommit->commitInfo[tid].bdelayDoneSeqNum); 436 437 while (!skidBuffer[tid].empty()) { 438#if ISA_HAS_DELAY_SLOT 439 if (skidBuffer[tid].front()->seqNum <= 440 fromCommit->commitInfo[tid].bdelayDoneSeqNum) { 441 DPRINTF(IEW, "[tid:%i]: Cannot remove skidbuffer instructions " 442 "that occur before delay slot [sn:%i].\n", 443 fromCommit->commitInfo[tid].bdelayDoneSeqNum, 444 tid); 445 break; 446 } else { 447 DPRINTF(IEW, "[tid:%i]: Removing instruction [sn:%i] from " 448 "skidBuffer.\n", tid, skidBuffer[tid].front()->seqNum); 449 } 450#endif 451 if (skidBuffer[tid].front()->isLoad() || 452 skidBuffer[tid].front()->isStore() ) { 453 toRename->iewInfo[tid].dispatchedToLSQ++; 454 } 455 456 toRename->iewInfo[tid].dispatched++; 457 458 skidBuffer[tid].pop(); 459 } 460 461 bdelayDoneSeqNum[tid] = fromCommit->commitInfo[tid].bdelayDoneSeqNum; 462 463 emptyRenameInsts(tid); 464} 465 466template<class Impl> 467void 468DefaultIEW<Impl>::squashDueToBranch(DynInstPtr &inst, unsigned tid) 469{ 470 DPRINTF(IEW, "[tid:%i]: Squashing from a specific instruction, PC: %#x " 471 "[sn:%i].\n", tid, inst->readPC(), inst->seqNum); 472 473 toCommit->squash[tid] = true; 474 toCommit->squashedSeqNum[tid] = inst->seqNum; 475 toCommit->mispredPC[tid] = inst->readPC(); 476 toCommit->branchMispredict[tid] = true; 477 478 int instSize = sizeof(TheISA::MachInst); 479#if ISA_HAS_DELAY_SLOT 480 bool branch_taken = 481 !(inst->readNextPC() + instSize == inst->readNextNPC() && 482 (inst->readNextPC() == inst->readPC() + instSize || 483 inst->readNextPC() == inst->readPC() + 2 * instSize)); 484 DPRINTF(Sparc, "Branch taken = %s [sn:%i]\n", 485 branch_taken ? 
"true": "false", inst->seqNum); 486 487 toCommit->branchTaken[tid] = branch_taken; 488 489 bool squashDelaySlot = true; 490// (inst->readNextPC() != inst->readPC() + sizeof(TheISA::MachInst)); 491 DPRINTF(Sparc, "Squash delay slot = %s [sn:%i]\n", 492 squashDelaySlot ? "true": "false", inst->seqNum); 493 toCommit->squashDelaySlot[tid] = squashDelaySlot; 494 //If we're squashing the delay slot, we need to pick back up at NextPC. 495 //Otherwise, NextPC isn't being squashed, so we should pick back up at 496 //NextNPC. 497 if (squashDelaySlot) { 498 toCommit->nextPC[tid] = inst->readNextPC(); 499 toCommit->nextNPC[tid] = inst->readNextNPC(); 500 } else { 501 toCommit->nextPC[tid] = inst->readNextNPC(); 502 toCommit->nextNPC[tid] = inst->readNextNPC() + instSize; 503 } 504#else 505 toCommit->branchTaken[tid] = inst->readNextPC() != 506 (inst->readPC() + sizeof(TheISA::MachInst)); 507 toCommit->nextPC[tid] = inst->readNextPC(); 508 toCommit->nextNPC[tid] = inst->readNextPC() + instSize; 509#endif 510 511 toCommit->includeSquashInst[tid] = false; 512 513 wroteToTimeBuffer = true; 514} 515 516template<class Impl> 517void 518DefaultIEW<Impl>::squashDueToMemOrder(DynInstPtr &inst, unsigned tid) 519{ 520 DPRINTF(IEW, "[tid:%i]: Squashing from a specific instruction, " 521 "PC: %#x [sn:%i].\n", tid, inst->readPC(), inst->seqNum); 522 523 toCommit->squash[tid] = true; 524 toCommit->squashedSeqNum[tid] = inst->seqNum; 525 toCommit->nextPC[tid] = inst->readNextPC(); 526#if ISA_HAS_DELAY_SLOT 527 toCommit->nextNPC[tid] = inst->readNextNPC(); 528#else 529 toCommit->nextNPC[tid] = inst->readNextPC() + sizeof(TheISA::MachInst); 530#endif 531 toCommit->branchMispredict[tid] = false; 532 533 toCommit->includeSquashInst[tid] = false; 534 535 wroteToTimeBuffer = true; 536} 537 538template<class Impl> 539void 540DefaultIEW<Impl>::squashDueToMemBlocked(DynInstPtr &inst, unsigned tid) 541{ 542 DPRINTF(IEW, "[tid:%i]: Memory blocked, squashing load and younger insts, " 543 "PC: %#x 
[sn:%i].\n", tid, inst->readPC(), inst->seqNum); 544 545 toCommit->squash[tid] = true; 546 toCommit->squashedSeqNum[tid] = inst->seqNum; 547 toCommit->nextPC[tid] = inst->readPC(); 548#if ISA_HAS_DELAY_SLOT 549 toCommit->nextNPC[tid] = inst->readNextPC(); 550#else 551 toCommit->nextNPC[tid] = inst->readPC() + sizeof(TheISA::MachInst); 552#endif 553 toCommit->branchMispredict[tid] = false; 554 555 // Must include the broadcasted SN in the squash. 556 toCommit->includeSquashInst[tid] = true; 557 558 ldstQueue.setLoadBlockedHandled(tid); 559 560 wroteToTimeBuffer = true; 561} 562 563template<class Impl> 564void 565DefaultIEW<Impl>::block(unsigned tid) 566{ 567 DPRINTF(IEW, "[tid:%u]: Blocking.\n", tid); 568 569 if (dispatchStatus[tid] != Blocked && 570 dispatchStatus[tid] != Unblocking) { 571 toRename->iewBlock[tid] = true; 572 wroteToTimeBuffer = true; 573 } 574 575 // Add the current inputs to the skid buffer so they can be 576 // reprocessed when this stage unblocks. 577 skidInsert(tid); 578 579 dispatchStatus[tid] = Blocked; 580} 581 582template<class Impl> 583void 584DefaultIEW<Impl>::unblock(unsigned tid) 585{ 586 DPRINTF(IEW, "[tid:%i]: Reading instructions out of the skid " 587 "buffer %u.\n",tid, tid); 588 589 // If the skid bufffer is empty, signal back to previous stages to unblock. 590 // Also switch status to running. 
591 if (skidBuffer[tid].empty()) { 592 toRename->iewUnblock[tid] = true; 593 wroteToTimeBuffer = true; 594 DPRINTF(IEW, "[tid:%i]: Done unblocking.\n",tid); 595 dispatchStatus[tid] = Running; 596 } 597} 598 599template<class Impl> 600void 601DefaultIEW<Impl>::wakeDependents(DynInstPtr &inst) 602{ 603 instQueue.wakeDependents(inst); 604} 605 606template<class Impl> 607void 608DefaultIEW<Impl>::rescheduleMemInst(DynInstPtr &inst) 609{ 610 instQueue.rescheduleMemInst(inst); 611} 612 613template<class Impl> 614void 615DefaultIEW<Impl>::replayMemInst(DynInstPtr &inst) 616{ 617 instQueue.replayMemInst(inst); 618} 619 620template<class Impl> 621void 622DefaultIEW<Impl>::instToCommit(DynInstPtr &inst) 623{ 624 // This function should not be called after writebackInsts in a 625 // single cycle. That will cause problems with an instruction 626 // being added to the queue to commit without being processed by 627 // writebackInsts prior to being sent to commit. 628 629 // First check the time slot that this instruction will write 630 // to. If there are free write ports at the time, then go ahead 631 // and write the instruction to that time. If there are not, 632 // keep looking back to see where's the first time there's a 633 // free slot. 634 while ((*iewQueue)[wbCycle].insts[wbNumInst]) { 635 ++wbNumInst; 636 if (wbNumInst == wbWidth) { 637 ++wbCycle; 638 wbNumInst = 0; 639 } 640 641 assert((wbCycle * wbWidth + wbNumInst) <= wbMax); 642 } 643 644 DPRINTF(IEW, "Current wb cycle: %i, width: %i, numInst: %i\nwbActual:%i\n", 645 wbCycle, wbWidth, wbNumInst, wbCycle * wbWidth + wbNumInst); 646 // Add finished instruction to queue to commit. 
    (*iewQueue)[wbCycle].insts[wbNumInst] = inst;
    (*iewQueue)[wbCycle].size++;
}

// Counts non-squashed instructions arriving from rename this cycle.
template <class Impl>
unsigned
DefaultIEW<Impl>::validInstsFromRename()
{
    unsigned inst_count = 0;

    for (int i=0; i<fromRename->size; i++) {
        if (!fromRename->insts[i]->isSquashed())
            inst_count++;
    }

    return inst_count;
}

// Moves this cycle's incoming instructions into the thread's skid buffer.
template<class Impl>
void
DefaultIEW<Impl>::skidInsert(unsigned tid)
{
    DynInstPtr inst = NULL;

    while (!insts[tid].empty()) {
        inst = insts[tid].front();

        insts[tid].pop();

        DPRINTF(Decode,"[tid:%i]: Inserting [sn:%lli] PC:%#x into "
                "dispatch skidBuffer %i\n",tid, inst->seqNum,
                inst->readPC(),tid);

        skidBuffer[tid].push(inst);
    }

    assert(skidBuffer[tid].size() <= skidBufferMax &&
           "Skidbuffer Exceeded Max Size");
}

// Returns the maximum skid-buffer occupancy across active threads.
template<class Impl>
int
DefaultIEW<Impl>::skidCount()
{
    int max=0;

    std::list<unsigned>::iterator threads = activeThreads->begin();
    std::list<unsigned>::iterator end = activeThreads->end();

    while (threads != end) {
        unsigned tid = *threads++;
        unsigned thread_count = skidBuffer[tid].size();
        if (max < thread_count)
            max = thread_count;
    }

    return max;
}

// Returns true only if every active thread's skid buffer is empty.
template<class Impl>
bool
DefaultIEW<Impl>::skidsEmpty()
{
    std::list<unsigned>::iterator threads = activeThreads->begin();
    std::list<unsigned>::iterator end = activeThreads->end();

    while (threads != end) {
        unsigned tid = *threads++;

        if (!skidBuffer[tid].empty())
            return false;
    }

    return true;
}

// Switches the stage between Active and Inactive based on whether it has
// any internal work (ready IQ insts, pending store writebacks, unblocking).
template <class Impl>
void
DefaultIEW<Impl>::updateStatus()
{
    bool any_unblocking = false;

    std::list<unsigned>::iterator threads = activeThreads->begin();
    std::list<unsigned>::iterator end = activeThreads->end();

    while (threads != end) {
        unsigned tid = *threads++;

        if (dispatchStatus[tid] == Unblocking) {
            any_unblocking = true;
            break;
        }
    }

    // If there are no ready instructions waiting to be scheduled by the IQ,
    // and there's no stores waiting to write back, and dispatch is not
    // unblocking, then there is no internal activity for the IEW stage.
    if (_status == Active && !instQueue.hasReadyInsts() &&
        !ldstQueue.willWB() && !any_unblocking) {
        DPRINTF(IEW, "IEW switching to idle\n");

        deactivateStage();

        _status = Inactive;
    } else if (_status == Inactive && (instQueue.hasReadyInsts() ||
                                       ldstQueue.willWB() ||
                                       any_unblocking)) {
        // Otherwise there is internal activity. Set to active.
        DPRINTF(IEW, "IEW switching to active\n");

        activateStage();

        _status = Active;
    }
}

// Recomputes IQ/LSQ entry partitioning (e.g. after a thread count change).
template <class Impl>
void
DefaultIEW<Impl>::resetEntries()
{
    instQueue.resetEntries();
    ldstQueue.resetEntries();
}

// Latches commit's block/unblock signals into the per-thread stall state.
template <class Impl>
void
DefaultIEW<Impl>::readStallSignals(unsigned tid)
{
    if (fromCommit->commitBlock[tid]) {
        stalls[tid].commit = true;
    }

    if (fromCommit->commitUnblock[tid]) {
        assert(stalls[tid].commit);
        stalls[tid].commit = false;
    }
}

// Returns true if the thread must stall: commit stall, full IQ, full LSQ,
// or an LSQ-detected stall condition.
template <class Impl>
bool
DefaultIEW<Impl>::checkStall(unsigned tid)
{
    bool ret_val(false);

    if (stalls[tid].commit) {
        DPRINTF(IEW,"[tid:%i]: Stall from Commit stage detected.\n",tid);
        ret_val = true;
    } else if (instQueue.isFull(tid)) {
        DPRINTF(IEW,"[tid:%i]: Stall: IQ  is full.\n",tid);
        ret_val = true;
    } else if (ldstQueue.isFull(tid)) {
        DPRINTF(IEW,"[tid:%i]: Stall: LSQ is full\n",tid);

        if (ldstQueue.numLoads(tid) > 0 ) {

            DPRINTF(IEW,"[tid:%i]: LSQ oldest load: [sn:%i] \n",
                    tid,ldstQueue.getLoadHeadSeqNum(tid));
        }

        if (ldstQueue.numStores(tid) > 0) {

            DPRINTF(IEW,"[tid:%i]: LSQ oldest store: [sn:%i] \n",
                    tid,ldstQueue.getStoreHeadSeqNum(tid));
        }

        ret_val = true;
    } else if
        (ldstQueue.isStalled(tid)) {
        DPRINTF(IEW,"[tid:%i]: Stall: LSQ stall detected.\n",tid);
        ret_val = true;
    }

    return ret_val;
}

// Reads backwards-communicated signals and transitions the thread's
// dispatch state machine (Running/Blocked/Unblocking/Squashing).
template <class Impl>
void
DefaultIEW<Impl>::checkSignalsAndUpdate(unsigned tid)
{
    // Check if there's a squash signal, squash if there is
    // Check stall signals, block if there is.
    // If status was Blocked
    //     if so then go to unblocking
    // If status was Squashing
    //     check if squashing is not high.  Switch to running this cycle.

    readStallSignals(tid);

    if (fromCommit->commitInfo[tid].squash) {
        squash(tid);

        // A squash overrides any pending block/unblock handshake.
        if (dispatchStatus[tid] == Blocked ||
            dispatchStatus[tid] == Unblocking) {
            toRename->iewUnblock[tid] = true;
            wroteToTimeBuffer = true;
        }

        dispatchStatus[tid] = Squashing;

        fetchRedirect[tid] = false;
        return;
    }

    if (fromCommit->commitInfo[tid].robSquashing) {
        DPRINTF(IEW, "[tid:%i]: ROB is still squashing.\n", tid);

        dispatchStatus[tid] = Squashing;

        emptyRenameInsts(tid);
        wroteToTimeBuffer = true;
        return;
    }

    if (checkStall(tid)) {
        block(tid);
        dispatchStatus[tid] = Blocked;
        return;
    }

    if (dispatchStatus[tid] == Blocked) {
        // Status from previous cycle was blocked, but there are no more stall
        // conditions.  Switch over to unblocking.
        DPRINTF(IEW, "[tid:%i]: Done blocking, switching to unblocking.\n",
                tid);

        dispatchStatus[tid] = Unblocking;

        unblock(tid);

        return;
    }

    if (dispatchStatus[tid] == Squashing) {
        // Switch status to running if rename isn't being told to block or
        // squash this cycle.
        DPRINTF(IEW, "[tid:%i]: Done squashing, switching to running.\n",
                tid);

        dispatchStatus[tid] = Running;

        return;
    }
}

// Distributes this cycle's instructions from rename into per-thread queues.
template <class Impl>
void
DefaultIEW<Impl>::sortInsts()
{
    int insts_from_rename = fromRename->size;
#ifdef DEBUG
#if !ISA_HAS_DELAY_SLOT
    for (int i = 0; i < numThreads; i++)
        assert(insts[i].empty());
#endif
#endif
    for (int i = 0; i < insts_from_rename; ++i) {
        insts[fromRename->insts[i]->threadNumber].push(fromRename->insts[i]);
    }
}

// Drops incoming rename instructions younger than the delay-slot boundary,
// crediting rename for each removed instruction.
template <class Impl>
void
DefaultIEW<Impl>::emptyRenameInsts(unsigned tid)
{
    DPRINTF(IEW, "[tid:%i]: Removing incoming rename instructions until "
            "[sn:%i].\n", tid, bdelayDoneSeqNum[tid]);

    while (!insts[tid].empty()) {
#if ISA_HAS_DELAY_SLOT
        if (insts[tid].front()->seqNum <= bdelayDoneSeqNum[tid]) {
            DPRINTF(IEW, "[tid:%i]: Done removing, cannot remove instruction"
                    " that occurs at or before delay slot [sn:%i].\n",
                    tid, bdelayDoneSeqNum[tid]);
            break;
        } else {
            DPRINTF(IEW, "[tid:%i]: Removing incoming rename instruction "
                    "[sn:%i].\n", tid, insts[tid].front()->seqNum);
        }
#endif

        // Keep rename's LSQ/IQ credit accounting consistent.
        if (insts[tid].front()->isLoad() ||
            insts[tid].front()->isStore() ) {
            toRename->iewInfo[tid].dispatchedToLSQ++;
        }

        toRename->iewInfo[tid].dispatched++;

        insts[tid].pop();
    }
}

// Thin wrappers for CPU activity tracking.
template <class Impl>
void
DefaultIEW<Impl>::wakeCPU()
{
    cpu->wakeCPU();
}

template <class Impl>
void
DefaultIEW<Impl>::activityThisCycle()
{
    DPRINTF(Activity, "Activity this cycle.\n");
    cpu->activityThisCycle();
}

template <class Impl>
inline void
DefaultIEW<Impl>::activateStage()
{
    DPRINTF(Activity, "Activating stage.\n");
    cpu->activateStage(O3CPU::IEWIdx);
}

template <class Impl>
inline void
DefaultIEW<Impl>::deactivateStage()
{
    DPRINTF(Activity, "Deactivating stage.\n");
    cpu->deactivateStage(O3CPU::IEWIdx);
}

// Per-thread dispatch driver: runs dispatchInsts() unless the thread is
// blocked/squashing, and manages skid-buffer refill while unblocking.
template<class Impl>
void
DefaultIEW<Impl>::dispatch(unsigned tid)
{
    // If status is Running or idle,
    //     call dispatchInsts()
    // If status is Unblocking,
    //     buffer any instructions coming from rename
    //     continue trying to empty skid buffer
    //     check if stall conditions have passed

    if (dispatchStatus[tid] == Blocked) {
        ++iewBlockCycles;

    } else if (dispatchStatus[tid] == Squashing) {
        ++iewSquashCycles;
    }

    // Dispatch should try to dispatch as many instructions as its bandwidth
    // will allow, as long as it is not currently blocked.
    if (dispatchStatus[tid] == Running ||
        dispatchStatus[tid] == Idle) {
        DPRINTF(IEW, "[tid:%i] Not blocked, so attempting to run "
                "dispatch.\n", tid);

        dispatchInsts(tid);
    } else if (dispatchStatus[tid] == Unblocking) {
        // Make sure that the skid buffer has something in it if the
        // status is unblocking.
        assert(!skidsEmpty());

        // If the status was unblocking, then instructions from the skid
        // buffer were used.  Remove those instructions and handle
        // the rest of unblocking.
        dispatchInsts(tid);

        ++iewUnblockCycles;

        if (validInstsFromRename() && dispatchedAllInsts) {
            // Add the current inputs to the skid buffer so they can be
            // reprocessed when this stage unblocks.
            skidInsert(tid);
        }

        unblock(tid);
    }
}

// Dispatches instructions into the IQ and LSQ, up to dispatchWidth per
// cycle; blocks if either queue fills up.
template <class Impl>
void
DefaultIEW<Impl>::dispatchInsts(unsigned tid)
{
    dispatchedAllInsts = true;

    // Obtain instructions from skid buffer if unblocking, or queue from rename
    // otherwise.
    std::queue<DynInstPtr> &insts_to_dispatch =
        dispatchStatus[tid] == Unblocking ?
        skidBuffer[tid] : insts[tid];

    int insts_to_add = insts_to_dispatch.size();

    DynInstPtr inst;
    bool add_to_iq = false;
    int dis_num_inst = 0;

    // Loop through the instructions, putting them in the instruction
    // queue.
    for ( ; dis_num_inst < insts_to_add &&
              dis_num_inst < dispatchWidth;
          ++dis_num_inst)
    {
        inst = insts_to_dispatch.front();

        if (dispatchStatus[tid] == Unblocking) {
            DPRINTF(IEW, "[tid:%i]: Issue: Examining instruction from skid "
                    "buffer\n", tid);
        }

        // Make sure there's a valid instruction there.
        assert(inst);

        DPRINTF(IEW, "[tid:%i]: Issue: Adding PC %#x [sn:%lli] [tid:%i] to "
                "IQ.\n",
                tid, inst->readPC(), inst->seqNum, inst->threadNumber);

        // Be sure to mark these instructions as ready so that the
        // commit stage can go ahead and execute them, and mark
        // them as issued so the IQ doesn't reprocess them.

        // Check for squashed instructions.
        if (inst->isSquashed()) {
            DPRINTF(IEW, "[tid:%i]: Issue: Squashed instruction encountered, "
                    "not adding to IQ.\n", tid);

            ++iewDispSquashedInsts;

            insts_to_dispatch.pop();

            //Tell Rename That An Instruction has been processed
            if (inst->isLoad() || inst->isStore()) {
                toRename->iewInfo[tid].dispatchedToLSQ++;
            }
            toRename->iewInfo[tid].dispatched++;

            continue;
        }

        // Check for full conditions.
        if (instQueue.isFull(tid)) {
            DPRINTF(IEW, "[tid:%i]: Issue: IQ has become full.\n", tid);

            // Call function to start blocking.
            block(tid);

            // Set unblock to false. Special case where we are using
            // skidbuffer (unblocking) instructions but then we still
            // get full in the IQ.
            toRename->iewUnblock[tid] = false;

            dispatchedAllInsts = false;

            ++iewIQFullEvents;
            break;
        } else if (ldstQueue.isFull(tid)) {
            DPRINTF(IEW, "[tid:%i]: Issue: LSQ has become full.\n",tid);

            // Call function to start blocking.
            block(tid);

            // Set unblock to false. Special case where we are using
            // skidbuffer (unblocking) instructions but then we still
            // get full in the IQ.
            toRename->iewUnblock[tid] = false;

            dispatchedAllInsts = false;

            ++iewLSQFullEvents;
            break;
        }

        // Otherwise issue the instruction just fine.
        if (inst->isLoad()) {
            DPRINTF(IEW, "[tid:%i]: Issue: Memory instruction "
                    "encountered, adding to LSQ.\n", tid);

            // Reserve a spot in the load store queue for this
            // memory access.
            ldstQueue.insertLoad(inst);

            ++iewDispLoadInsts;

            add_to_iq = true;

            toRename->iewInfo[tid].dispatchedToLSQ++;
        } else if (inst->isStore()) {
            DPRINTF(IEW, "[tid:%i]: Issue: Memory instruction "
                    "encountered, adding to LSQ.\n", tid);

            ldstQueue.insertStore(inst);

            ++iewDispStoreInsts;

            if (inst->isStoreConditional()) {
                // Store conditionals need to be set as "canCommit()"
                // so that commit can process them when they reach the
                // head of commit.
                // @todo: This is somewhat specific to Alpha.
                inst->setCanCommit();
                instQueue.insertNonSpec(inst);
                add_to_iq = false;

                ++iewDispNonSpecInsts;
            } else {
                add_to_iq = true;
            }

            toRename->iewInfo[tid].dispatchedToLSQ++;
        } else if (inst->isMemBarrier() || inst->isWriteBarrier()) {
            // Same as non-speculative stores.
            inst->setCanCommit();
            instQueue.insertBarrier(inst);
            add_to_iq = false;
        } else if (inst->isNop()) {
            DPRINTF(IEW, "[tid:%i]: Issue: Nop instruction encountered, "
                    "skipping.\n", tid);

            // Nops complete at dispatch; they never enter the IQ.
            inst->setIssued();
            inst->setExecuted();
            inst->setCanCommit();

            instQueue.recordProducer(inst);

            iewExecutedNop[tid]++;

            add_to_iq = false;
        } else if (inst->isExecuted()) {
            assert(0 && "Instruction shouldn't be executed.\n");
            DPRINTF(IEW, "Issue: Executed branch encountered, "
                    "skipping.\n");

            inst->setIssued();
            inst->setCanCommit();

            instQueue.recordProducer(inst);

            add_to_iq = false;
        } else {
            add_to_iq = true;
        }
        // Non-speculative handling overrides the add_to_iq decision above.
        if (inst->isNonSpeculative()) {
            DPRINTF(IEW, "[tid:%i]: Issue: Nonspeculative instruction "
                    "encountered, skipping.\n", tid);

            // Same as non-speculative stores.
            inst->setCanCommit();

            // Specifically insert it as nonspeculative.
            instQueue.insertNonSpec(inst);

            ++iewDispNonSpecInsts;

            add_to_iq = false;
        }

        // If the instruction queue is not full, then add the
        // instruction.
        if (add_to_iq) {
            instQueue.insert(inst);
        }

        insts_to_dispatch.pop();

        toRename->iewInfo[tid].dispatched++;

        ++iewDispatchedInsts;
    }

    if (!insts_to_dispatch.empty()) {
        DPRINTF(IEW,"[tid:%i]: Issue: Bandwidth Full. Blocking.\n", tid);
        block(tid);
        toRename->iewUnblock[tid] = false;
    }

    if (dispatchStatus[tid] == Idle && dis_num_inst) {
        dispatchStatus[tid] = Running;

        updatedQueues = true;
    }

    dis_num_inst = 0;
}

// Debugging helper: prints everything currently available from issue.
template <class Impl>
void
DefaultIEW<Impl>::printAvailableInsts()
{
    int inst = 0;

    std::cout << "Available Instructions: ";

    while (fromIssue->insts[inst]) {

        if (inst%3==0) std::cout << "\n\t";

        std::cout << "PC: " << fromIssue->insts[inst]->readPC()
             << " TN: " << fromIssue->insts[inst]->threadNumber
             << " SN: " << fromIssue->insts[inst]->seqNum << " | ";

        inst++;

    }

    std::cout << "\n";
}

// Executes all instructions available from issue this cycle, sending them
// to commit and detecting branch mispredicts and memory-order violations.
template <class Impl>
void
DefaultIEW<Impl>::executeInsts()
{
    wbNumInst = 0;
    wbCycle = 0;

    std::list<unsigned>::iterator threads = activeThreads->begin();
    std::list<unsigned>::iterator end = activeThreads->end();

    while (threads != end) {
        unsigned tid = *threads++;
        fetchRedirect[tid] = false;
    }

    // Uncomment this if you want to see all available instructions.
//    printAvailableInsts();

    // Execute/writeback any instructions that are available.
    int insts_to_execute = fromIssue->size;
    int inst_num = 0;
    for (; inst_num < insts_to_execute;
          ++inst_num) {

        DPRINTF(IEW, "Execute: Executing instructions from IQ.\n");

        DynInstPtr inst = instQueue.getInstToExecute();

        DPRINTF(IEW, "Execute: Processing PC %#x, [tid:%i] [sn:%i].\n",
                inst->readPC(), inst->threadNumber,inst->seqNum);

        // Check if the instruction is squashed; if so then skip it
        if (inst->isSquashed()) {
            DPRINTF(IEW, "Execute: Instruction was squashed.\n");

            // Consider this instruction executed so that commit can go
            // ahead and retire the instruction.
            inst->setExecuted();

            // Not sure if I should set this here or just let commit try to
            // commit any squashed instructions.  I like the latter a bit more.
            inst->setCanCommit();

            ++iewExecSquashedInsts;

            // Free the writeback slot this instruction would have used.
            decrWb(inst->seqNum);
            continue;
        }

        Fault fault = NoFault;

        // Execute instruction.
        // Note that if the instruction faults, it will be handled
        // at the commit stage.
        if (inst->isMemRef() &&
            (!inst->isDataPrefetch() && !inst->isInstPrefetch())) {
            DPRINTF(IEW, "Execute: Calculating address for memory "
                    "reference.\n");

            // Tell the LDSTQ to execute this instruction (if it is a load).
            if (inst->isLoad()) {
                // Loads will mark themselves as executed, and their writeback
                // event adds the instruction to the queue to commit
                fault = ldstQueue.executeLoad(inst);
            } else if (inst->isStore()) {
                fault = ldstQueue.executeStore(inst);

                // If the store had a fault then it may not have a mem req
                if (!inst->isStoreConditional() && fault == NoFault) {
                    inst->setExecuted();

                    instToCommit(inst);
                } else if (fault != NoFault) {
                    // If the instruction faulted, then we need to send it along to commit
                    // without the instruction completing.
                    DPRINTF(IEW, "Store has fault %s! [sn:%lli]\n",
                            fault->name(), inst->seqNum);

                    // Send this instruction to commit, also make sure iew stage
                    // realizes there is activity.
                    inst->setExecuted();

                    instToCommit(inst);
                    activityThisCycle();
                }

                // Store conditionals will mark themselves as
                // executed, and their writeback event will add the
                // instruction to the queue to commit.
            } else {
                panic("Unexpected memory type!\n");
            }

        } else {
            inst->execute();

            inst->setExecuted();

            instToCommit(inst);
        }

        updateExeInstStats(inst);

        // Check if branch prediction was correct, if not then we need
        // to tell commit to squash in flight instructions.  Only
        // handle this if there hasn't already been something that
        // redirects fetch in this group of instructions.

        // This probably needs to prioritize the redirects if a different
        // scheduler is used.  Currently the scheduler schedules the oldest
        // instruction first, so the branch resolution order will be correct.
        unsigned tid = inst->threadNumber;

        if (!fetchRedirect[tid] ||
            toCommit->squashedSeqNum[tid] > inst->seqNum) {

            if (inst->mispredicted()) {
                fetchRedirect[tid] = true;

                DPRINTF(IEW, "Execute: Branch mispredict detected.\n");
                DPRINTF(IEW, "Predicted target was %#x, %#x.\n",
                        inst->readPredPC(), inst->readPredNPC());
                DPRINTF(IEW, "Execute: Redirecting fetch to PC: %#x,"
                        " NPC: %#x.\n", inst->readNextPC(),
                        inst->readNextNPC());
                // If incorrect, then signal the ROB that it must be squashed.
                squashDueToBranch(inst, tid);

                if (inst->readPredTaken()) {
                    predictedTakenIncorrect++;
                } else {
                    predictedNotTakenIncorrect++;
                }
            } else if (ldstQueue.violation(tid)) {
                assert(inst->isMemRef());
                // If there was an ordering violation, then get the
                // DynInst that caused the violation.  Note that this
                // clears the violation signal.
                DynInstPtr violator;
                violator = ldstQueue.getMemDepViolator(tid);

                DPRINTF(IEW, "LDSTQ detected a violation.  Violator PC: "
                        "%#x, inst PC: %#x.  Addr is: %#x.\n",
                        violator->readPC(), inst->readPC(), inst->physEffAddr);

                // Ensure the violating instruction is older than
                // current squash
/*                if (fetchRedirect[tid] &&
                    violator->seqNum >= toCommit->squashedSeqNum[tid] + 1)
                    continue;
*/
                fetchRedirect[tid] = true;

                // Tell the instruction queue that a violation has occured.
                instQueue.violation(inst, violator);

                // Squash.
                squashDueToMemOrder(inst,tid);

                ++memOrderViolationEvents;
            } else if (ldstQueue.loadBlocked(tid) &&
                       !ldstQueue.isLoadBlockedHandled(tid)) {
                fetchRedirect[tid] = true;

                DPRINTF(IEW, "Load operation couldn't execute because the "
                        "memory system is blocked.  PC: %#x [sn:%lli]\n",
                        inst->readPC(), inst->seqNum);

                squashDueToMemBlocked(inst, tid);
            }
        } else {
            // Reset any state associated with redirects that will not
            // be used.
            if (ldstQueue.violation(tid)) {
                assert(inst->isMemRef());

                DynInstPtr violator = ldstQueue.getMemDepViolator(tid);

                DPRINTF(IEW, "LDSTQ detected a violation.  Violator PC: "
                        "%#x, inst PC: %#x.  Addr is: %#x.\n",
                        violator->readPC(), inst->readPC(), inst->physEffAddr);
                DPRINTF(IEW, "Violation will not be handled because "
                        "already squashing\n");

                ++memOrderViolationEvents;
            }
            if (ldstQueue.loadBlocked(tid) &&
                !ldstQueue.isLoadBlockedHandled(tid)) {
                DPRINTF(IEW, "Load operation couldn't execute because the "
                        "memory system is blocked.  PC: %#x [sn:%lli]\n",
                        inst->readPC(), inst->seqNum);
                DPRINTF(IEW, "Blocked load will not be handled because "
                        "already squashing\n");

                ldstQueue.setLoadBlockedHandled(tid);
            }

        }
    }

    // Update and record activity if we processed any instructions.
1445 if (inst_num) { 1446 if (exeStatus == Idle) { 1447 exeStatus = Running; 1448 } 1449 1450 updatedQueues = true; 1451 1452 cpu->activityThisCycle(); 1453 } 1454 1455 // Need to reset this in case a writeback event needs to write into the 1456 // iew queue. That way the writeback event will write into the correct 1457 // spot in the queue. 1458 wbNumInst = 0; 1459} 1460 1461template <class Impl> 1462void 1463DefaultIEW<Impl>::writebackInsts() 1464{ 1465 // Loop through the head of the time buffer and wake any 1466 // dependents. These instructions are about to write back. Also 1467 // mark scoreboard that this instruction is finally complete. 1468 // Either have IEW have direct access to scoreboard, or have this 1469 // as part of backwards communication. 1470 for (int inst_num = 0; inst_num < wbWidth && 1471 toCommit->insts[inst_num]; inst_num++) { 1472 DynInstPtr inst = toCommit->insts[inst_num]; 1473 int tid = inst->threadNumber; 1474 1475 DPRINTF(IEW, "Sending instructions to commit, [sn:%lli] PC %#x.\n", 1476 inst->seqNum, inst->readPC()); 1477 1478 iewInstsToCommit[tid]++; 1479 1480 // Some instructions will be sent to commit without having 1481 // executed because they need commit to handle them. 1482 // E.g. Uncached loads have not actually executed when they 1483 // are first sent to commit. Instead commit must tell the LSQ 1484 // when it's ready to execute the uncached load. 
1485 if (!inst->isSquashed() && inst->isExecuted() && inst->getFault() == NoFault) { 1486 int dependents = instQueue.wakeDependents(inst); 1487 1488 for (int i = 0; i < inst->numDestRegs(); i++) { 1489 //mark as Ready 1490 DPRINTF(IEW,"Setting Destination Register %i\n", 1491 inst->renamedDestRegIdx(i)); 1492 scoreboard->setReg(inst->renamedDestRegIdx(i)); 1493 } 1494 1495 if (dependents) { 1496 producerInst[tid]++; 1497 consumerInst[tid]+= dependents; 1498 } 1499 writebackCount[tid]++; 1500 } 1501 1502 decrWb(inst->seqNum); 1503 } 1504} 1505 1506template<class Impl> 1507void 1508DefaultIEW<Impl>::tick() 1509{ 1510 wbNumInst = 0; 1511 wbCycle = 0; 1512 1513 wroteToTimeBuffer = false; 1514 updatedQueues = false; 1515 1516 sortInsts(); 1517 1518 // Free function units marked as being freed this cycle. 1519 fuPool->processFreeUnits(); 1520 1521 std::list<unsigned>::iterator threads = activeThreads->begin(); 1522 std::list<unsigned>::iterator end = activeThreads->end(); 1523 1524 // Check stall and squash signals, dispatch any instructions. 1525 while (threads != end) { 1526 unsigned tid = *threads++; 1527 1528 DPRINTF(IEW,"Issue: Processing [tid:%i]\n",tid); 1529 1530 checkSignalsAndUpdate(tid); 1531 dispatch(tid); 1532 } 1533 1534 if (exeStatus != Squashing) { 1535 executeInsts(); 1536 1537 writebackInsts(); 1538 1539 // Have the instruction queue try to schedule any ready instructions. 1540 // (In actuality, this scheduling is for instructions that will 1541 // be executed next cycle.) 1542 instQueue.scheduleReadyInsts(); 1543 1544 // Also should advance its own time buffers if the stage ran. 1545 // Not the best place for it, but this works (hopefully). 
1546 issueToExecQueue.advance(); 1547 } 1548 1549 bool broadcast_free_entries = false; 1550 1551 if (updatedQueues || exeStatus == Running || updateLSQNextCycle) { 1552 exeStatus = Idle; 1553 updateLSQNextCycle = false; 1554 1555 broadcast_free_entries = true; 1556 } 1557 1558 // Writeback any stores using any leftover bandwidth. 1559 ldstQueue.writebackStores(); 1560 1561 // Check the committed load/store signals to see if there's a load 1562 // or store to commit. Also check if it's being told to execute a 1563 // nonspeculative instruction. 1564 // This is pretty inefficient... 1565 1566 threads = activeThreads->begin(); 1567 while (threads != end) { 1568 unsigned tid = (*threads++); 1569 1570 DPRINTF(IEW,"Processing [tid:%i]\n",tid); 1571 1572 // Update structures based on instructions committed. 1573 if (fromCommit->commitInfo[tid].doneSeqNum != 0 && 1574 !fromCommit->commitInfo[tid].squash && 1575 !fromCommit->commitInfo[tid].robSquashing) { 1576 1577 ldstQueue.commitStores(fromCommit->commitInfo[tid].doneSeqNum,tid); 1578 1579 ldstQueue.commitLoads(fromCommit->commitInfo[tid].doneSeqNum,tid); 1580 1581 updateLSQNextCycle = true; 1582 instQueue.commit(fromCommit->commitInfo[tid].doneSeqNum,tid); 1583 } 1584 1585 if (fromCommit->commitInfo[tid].nonSpecSeqNum != 0) { 1586 1587 //DPRINTF(IEW,"NonspecInst from thread %i",tid); 1588 if (fromCommit->commitInfo[tid].uncached) { 1589 instQueue.replayMemInst(fromCommit->commitInfo[tid].uncachedLoad); 1590 fromCommit->commitInfo[tid].uncachedLoad->setAtCommit(); 1591 } else { 1592 instQueue.scheduleNonSpec( 1593 fromCommit->commitInfo[tid].nonSpecSeqNum); 1594 } 1595 } 1596 1597 if (broadcast_free_entries) { 1598 toFetch->iewInfo[tid].iqCount = 1599 instQueue.getCount(tid); 1600 toFetch->iewInfo[tid].ldstqCount = 1601 ldstQueue.getCount(tid); 1602 1603 toRename->iewInfo[tid].usedIQ = true; 1604 toRename->iewInfo[tid].freeIQEntries = 1605 instQueue.numFreeEntries(); 1606 toRename->iewInfo[tid].usedLSQ = true; 1607 
toRename->iewInfo[tid].freeLSQEntries = 1608 ldstQueue.numFreeEntries(tid); 1609 1610 wroteToTimeBuffer = true; 1611 } 1612 1613 DPRINTF(IEW, "[tid:%i], Dispatch dispatched %i instructions.\n", 1614 tid, toRename->iewInfo[tid].dispatched); 1615 } 1616 1617 DPRINTF(IEW, "IQ has %i free entries (Can schedule: %i). " 1618 "LSQ has %i free entries.\n", 1619 instQueue.numFreeEntries(), instQueue.hasReadyInsts(), 1620 ldstQueue.numFreeEntries()); 1621 1622 updateStatus(); 1623 1624 if (wroteToTimeBuffer) { 1625 DPRINTF(Activity, "Activity this cycle.\n"); 1626 cpu->activityThisCycle(); 1627 } 1628} 1629 1630template <class Impl> 1631void 1632DefaultIEW<Impl>::updateExeInstStats(DynInstPtr &inst) 1633{ 1634 int thread_number = inst->threadNumber; 1635 1636 // 1637 // Pick off the software prefetches 1638 // 1639#ifdef TARGET_ALPHA 1640 if (inst->isDataPrefetch()) 1641 iewExecutedSwp[thread_number]++; 1642 else 1643 iewIewExecutedcutedInsts++; 1644#else 1645 iewExecutedInsts++; 1646#endif 1647 1648 // 1649 // Control operations 1650 // 1651 if (inst->isControl()) 1652 iewExecutedBranches[thread_number]++; 1653 1654 // 1655 // Memory operations 1656 // 1657 if (inst->isMemRef()) { 1658 iewExecutedRefs[thread_number]++; 1659 1660 if (inst->isLoad()) { 1661 iewExecLoadInsts[thread_number]++; 1662 } 1663 } 1664}
|