/*
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
272SN/A */ 282665Ssaidi@eecs.umich.edu 292665Ssaidi@eecs.umich.edu#include <limits> 302665Ssaidi@eecs.umich.edu#include <vector> 312SN/A 322SN/A#include "sim/root.hh" 336216Snate@binkert.org 342SN/A#include "cpu/o3/fu_pool.hh" 35330SN/A#include "cpu/o3/inst_queue.hh" 3656SN/A 371031SN/Ausing namespace std; 38330SN/A 39330SN/Atemplate <class Impl> 406214Snate@binkert.orgInstructionQueue<Impl>::FUCompletion::FUCompletion(DynInstPtr &_inst, 418320Ssteve.reinhardt@amd.com int fu_idx, 4210023Smatt.horsnell@ARM.com InstructionQueue<Impl> *iq_ptr) 43330SN/A : Event(&mainEventQueue, Stat_Event_Pri), 44695SN/A inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false) 452SN/A{ 462SN/A this->setFlags(Event::AutoDelete); 472SN/A} 482SN/A 492SN/Atemplate <class Impl> 502SN/Avoid 512SN/AInstructionQueue<Impl>::FUCompletion::process() 522SN/A{ 532SN/A iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1); 542SN/A inst = NULL; 552SN/A} 562SN/A 572SN/A 582SN/Atemplate <class Impl> 592SN/Aconst char * 602SN/AInstructionQueue<Impl>::FUCompletion::description() 612SN/A{ 622SN/A return "Functional unit completion event"; 634762Snate@binkert.org} 649983Sstever@gmail.com 652SN/Atemplate <class Impl> 661031SN/AInstructionQueue<Impl>::InstructionQueue(Params *params) 671031SN/A : dcacheInterface(params->dcacheInterface), 681031SN/A fuPool(params->fuPool), 691553SN/A numEntries(params->numIQEntries), 7010023Smatt.horsnell@ARM.com totalWidth(params->issueWidth), 711553SN/A numPhysIntRegs(params->numPhysIntRegs), 721553SN/A numPhysFloatRegs(params->numPhysFloatRegs), 7310422Sandreas.hansson@arm.com commitToIEWDelay(params->commitToIEWDelay) 7410422Sandreas.hansson@arm.com{ 7510422Sandreas.hansson@arm.com assert(fuPool); 7610422Sandreas.hansson@arm.com 7710422Sandreas.hansson@arm.com switchedOut = false; 78465SN/A 79465SN/A numThreads = params->numberOfThreads; 80465SN/A 81465SN/A // Set the number of physical registers as the number of int + float 82465SN/A numPhysRegs = numPhysIntRegs + 
numPhysFloatRegs; 837492Ssteve.reinhardt@amd.com 8410905Sandreas.sandberg@arm.com DPRINTF(IQ, "There are %i physical registers.\n", numPhysRegs); 857532Ssteve.reinhardt@amd.com 8610905Sandreas.sandberg@arm.com //Create an entry for each physical register within the 878320Ssteve.reinhardt@amd.com //dependency graph. 8810905Sandreas.sandberg@arm.com dependGraph.resize(numPhysRegs); 8910905Sandreas.sandberg@arm.com 9010905Sandreas.sandberg@arm.com // Resize the register scoreboard. 918320Ssteve.reinhardt@amd.com regScoreboard.resize(numPhysRegs); 928320Ssteve.reinhardt@amd.com 938320Ssteve.reinhardt@amd.com //Initialize Mem Dependence Units 947532Ssteve.reinhardt@amd.com for (int i = 0; i < numThreads; i++) { 957532Ssteve.reinhardt@amd.com memDepUnit[i].init(params,i); 967532Ssteve.reinhardt@amd.com memDepUnit[i].setIQ(this); 977532Ssteve.reinhardt@amd.com } 987532Ssteve.reinhardt@amd.com 997532Ssteve.reinhardt@amd.com resetState(); 1007532Ssteve.reinhardt@amd.com 1017532Ssteve.reinhardt@amd.com string policy = params->smtIQPolicy; 1027492Ssteve.reinhardt@amd.com 1037492Ssteve.reinhardt@amd.com //Convert string to lowercase 1047492Ssteve.reinhardt@amd.com std::transform(policy.begin(), policy.end(), policy.begin(), 1057492Ssteve.reinhardt@amd.com (int(*)(int)) tolower); 1062SN/A 1072SN/A //Figure out resource sharing policy 1082SN/A if (policy == "dynamic") { 1092SN/A iqPolicy = Dynamic; 1102SN/A 1112SN/A //Set Max Entries to Total ROB Capacity 1122SN/A for (int i = 0; i < numThreads; i++) { 1132SN/A maxEntries[i] = numEntries; 1142SN/A } 115330SN/A 116330SN/A } else if (policy == "partitioned") { 117330SN/A iqPolicy = Partitioned; 118330SN/A 11910023Smatt.horsnell@ARM.com //@todo:make work if part_amt doesnt divide evenly. 
12010023Smatt.horsnell@ARM.com int part_amt = numEntries / numThreads; 12110023Smatt.horsnell@ARM.com 12210023Smatt.horsnell@ARM.com //Divide ROB up evenly 12310023Smatt.horsnell@ARM.com for (int i = 0; i < numThreads; i++) { 12410023Smatt.horsnell@ARM.com maxEntries[i] = part_amt; 12510023Smatt.horsnell@ARM.com } 12610023Smatt.horsnell@ARM.com 12710023Smatt.horsnell@ARM.com DPRINTF(Fetch, "IQ sharing policy set to Partitioned:" 12810023Smatt.horsnell@ARM.com "%i entries per thread.\n",part_amt); 12910023Smatt.horsnell@ARM.com 13010023Smatt.horsnell@ARM.com } else if (policy == "threshold") { 13110023Smatt.horsnell@ARM.com iqPolicy = Threshold; 13210023Smatt.horsnell@ARM.com 13310023Smatt.horsnell@ARM.com double threshold = (double)params->smtIQThreshold / 100; 13410023Smatt.horsnell@ARM.com 13510023Smatt.horsnell@ARM.com int thresholdIQ = (int)((double)threshold * numEntries); 13610023Smatt.horsnell@ARM.com 13710023Smatt.horsnell@ARM.com //Divide up by threshold amount 13810023Smatt.horsnell@ARM.com for (int i = 0; i < numThreads; i++) { 13910023Smatt.horsnell@ARM.com maxEntries[i] = thresholdIQ; 14010023Smatt.horsnell@ARM.com } 1412SN/A 142395SN/A DPRINTF(Fetch, "IQ sharing policy set to Threshold:" 143395SN/A "%i entries per thread.\n",thresholdIQ); 144395SN/A } else { 14510905Sandreas.sandberg@arm.com assert(0 && "Invalid IQ Sharing Policy.Options Are:{Dynamic," 146395SN/A "Partitioned, Threshold}"); 147573SN/A } 148573SN/A} 149395SN/A 150573SN/Atemplate <class Impl> 151573SN/AInstructionQueue<Impl>::~InstructionQueue() 15210905Sandreas.sandberg@arm.com{ 15310905Sandreas.sandberg@arm.com dependGraph.reset(); 15410905Sandreas.sandberg@arm.com cprintf("Nodes traversed: %i, removed: %i\n", 155395SN/A dependGraph.nodesTraversed, dependGraph.nodesRemoved); 156395SN/A} 157843SN/A 1587492Ssteve.reinhardt@amd.comtemplate <class Impl> 1591031SN/Astd::string 1601031SN/AInstructionQueue<Impl>::name() const 1611031SN/A{ 1621031SN/A return cpu->name() + ".iq"; 1631031SN/A} 
1641031SN/A 1651031SN/Atemplate <class Impl> 1661031SN/Avoid 1671031SN/AInstructionQueue<Impl>::regStats() 1681031SN/A{ 1691031SN/A using namespace Stats; 1701031SN/A iqInstsAdded 1711031SN/A .name(name() + ".iqInstsAdded") 1721031SN/A .desc("Number of instructions added to the IQ (excludes non-spec)") 1731031SN/A .prereq(iqInstsAdded); 1741031SN/A 1751031SN/A iqNonSpecInstsAdded 1761031SN/A .name(name() + ".iqNonSpecInstsAdded") 1771031SN/A .desc("Number of non-speculative instructions added to the IQ") 1781031SN/A .prereq(iqNonSpecInstsAdded); 1791031SN/A 1801031SN/A iqInstsIssued 1811031SN/A .name(name() + ".iqInstsIssued") 1821031SN/A .desc("Number of instructions issued") 1832901Ssaidi@eecs.umich.edu .prereq(iqInstsIssued); 1849342SAndreas.Sandberg@arm.com 1852797Sktlim@umich.edu iqIntInstsIssued 1869342SAndreas.Sandberg@arm.com .name(name() + ".iqIntInstsIssued") 1872901Ssaidi@eecs.umich.edu .desc("Number of integer instructions issued") 1882797Sktlim@umich.edu .prereq(iqIntInstsIssued); 1892797Sktlim@umich.edu 1902797Sktlim@umich.edu iqFloatInstsIssued 1915314Sstever@gmail.com .name(name() + ".iqFloatInstsIssued") 1925314Sstever@gmail.com .desc("Number of float instructions issued") 1935314Sstever@gmail.com .prereq(iqFloatInstsIssued); 1945314Sstever@gmail.com 1955314Sstever@gmail.com iqBranchInstsIssued 1965314Sstever@gmail.com .name(name() + ".iqBranchInstsIssued") 1975314Sstever@gmail.com .desc("Number of branch instructions issued") 1985314Sstever@gmail.com .prereq(iqBranchInstsIssued); 1995314Sstever@gmail.com 2005314Sstever@gmail.com iqMemInstsIssued 2015314Sstever@gmail.com .name(name() + ".iqMemInstsIssued") 2025314Sstever@gmail.com .desc("Number of memory instructions issued") 2035314Sstever@gmail.com .prereq(iqMemInstsIssued); 2045314Sstever@gmail.com 205 iqMiscInstsIssued 206 .name(name() + ".iqMiscInstsIssued") 207 .desc("Number of miscellaneous instructions issued") 208 .prereq(iqMiscInstsIssued); 209 210 iqSquashedInstsIssued 211 .name(name() + 
".iqSquashedInstsIssued") 212 .desc("Number of squashed instructions issued") 213 .prereq(iqSquashedInstsIssued); 214 215 iqSquashedInstsExamined 216 .name(name() + ".iqSquashedInstsExamined") 217 .desc("Number of squashed instructions iterated over during squash;" 218 " mainly for profiling") 219 .prereq(iqSquashedInstsExamined); 220 221 iqSquashedOperandsExamined 222 .name(name() + ".iqSquashedOperandsExamined") 223 .desc("Number of squashed operands that are examined and possibly " 224 "removed from graph") 225 .prereq(iqSquashedOperandsExamined); 226 227 iqSquashedNonSpecRemoved 228 .name(name() + ".iqSquashedNonSpecRemoved") 229 .desc("Number of squashed non-spec instructions that were removed") 230 .prereq(iqSquashedNonSpecRemoved); 231 232 queueResDist 233 .init(Num_OpClasses, 0, 99, 2) 234 .name(name() + ".IQ:residence:") 235 .desc("cycles from dispatch to issue") 236 .flags(total | pdf | cdf ) 237 ; 238 for (int i = 0; i < Num_OpClasses; ++i) { 239 queueResDist.subname(i, opClassStrings[i]); 240 } 241 numIssuedDist 242 .init(0,totalWidth,1) 243 .name(name() + ".ISSUE:issued_per_cycle") 244 .desc("Number of insts issued each cycle") 245 .flags(pdf) 246 ; 247/* 248 dist_unissued 249 .init(Num_OpClasses+2) 250 .name(name() + ".ISSUE:unissued_cause") 251 .desc("Reason ready instruction not issued") 252 .flags(pdf | dist) 253 ; 254 for (int i=0; i < (Num_OpClasses + 2); ++i) { 255 dist_unissued.subname(i, unissued_names[i]); 256 } 257*/ 258 statIssuedInstType 259 .init(numThreads,Num_OpClasses) 260 .name(name() + ".ISSUE:FU_type") 261 .desc("Type of FU issued") 262 .flags(total | pdf | dist) 263 ; 264 statIssuedInstType.ysubnames(opClassStrings); 265 266 // 267 // How long did instructions for a particular FU type wait prior to issue 268 // 269 270 issueDelayDist 271 .init(Num_OpClasses,0,99,2) 272 .name(name() + ".ISSUE:") 273 .desc("cycles from operands ready to issue") 274 .flags(pdf | cdf) 275 ; 276 277 for (int i=0; i<Num_OpClasses; ++i) { 278 stringstream 
subname; 279 subname << opClassStrings[i] << "_delay"; 280 issueDelayDist.subname(i, subname.str()); 281 } 282 283 issueRate 284 .name(name() + ".ISSUE:rate") 285 .desc("Inst issue rate") 286 .flags(total) 287 ; 288 issueRate = iqInstsIssued / cpu->numCycles; 289/* 290 issue_stores 291 .name(name() + ".ISSUE:stores") 292 .desc("Number of stores issued") 293 .flags(total) 294 ; 295 issue_stores = exe_refs - exe_loads; 296*/ 297/* 298 issue_op_rate 299 .name(name() + ".ISSUE:op_rate") 300 .desc("Operation issue rate") 301 .flags(total) 302 ; 303 issue_op_rate = issued_ops / numCycles; 304*/ 305 statFuBusy 306 .init(Num_OpClasses) 307 .name(name() + ".ISSUE:fu_full") 308 .desc("attempts to use FU when none available") 309 .flags(pdf | dist) 310 ; 311 for (int i=0; i < Num_OpClasses; ++i) { 312 statFuBusy.subname(i, opClassStrings[i]); 313 } 314 315 fuBusy 316 .init(numThreads) 317 .name(name() + ".ISSUE:fu_busy_cnt") 318 .desc("FU busy when requested") 319 .flags(total) 320 ; 321 322 fuBusyRate 323 .name(name() + ".ISSUE:fu_busy_rate") 324 .desc("FU busy rate (busy events/executed inst)") 325 .flags(total) 326 ; 327 fuBusyRate = fuBusy / iqInstsIssued; 328 329 for ( int i=0; i < numThreads; i++) { 330 // Tell mem dependence unit to reg stats as well. 331 memDepUnit[i].regStats(); 332 } 333} 334 335template <class Impl> 336void 337InstructionQueue<Impl>::resetState() 338{ 339 //Initialize thread IQ counts 340 for (int i = 0; i <numThreads; i++) { 341 count[i] = 0; 342 instList[i].clear(); 343 } 344 345 // Initialize the number of free IQ entries. 346 freeEntries = numEntries; 347 348 // Note that in actuality, the registers corresponding to the logical 349 // registers start off as ready. However this doesn't matter for the 350 // IQ as the instruction should have been correctly told if those 351 // registers are ready in rename. Thus it can all be initialized as 352 // unready. 
353 for (int i = 0; i < numPhysRegs; ++i) { 354 regScoreboard[i] = false; 355 } 356 357 for (int i = 0; i < numThreads; ++i) { 358 squashedSeqNum[i] = 0; 359 } 360 361 for (int i = 0; i < Num_OpClasses; ++i) { 362 while (!readyInsts[i].empty()) 363 readyInsts[i].pop(); 364 queueOnList[i] = false; 365 readyIt[i] = listOrder.end(); 366 } 367 nonSpecInsts.clear(); 368 listOrder.clear(); 369} 370 371template <class Impl> 372void 373InstructionQueue<Impl>::setActiveThreads(list<unsigned> *at_ptr) 374{ 375 DPRINTF(IQ, "Setting active threads list pointer.\n"); 376 activeThreads = at_ptr; 377} 378 379template <class Impl> 380void 381InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr) 382{ 383 DPRINTF(IQ, "Set the issue to execute queue.\n"); 384 issueToExecuteQueue = i2e_ptr; 385} 386 387template <class Impl> 388void 389InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr) 390{ 391 DPRINTF(IQ, "Set the time buffer.\n"); 392 timeBuffer = tb_ptr; 393 394 fromCommit = timeBuffer->getWire(-commitToIEWDelay); 395} 396 397template <class Impl> 398void 399InstructionQueue<Impl>::switchOut() 400{ 401 resetState(); 402 dependGraph.reset(); 403 switchedOut = true; 404 for (int i = 0; i < numThreads; ++i) { 405 memDepUnit[i].switchOut(); 406 } 407} 408 409template <class Impl> 410void 411InstructionQueue<Impl>::takeOverFrom() 412{ 413 switchedOut = false; 414} 415 416template <class Impl> 417int 418InstructionQueue<Impl>::entryAmount(int num_threads) 419{ 420 if (iqPolicy == Partitioned) { 421 return numEntries / num_threads; 422 } else { 423 return 0; 424 } 425} 426 427 428template <class Impl> 429void 430InstructionQueue<Impl>::resetEntries() 431{ 432 if (iqPolicy != Dynamic || numThreads > 1) { 433 int active_threads = (*activeThreads).size(); 434 435 list<unsigned>::iterator threads = (*activeThreads).begin(); 436 list<unsigned>::iterator list_end = (*activeThreads).end(); 437 438 while (threads != list_end) { 439 if (iqPolicy == 
Partitioned) { 440 maxEntries[*threads++] = numEntries / active_threads; 441 } else if(iqPolicy == Threshold && active_threads == 1) { 442 maxEntries[*threads++] = numEntries; 443 } 444 } 445 } 446} 447 448template <class Impl> 449unsigned 450InstructionQueue<Impl>::numFreeEntries() 451{ 452 return freeEntries; 453} 454 455template <class Impl> 456unsigned 457InstructionQueue<Impl>::numFreeEntries(unsigned tid) 458{ 459 return maxEntries[tid] - count[tid]; 460} 461 462// Might want to do something more complex if it knows how many instructions 463// will be issued this cycle. 464template <class Impl> 465bool 466InstructionQueue<Impl>::isFull() 467{ 468 if (freeEntries == 0) { 469 return(true); 470 } else { 471 return(false); 472 } 473} 474 475template <class Impl> 476bool 477InstructionQueue<Impl>::isFull(unsigned tid) 478{ 479 if (numFreeEntries(tid) == 0) { 480 return(true); 481 } else { 482 return(false); 483 } 484} 485 486template <class Impl> 487bool 488InstructionQueue<Impl>::hasReadyInsts() 489{ 490 if (!listOrder.empty()) { 491 return true; 492 } 493 494 for (int i = 0; i < Num_OpClasses; ++i) { 495 if (!readyInsts[i].empty()) { 496 return true; 497 } 498 } 499 500 return false; 501} 502 503template <class Impl> 504void 505InstructionQueue<Impl>::insert(DynInstPtr &new_inst) 506{ 507 // Make sure the instruction is valid 508 assert(new_inst); 509 510 DPRINTF(IQ, "Adding instruction [sn:%lli] PC %#x to the IQ.\n", 511 new_inst->seqNum, new_inst->readPC()); 512 513 assert(freeEntries != 0); 514 515 instList[new_inst->threadNumber].push_back(new_inst); 516 517 --freeEntries; 518 519 new_inst->setInIQ(); 520 521 // Look through its source registers (physical regs), and mark any 522 // dependencies. 523 addToDependents(new_inst); 524 525 // Have this instruction set itself as the producer of its destination 526 // register(s). 
527 addToProducers(new_inst); 528 529 if (new_inst->isMemRef()) { 530 memDepUnit[new_inst->threadNumber].insert(new_inst); 531 } else { 532 addIfReady(new_inst); 533 } 534 535 ++iqInstsAdded; 536 537 count[new_inst->threadNumber]++; 538 539 assert(freeEntries == (numEntries - countInsts())); 540} 541 542template <class Impl> 543void 544InstructionQueue<Impl>::insertNonSpec(DynInstPtr &new_inst) 545{ 546 // @todo: Clean up this code; can do it by setting inst as unable 547 // to issue, then calling normal insert on the inst. 548 549 assert(new_inst); 550 551 nonSpecInsts[new_inst->seqNum] = new_inst; 552 553 DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %#x " 554 "to the IQ.\n", 555 new_inst->seqNum, new_inst->readPC()); 556 557 assert(freeEntries != 0); 558 559 instList[new_inst->threadNumber].push_back(new_inst); 560 561 --freeEntries; 562 563 new_inst->setInIQ(); 564 565 // Have this instruction set itself as the producer of its destination 566 // register(s). 567 addToProducers(new_inst); 568 569 // If it's a memory instruction, add it to the memory dependency 570 // unit. 
571 if (new_inst->isMemRef()) { 572 memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst); 573 } 574 575 ++iqNonSpecInstsAdded; 576 577 count[new_inst->threadNumber]++; 578 579 assert(freeEntries == (numEntries - countInsts())); 580} 581 582template <class Impl> 583void 584InstructionQueue<Impl>::insertBarrier(DynInstPtr &barr_inst) 585{ 586 memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst); 587 588 insertNonSpec(barr_inst); 589} 590 591template <class Impl> 592typename Impl::DynInstPtr 593InstructionQueue<Impl>::getInstToExecute() 594{ 595 assert(!instsToExecute.empty()); 596 DynInstPtr inst = instsToExecute.front(); 597 instsToExecute.pop_front(); 598 return inst; 599} 600 601template <class Impl> 602void 603InstructionQueue<Impl>::addToOrderList(OpClass op_class) 604{ 605 assert(!readyInsts[op_class].empty()); 606 607 ListOrderEntry queue_entry; 608 609 queue_entry.queueType = op_class; 610 611 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum; 612 613 ListOrderIt list_it = listOrder.begin(); 614 ListOrderIt list_end_it = listOrder.end(); 615 616 while (list_it != list_end_it) { 617 if ((*list_it).oldestInst > queue_entry.oldestInst) { 618 break; 619 } 620 621 list_it++; 622 } 623 624 readyIt[op_class] = listOrder.insert(list_it, queue_entry); 625 queueOnList[op_class] = true; 626} 627 628template <class Impl> 629void 630InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it) 631{ 632 // Get iterator of next item on the list 633 // Delete the original iterator 634 // Determine if the next item is either the end of the list or younger 635 // than the new instruction. If so, then add in a new iterator right here. 636 // If not, then move along. 
637 ListOrderEntry queue_entry; 638 OpClass op_class = (*list_order_it).queueType; 639 ListOrderIt next_it = list_order_it; 640 641 ++next_it; 642 643 queue_entry.queueType = op_class; 644 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum; 645 646 while (next_it != listOrder.end() && 647 (*next_it).oldestInst < queue_entry.oldestInst) { 648 ++next_it; 649 } 650 651 readyIt[op_class] = listOrder.insert(next_it, queue_entry); 652} 653 654template <class Impl> 655void 656InstructionQueue<Impl>::processFUCompletion(DynInstPtr &inst, int fu_idx) 657{ 658 // The CPU could have been sleeping until this op completed (*extremely* 659 // long latency op). Wake it if it was. This may be overkill. 660 if (isSwitchedOut()) { 661 return; 662 } 663 664 iewStage->wakeCPU(); 665 666 if (fu_idx > -1) 667 fuPool->freeUnitNextCycle(fu_idx); 668 669 // @todo: Ensure that these FU Completions happen at the beginning 670 // of a cycle, otherwise they could add too many instructions to 671 // the queue. 672 // @todo: This could break if there's multiple multi-cycle ops 673 // finishing on this cycle. Maybe implement something like 674 // instToCommit in iew_impl.hh. 675 issueToExecuteQueue->access(0)->size++; 676 instsToExecute.push_back(inst); 677// int &size = issueToExecuteQueue->access(0)->size; 678 679// issueToExecuteQueue->access(0)->insts[size++] = inst; 680} 681 682// @todo: Figure out a better way to remove the squashed items from the 683// lists. Checking the top item of each list to see if it's squashed 684// wastes time and forces jumps. 685template <class Impl> 686void 687InstructionQueue<Impl>::scheduleReadyInsts() 688{ 689 DPRINTF(IQ, "Attempting to schedule ready instructions from " 690 "the IQ.\n"); 691 692 IssueStruct *i2e_info = issueToExecuteQueue->access(0); 693 694 // Have iterator to head of the list 695 // While I haven't exceeded bandwidth or reached the end of the list, 696 // Try to get a FU that can do what this op needs. 
697 // If successful, change the oldestInst to the new top of the list, put 698 // the queue in the proper place in the list. 699 // Increment the iterator. 700 // This will avoid trying to schedule a certain op class if there are no 701 // FUs that handle it. 702 ListOrderIt order_it = listOrder.begin(); 703 ListOrderIt order_end_it = listOrder.end(); 704 int total_issued = 0; 705 706 while (total_issued < totalWidth && 707 order_it != order_end_it) { 708 OpClass op_class = (*order_it).queueType; 709 710 assert(!readyInsts[op_class].empty()); 711 712 DynInstPtr issuing_inst = readyInsts[op_class].top(); 713 714 assert(issuing_inst->seqNum == (*order_it).oldestInst); 715 716 if (issuing_inst->isSquashed()) { 717 readyInsts[op_class].pop(); 718 719 if (!readyInsts[op_class].empty()) { 720 moveToYoungerInst(order_it); 721 } else { 722 readyIt[op_class] = listOrder.end(); 723 queueOnList[op_class] = false; 724 } 725 726 listOrder.erase(order_it++); 727 728 ++iqSquashedInstsIssued; 729 730 continue; 731 } 732 733 int idx = -2; 734 int op_latency = 1; 735 int tid = issuing_inst->threadNumber; 736 737 if (op_class != No_OpClass) { 738 idx = fuPool->getUnit(op_class); 739 740 if (idx > -1) { 741 op_latency = fuPool->getOpLatency(op_class); 742 } 743 } 744 745 if (idx == -2 || idx != -1) { 746 if (op_latency == 1) { 747// i2e_info->insts[exec_queue_slot++] = issuing_inst; 748 i2e_info->size++; 749 instsToExecute.push_back(issuing_inst); 750 751 // Add the FU onto the list of FU's to be freed next 752 // cycle if we used one. 
753 if (idx >= 0) 754 fuPool->freeUnitNextCycle(idx); 755 } else { 756 int issue_latency = fuPool->getIssueLatency(op_class); 757 // Generate completion event for the FU 758 FUCompletion *execution = new FUCompletion(issuing_inst, 759 idx, this); 760 761 execution->schedule(curTick + cpu->cycles(issue_latency - 1)); 762 763 // @todo: Enforce that issue_latency == 1 or op_latency 764 if (issue_latency > 1) { 765 execution->setFreeFU(); 766 } else { 767 // @todo: Not sure I'm accounting for the 768 // multi-cycle op in a pipelined FU properly, or 769 // the number of instructions issued in one cycle. 770// i2e_info->insts[exec_queue_slot++] = issuing_inst; 771// i2e_info->size++; 772 773 // Add the FU onto the list of FU's to be freed next cycle. 774 fuPool->freeUnitNextCycle(idx); 775 } 776 } 777 778 DPRINTF(IQ, "Thread %i: Issuing instruction PC %#x " 779 "[sn:%lli]\n", 780 tid, issuing_inst->readPC(), 781 issuing_inst->seqNum); 782 783 readyInsts[op_class].pop(); 784 785 if (!readyInsts[op_class].empty()) { 786 moveToYoungerInst(order_it); 787 } else { 788 readyIt[op_class] = listOrder.end(); 789 queueOnList[op_class] = false; 790 } 791 792 issuing_inst->setIssued(); 793 ++total_issued; 794 795 if (!issuing_inst->isMemRef()) { 796 // Memory instructions can not be freed from the IQ until they 797 // complete. 
798 ++freeEntries; 799 count[tid]--; 800 issuing_inst->removeInIQ(); 801 } else { 802 memDepUnit[tid].issue(issuing_inst); 803 } 804 805 listOrder.erase(order_it++); 806 statIssuedInstType[tid][op_class]++; 807 } else { 808 statFuBusy[op_class]++; 809 fuBusy[tid]++; 810 ++order_it; 811 } 812 } 813 814 numIssuedDist.sample(total_issued); 815 iqInstsIssued+= total_issued; 816 817 if (total_issued) { 818 cpu->activityThisCycle(); 819 } else { 820 DPRINTF(IQ, "Not able to schedule any instructions.\n"); 821 } 822} 823 824template <class Impl> 825void 826InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst) 827{ 828 DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready " 829 "to execute.\n", inst); 830 831 NonSpecMapIt inst_it = nonSpecInsts.find(inst); 832 833 assert(inst_it != nonSpecInsts.end()); 834 835 unsigned tid = (*inst_it).second->threadNumber; 836 837 (*inst_it).second->setCanIssue(); 838 839 if (!(*inst_it).second->isMemRef()) { 840 addIfReady((*inst_it).second); 841 } else { 842 memDepUnit[tid].nonSpecInstReady((*inst_it).second); 843 } 844 845 (*inst_it).second = NULL; 846 847 nonSpecInsts.erase(inst_it); 848} 849 850template <class Impl> 851void 852InstructionQueue<Impl>::commit(const InstSeqNum &inst, unsigned tid) 853{ 854 DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n", 855 tid,inst); 856 857 ListIt iq_it = instList[tid].begin(); 858 859 while (iq_it != instList[tid].end() && 860 (*iq_it)->seqNum <= inst) { 861 ++iq_it; 862 instList[tid].pop_front(); 863 } 864 865 assert(freeEntries == (numEntries - countInsts())); 866} 867 868template <class Impl> 869int 870InstructionQueue<Impl>::wakeDependents(DynInstPtr &completed_inst) 871{ 872 int dependents = 0; 873 874 DPRINTF(IQ, "Waking dependents of completed instruction.\n"); 875 876 assert(!completed_inst->isSquashed()); 877 878 // Tell the memory dependence unit to wake any dependents on this 879 // instruction if it is a memory instruction. 
Also complete the memory 880 // instruction at this point since we know it executed without issues. 881 // @todo: Might want to rename "completeMemInst" to something that 882 // indicates that it won't need to be replayed, and call this 883 // earlier. Might not be a big deal. 884 if (completed_inst->isMemRef()) { 885 memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst); 886 completeMemInst(completed_inst); 887 } else if (completed_inst->isMemBarrier() || 888 completed_inst->isWriteBarrier()) { 889 memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst); 890 } 891 892 for (int dest_reg_idx = 0; 893 dest_reg_idx < completed_inst->numDestRegs(); 894 dest_reg_idx++) 895 { 896 PhysRegIndex dest_reg = 897 completed_inst->renamedDestRegIdx(dest_reg_idx); 898 899 // Special case of uniq or control registers. They are not 900 // handled by the IQ and thus have no dependency graph entry. 901 // @todo Figure out a cleaner way to handle this. 902 if (dest_reg >= numPhysRegs) { 903 continue; 904 } 905 906 DPRINTF(IQ, "Waking any dependents on register %i.\n", 907 (int) dest_reg); 908 909 //Go through the dependency chain, marking the registers as 910 //ready within the waiting instructions. 911 DynInstPtr dep_inst = dependGraph.pop(dest_reg); 912 913 while (dep_inst) { 914 DPRINTF(IQ, "Waking up a dependent instruction, PC%#x.\n", 915 dep_inst->readPC()); 916 917 // Might want to give more information to the instruction 918 // so that it knows which of its source registers is 919 // ready. However that would mean that the dependency 920 // graph entries would need to hold the src_reg_idx. 921 dep_inst->markSrcRegReady(); 922 923 addIfReady(dep_inst); 924 925 dep_inst = dependGraph.pop(dest_reg); 926 927 ++dependents; 928 } 929 930 // Reset the head node now that all of its dependents have 931 // been woken up. 
        // Tail of a routine whose head is above this chunk (it walks an
        // instruction's destination registers after wakeup).  Once the
        // dependency chain for dest_reg has been drained it must be empty;
        // clear the producer pointer and mark the register ready.
        assert(dependGraph.empty(dest_reg));
        dependGraph.clearInst(dest_reg);

        // Mark the scoreboard as having that register ready.
        regScoreboard[dest_reg] = true;
    }
    return dependents;
}

// Push a memory instruction whose registers and memory ordering are now
// satisfied onto the ready list for its op class, keeping the age-ordered
// list of ready queues (listOrder) consistent.  Mirrors the ready-list
// insertion logic at the bottom of addIfReady().
template <class Impl>
void
InstructionQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
{
    OpClass op_class = ready_inst->opClass();

    readyInsts[op_class].push(ready_inst);

    // Will need to reorder the list if either a queue is not on the list,
    // or it has an older instruction than last time.
    if (!queueOnList[op_class]) {
        addToOrderList(op_class);
    } else if (readyInsts[op_class].top()->seqNum <
               (*readyIt[op_class]).oldestInst) {
        // The new inst is older (smaller seqNum) than the queue's recorded
        // oldest, so the queue's position in listOrder is stale: remove it
        // and re-insert at the proper age-ordered position.
        listOrder.erase(readyIt[op_class]);
        addToOrderList(op_class);
    }

    DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
            "the ready list, PC %#x opclass:%i [sn:%lli].\n",
            ready_inst->readPC(), op_class, ready_inst->seqNum);
}

// Forward a reschedule request for a memory instruction to the memory
// dependence unit of the instruction's thread.
template <class Impl>
void
InstructionQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
{
    memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
}

// Forward a replay request (e.g. after a blocked access can retry) to the
// memory dependence unit of the instruction's thread.
template <class Impl>
void
InstructionQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
{
    memDepUnit[replay_inst->threadNumber].replay(replay_inst);
}

// Mark a memory instruction's access as done: free its IQ entry, record
// memOpDone, notify the mem dependence unit, and decrement the per-thread
// occupancy count.
template <class Impl>
void
InstructionQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
{
    int tid = completed_inst->threadNumber;

    DPRINTF(IQ, "Completing mem instruction PC:%#x [sn:%lli]\n",
            completed_inst->readPC(), completed_inst->seqNum);

    // The entry is only reclaimed here for mem refs; non-memory insts free
    // their entry elsewhere (at issue/squash time).
    ++freeEntries;

    completed_inst->memOpDone = true;

    memDepUnit[tid].completed(completed_inst);

    count[tid]--;
}

// Report a memory-ordering violation (a load that executed before an older
// conflicting store) to the store's thread's memory dependence unit.
template <class Impl>
void
InstructionQueue<Impl>::violation(DynInstPtr &store,
                                  DynInstPtr &faulting_load)
{
    memDepUnit[store->threadNumber].violation(store, faulting_load);
}

// Begin squashing thread tid: latch the youngest surviving sequence number
// from commit's time-buffer message, squash IQ contents if any, and always
// tell the memory dependence unit to squash as well.
template <class Impl>
void
InstructionQueue<Impl>::squash(unsigned tid)
{
    DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
            "the IQ.\n", tid);

    // Read instruction sequence number of last instruction out of the
    // time buffer.
    squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;

    // Call doSquash if there are insts in the IQ
    if (count[tid] > 0) {
        doSquash(tid);
    }

    // Also tell the memory dependence unit to squash.
    memDepUnit[tid].squash(squashedSeqNum[tid], tid);
}

// Walk thread tid's instruction list from youngest to oldest, removing every
// instruction younger than squashedSeqNum[tid]: unhook it from the register
// dependency graph (or the non-speculative map), mark it squashed-in-IQ, and
// reclaim its entry.  Statement order here is load-bearing; see notes below.
template <class Impl>
void
InstructionQueue<Impl>::doSquash(unsigned tid)
{
    // Start at the tail.
    // NOTE(review): if instList[tid] is empty this decrements begin()==end(),
    // and the erase(squash_it--) at the bottom can also step an iterator
    // before begin().  With the circular sentinel-node list implementations
    // this wraps to end() and the loop condition terminates, but it is not
    // guaranteed by the standard -- confirm against the container in use.
    ListIt squash_it = instList[tid].end();
    --squash_it;

    // NOTE(review): %i is used for a sequence number here while every other
    // print in this file uses %lli for [sn:...] -- likely truncates on
    // 64-bit seqNums; confirm cprintf/DPRINTF format semantics.
    DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
            tid, squashedSeqNum[tid]);

    // Squash any instructions younger than the squashed sequence number
    // given.
    while (squash_it != instList[tid].end() &&
           (*squash_it)->seqNum > squashedSeqNum[tid]) {

        DynInstPtr squashed_inst = (*squash_it);

        // Only handle the instruction if it actually is in the IQ and
        // hasn't already been squashed in the IQ.
        if (squashed_inst->threadNumber != tid ||
            squashed_inst->isSquashedInIQ()) {
            // Skip without erasing: this list node belongs to another
            // thread's inst or was already processed.
            --squash_it;
            continue;
        }

        // Only insts that have not fully left the IQ need cleanup: either
        // not yet issued, or a mem ref whose access has not completed.
        if (!squashed_inst->isIssued() ||
            (squashed_inst->isMemRef() &&
             !squashed_inst->memOpDone)) {

            // Remove the instruction from the dependency list.
            // Non-speculative and barrier insts were never placed on the
            // register dependency graph; they live in nonSpecInsts instead
            // (handled in the else branch).
            if (!squashed_inst->isNonSpeculative() &&
                !squashed_inst->isMemBarrier() &&
                !squashed_inst->isWriteBarrier()) {

                for (int src_reg_idx = 0;
                     src_reg_idx < squashed_inst->numSrcRegs();
                     src_reg_idx++)
                {
                    PhysRegIndex src_reg =
                        squashed_inst->renamedSrcRegIdx(src_reg_idx);

                    // Only remove it from the dependency graph if it
                    // was placed there in the first place.

                    // Instead of doing a linked list traversal, we
                    // can just remove these squashed instructions
                    // either at issue time, or when the register is
                    // overwritten.  The only downside to this is it
                    // leaves more room for error.

                    // src_reg >= numPhysRegs indicates an un-renamed misc
                    // register, which is never on the graph.
                    if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
                        src_reg < numPhysRegs) {
                        dependGraph.remove(src_reg, squashed_inst);
                    }


                    ++iqSquashedOperandsExamined;
                }
            } else {
                // Non-spec/barrier inst: drop it from the seqNum-keyed map
                // so it can never be scheduled by a later commit signal.
                NonSpecMapIt ns_inst_it =
                    nonSpecInsts.find(squashed_inst->seqNum);
                assert(ns_inst_it != nonSpecInsts.end());

                // Release the map's DynInstPtr reference before erasing.
                (*ns_inst_it).second = NULL;

                nonSpecInsts.erase(ns_inst_it);

                ++iqSquashedNonSpecRemoved;
            }

            // Might want to also clear out the head of the dependency graph.

            // Mark it as squashed within the IQ.
            squashed_inst->setSquashedInIQ();

            // @todo: Remove this hack where several statuses are set so the
            // inst will flow through the rest of the pipeline.
            squashed_inst->setIssued();
            squashed_inst->setCanCommit();
            squashed_inst->removeInIQ();

            //Update Thread IQ Count
            count[squashed_inst->threadNumber]--;

            ++freeEntries;

            DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %#x "
                    "squashed.\n",
                    tid, squashed_inst->seqNum, squashed_inst->readPC());
        }

        // Post-decrement: erase the current node while stepping to the
        // next-older one.  std::list::erase only invalidates the erased
        // iterator, so squash_it stays valid.
        instList[tid].erase(squash_it--);
        ++iqSquashedInstsExamined;
    }
}

// For each not-yet-ready source register of new_inst, link the inst into
// that register's dependency chain (unless the scoreboard shows the register
// became ready in flight, in which case mark the source ready instead).
// Returns true iff at least one dependency link was created.
template <class Impl>
bool
InstructionQueue<Impl>::addToDependents(DynInstPtr &new_inst)
{
    // Loop through the instruction's source registers, adding
    // them to the dependency list if they are not ready.
    int8_t total_src_regs = new_inst->numSrcRegs();
    bool return_val = false;

    for (int src_reg_idx = 0;
         src_reg_idx < total_src_regs;
         src_reg_idx++)
    {
        // Only add it to the dependency graph if it's not ready.
        if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
            PhysRegIndex src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);

            // Check the IQ's scoreboard to make sure the register
            // hasn't become ready while the instruction was in flight
            // between stages.  Only if it really isn't ready should
            // it be added to the dependency graph.
            if (src_reg >= numPhysRegs) {
                // Un-renamed misc register: not tracked on the graph.
                continue;
            } else if (regScoreboard[src_reg] == false) {
                DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
                        "is being added to the dependency chain.\n",
                        new_inst->readPC(), src_reg);

                dependGraph.insert(src_reg, new_inst);

                // Change the return value to indicate that something
                // was added to the dependency graph.
                return_val = true;
            } else {
                DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
                        "became ready before it reached the IQ.\n",
                        new_inst->readPC(), src_reg);
                // Mark a register ready within the instruction.
                new_inst->markSrcRegReady(src_reg_idx);
            }
        }
    }

    return return_val;
}

// Record new_inst as the producer of each of its renamed destination
// registers: place it at the head of each register's dependency chain and
// clear the scoreboard bit so consumers know the value is pending.
template <class Impl>
void
InstructionQueue<Impl>::addToProducers(DynInstPtr &new_inst)
{
    // Nothing really needs to be marked when an instruction becomes
    // the producer of a register's value, but for convenience a ptr
    // to the producing instruction will be placed in the head node of
    // the dependency links.
    int8_t total_dest_regs = new_inst->numDestRegs();

    for (int dest_reg_idx = 0;
         dest_reg_idx < total_dest_regs;
         dest_reg_idx++)
    {
        PhysRegIndex dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);

        // Instructions that use the misc regs will have a reg number
        // higher than the normal physical registers.  In this case these
        // registers are not renamed, and there is no need to track
        // dependencies as these instructions must be executed at commit.
        if (dest_reg >= numPhysRegs) {
            continue;
        }

        // A non-empty chain here means a previous producer of this physical
        // register was never cleaned up -- that is a rename/IQ invariant
        // violation, so dump state and die.
        if (!dependGraph.empty(dest_reg)) {
            dependGraph.dump();
            panic("Dependency graph %i not empty!", dest_reg);
        }

        dependGraph.setInst(dest_reg, new_inst);

        // Mark the scoreboard to say it's not yet ready.
        regScoreboard[dest_reg] = false;
    }
}

// If inst has all source registers ready, route it toward issue: mem refs
// are handed to the memory dependence unit (which calls back via
// addReadyMemInst when ordering allows); everything else goes straight onto
// the ready list for its op class.  The list-ordering logic duplicates
// addReadyMemInst().
template <class Impl>
void
InstructionQueue<Impl>::addIfReady(DynInstPtr &inst)
{
    // If the instruction now has all of its source registers
    // available, then add it to the list of ready instructions.
    if (inst->readyToIssue()) {

        //Add the instruction to the proper ready list.
        if (inst->isMemRef()) {

            DPRINTF(IQ, "Checking if memory instruction can issue.\n");

            // Message to the mem dependence unit that this instruction has
            // its registers ready.
            memDepUnit[inst->threadNumber].regsReady(inst);

            return;
        }

        OpClass op_class = inst->opClass();

        DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
                "the ready list, PC %#x opclass:%i [sn:%lli].\n",
                inst->readPC(), op_class, inst->seqNum);

        readyInsts[op_class].push(inst);

        // Will need to reorder the list if either a queue is not on the list,
        // or it has an older instruction than last time.
        if (!queueOnList[op_class]) {
            addToOrderList(op_class);
        } else if (readyInsts[op_class].top()->seqNum <
                   (*readyIt[op_class]).oldestInst) {
            listOrder.erase(readyIt[op_class]);
            addToOrderList(op_class);
        }
    }
}

// Return the number of instructions currently occupying the IQ.  The
// brute-force per-thread walk is kept (disabled) for cross-checking; the
// live implementation just derives occupancy from the entry counters.
template <class Impl>
int
InstructionQueue<Impl>::countInsts()
{
    //ksewell:This works but definitely could use a cleaner write
    //with a more intuitive way of counting. Right now it's
    //just brute force ....

#if 0
    int total_insts = 0;

    for (int i = 0; i < numThreads; ++i) {
        ListIt count_it = instList[i].begin();

        while (count_it != instList[i].end()) {
            if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
                if (!(*count_it)->isIssued()) {
                    ++total_insts;
                } else if ((*count_it)->isMemRef() &&
                           !(*count_it)->memOpDone) {
                    // Loads that have not been marked as executed still count
                    // towards the total instructions.
                    ++total_insts;
                }
            }

            ++count_it;
        }
    }

    return total_insts;
#else
    return numEntries - freeEntries;
#endif
}

// Debug helper: print the sizes of every per-op-class ready list, the
// contents of the non-speculative map, and the age-ordered list of ready
// queues.
template <class Impl>
void
InstructionQueue<Impl>::dumpLists()
{
    for (int i = 0; i < Num_OpClasses; ++i) {
        cprintf("Ready list %i size: %i\n", i, readyInsts[i].size());

        cprintf("\n");
    }

    cprintf("Non speculative list size: %i\n", nonSpecInsts.size());

    NonSpecMapIt non_spec_it = nonSpecInsts.begin();
    NonSpecMapIt non_spec_end_it = nonSpecInsts.end();

    cprintf("Non speculative list: ");

    while (non_spec_it != non_spec_end_it) {
        cprintf("%#x [sn:%lli]", (*non_spec_it).second->readPC(),
                (*non_spec_it).second->seqNum);
        ++non_spec_it;
    }

    cprintf("\n");

    ListOrderIt list_order_it = listOrder.begin();
    ListOrderIt list_order_end_it = listOrder.end();
    int i = 1;

    cprintf("List order: ");

    while (list_order_it != list_order_end_it) {
        cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
                (*list_order_it).oldestInst);

        ++list_order_it;
        ++i;
    }

    cprintf("\n");
}


// Debug helper: walk every thread's instruction list and print each
// instruction's PC, seqNum, thread, and issue/squash status.  valid_num
// counts insts that still occupy an IQ entry (not issued, or a mem ref
// whose access hasn't completed) -- the same criterion the disabled
// countInsts() walk uses.
template <class Impl>
void
InstructionQueue<Impl>::dumpInsts()
{
    for (int i = 0; i < numThreads; ++i) {
        int num = 0;
        int valid_num = 0;
        ListIt inst_list_it = instList[i].begin();

        while (inst_list_it != instList[i].end())
        {
            cprintf("Instruction:%i\n",
                    num);
            if (!(*inst_list_it)->isSquashed()) {
                if (!(*inst_list_it)->isIssued()) {
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                } else if ((*inst_list_it)->isMemRef() &&
                           !(*inst_list_it)->memOpDone) {
                    // Loads that have not been marked as executed
                    // still count towards the total instructions.
                    ++valid_num;
                    cprintf("Count:%i\n", valid_num);
                }
            }

            cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n"
                    "Issued:%i\nSquashed:%i\n",
                    (*inst_list_it)->readPC(),
                    (*inst_list_it)->seqNum,
                    (*inst_list_it)->threadNumber,
                    (*inst_list_it)->isIssued(),
                    (*inst_list_it)->isSquashed());

            if ((*inst_list_it)->isMemRef()) {
                cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone);
            }

            cprintf("\n");

            inst_list_it++;
            ++num;
        }
    }
}