#include "cpu/minor/pipeline.hh"
#include "debug/Drain.hh"
#include "debug/Fetch.hh"
#include "debug/MinorTrace.hh"

namespace Minor
{

/* Construct the Fetch1 stage: latch wiring to/from Execute (inp),
 * Fetch2 (out, prediction), the icache port, and the per-thread fetch
 * state.  requests/transfers queues are both sized by fetch1FetchLimit
 * so a reservation in transfers can always be made when a request is
 * pushed. */
Fetch1::Fetch1(const std::string &name_,
    MinorCPU &cpu_,
    MinorCPUParams &params,
    Latch<BranchData>::Output inp_,
    Latch<ForwardLineData>::Input out_,
    Latch<BranchData>::Output prediction_,
    std::vector<InputBuffer<ForwardLineData>> &next_stage_input_buffer) :
    Named(name_),
    cpu(cpu_),
    inp(inp_),
    out(out_),
    prediction(prediction_),
    nextStageReserve(next_stage_input_buffer),
    icachePort(name_ + ".icache_port", *this, cpu_),
    lineSnap(params.fetch1LineSnapWidth),
    maxLineWidth(params.fetch1LineWidth),
    fetchLimit(params.fetch1FetchLimit),
    fetchInfo(params.numThreads),
    threadPriority(0),
    requests(name_ + ".requests", "lines", params.fetch1FetchLimit),
    transfers(name_ + ".transfers", "lines", params.fetch1FetchLimit),
    icacheState(IcacheRunning),
    lineSeqNum(InstId::firstLineSeqNum),
    numFetchesInMemorySystem(0),
    numFetchesInITLB(0)
{
    /* A width parameter of 0 means "use the cache line size" */
    if (lineSnap == 0) {
        lineSnap = cpu.cacheLineSize();
        DPRINTF(Fetch, "lineSnap set to cache line size of: %d\n",
            lineSnap);
    }

    if (maxLineWidth == 0) {
        maxLineWidth = cpu.cacheLineSize();
        DPRINTF(Fetch, "maxLineWidth set to cache line size of: %d\n",
            maxLineWidth);
    }

    /* These assertions should be copied to the Python config. as well */
    if ((lineSnap % sizeof(TheISA::MachInst)) != 0) {
        fatal("%s: fetch1LineSnapWidth must be a multiple "
            "of sizeof(TheISA::MachInst) (%d)\n", name_,
            sizeof(TheISA::MachInst));
    }

    if (!(maxLineWidth >= lineSnap &&
        (maxLineWidth % sizeof(TheISA::MachInst)) == 0))
    {
        fatal("%s: fetch1LineWidth must be a multiple of"
            " sizeof(TheISA::MachInst)"
            " (%d), and >= fetch1LineSnapWidth (%d)\n",
            name_, sizeof(TheISA::MachInst), lineSnap);
    }

    if (fetchLimit < 1) {
        fatal("%s: fetch1FetchLimit must be >= 1 (%d)\n", name_,
            fetchLimit);
    }
}

/* Pick the next thread to fetch from according to the CPU's thread
 * policy.  Returns InvalidThreadID when no thread is active, unblocked
 * and in FetchRunning state.  Also records the chosen thread in
 * threadPriority so round-robin can rotate from it next time. */
inline ThreadID
Fetch1::getScheduledThread()
{
    /* Select thread via policy. */
    std::vector<ThreadID> priority_list;

    switch (cpu.threadPolicy) {
      case Enums::SingleThreaded:
        priority_list.push_back(0);
        break;
      case Enums::RoundRobin:
        priority_list = cpu.roundRobinPriority(threadPriority);
        break;
      case Enums::Random:
        priority_list = cpu.randomPriority();
        break;
      default:
        panic("Unknown fetch policy");
    }

    for (auto tid : priority_list) {
        if (cpu.getContext(tid)->status() == ThreadContext::Active &&
            !fetchInfo[tid].blocked &&
            fetchInfo[tid].state == FetchRunning) {
            threadPriority = tid;
            return tid;
        }
    }

    return InvalidThreadID;
}

/* Build and issue a line-fetch request for the given thread: tag it
 * with the thread's current stream/prediction sequence numbers, reserve
 * a transfers slot, push it onto requests and start its ITLB
 * translation.  Finally advance the thread's PC to the next line. */
void
Fetch1::fetchLine(ThreadID tid)
{
    /* Reference the currently used thread state. */
    Fetch1ThreadInfo &thread = fetchInfo[tid];

    /* If line_offset != 0, a request is pushed for the remainder of the
     * line. */
    /* Use a lower, sizeof(MachInst) aligned address for the fetch */
    Addr aligned_pc = thread.pc.instAddr() & ~((Addr) lineSnap - 1);
    /* NOTE(review): aligned_pc is lineSnap-aligned so line_offset is
     * always 0 here and request_size is always maxLineWidth — presumably
     * intentional upstream behaviour; confirm before changing */
    unsigned int line_offset = aligned_pc % lineSnap;
    unsigned int request_size = maxLineWidth - line_offset;

    /* Fill in the line's id */
    InstId request_id(tid,
        thread.streamSeqNum, thread.predictionSeqNum,
        lineSeqNum);

    FetchRequestPtr request = new FetchRequest(*this, request_id, thread.pc);

    DPRINTF(Fetch, "Inserting fetch into the fetch queue "
        "%s addr: 0x%x pc: %s line_offset: %d request_size: %d\n",
        request_id, aligned_pc, thread.pc, line_offset, request_size);

    request->request.setContext(cpu.threads[tid]->getTC()->contextId());
    request->request.setVirt(0 /* asid */,
        aligned_pc, request_size, Request::INST_FETCH, cpu.instMasterId(),
        /* I've no idea why we need the PC, but give it */
        thread.pc.instAddr());

    DPRINTF(Fetch, "Submitting ITLB request\n");
    numFetchesInITLB++;

    request->state = FetchRequest::InTranslation;

    /* Reserve space in the queues upstream of requests for results */
    transfers.reserve();
    requests.push(request);

    /* Submit the translation request.  The response will come
     * through finish/markDelayed on this request as it bears
     * the Translation interface */
    cpu.threads[request->id.threadId]->itb->translateTiming(
        &request->request,
        cpu.getContext(request->id.threadId),
        request, BaseTLB::Execute);

    lineSeqNum++;

    /* Step the PC for the next line onto the line aligned next address.
     * Note that as instructions can span lines, this PC is only a
     * reliable 'new' PC if the next line has a new stream sequence number. */
#if THE_ISA == ALPHA_ISA
    /* Restore the low bits of the PC used as address space flags */
    Addr pc_low_bits = thread.pc.instAddr() &
        ((Addr) (1 << sizeof(TheISA::MachInst)) - 1);

    thread.pc.set(aligned_pc + request_size + pc_low_bits);
#else
    thread.pc.set(aligned_pc + request_size);
#endif
}

/* Pretty-print the icache state machine's state for debug output */
std::ostream &
operator <<(std::ostream &os, Fetch1::IcacheState state)
{
    switch (state) {
      case Fetch1::IcacheRunning:
        os << "IcacheRunning";
        break;
      case Fetch1::IcacheNeedsRetry:
        os << "IcacheNeedsRetry";
        break;
      default:
        os << "IcacheState-" << static_cast<int>(state);
        break;
    }
    return os;
}

/* Build the memory read packet for this (translated) request */
void
Fetch1::FetchRequest::makePacket()
{
    /* Make the necessary packet for a memory transaction */
    packet = new Packet(&request, MemCmd::ReadReq);
    packet->allocate();

    /* This FetchRequest becomes SenderState to allow the response to be
     * identified */
    packet->pushSenderState(this);
}

/* Translation-complete callback (this class is the Translation object
 * passed to translateTiming).  Records the fault (if any), marks the
 * request Translated and hands it back to the owning Fetch1 stage. */
void
Fetch1::FetchRequest::finish(const Fault &fault_, RequestPtr request_,
    ThreadContext *tc, BaseTLB::Mode mode)
{
    fault = fault_;

    state = Translated;
    fetch.handleTLBResponse(this);

    /* Let's try and wake up the processor for the next cycle */
    fetch.cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
}

/* Account for an ITLB response (faulting or not) and try to advance the
 * request towards the memory system */
void
Fetch1::handleTLBResponse(FetchRequestPtr response)
{
    numFetchesInITLB--;

    if (response->fault != NoFault) {
        DPRINTF(Fetch, "Fault in address ITLB translation: %s, "
            "paddr: 0x%x, vaddr: 0x%x\n",
            response->fault->name(),
            (response->request.hasPaddr() ?
                response->request.getPaddr() : 0),
            response->request.getVaddr());

        if (DTRACE(MinorTrace))
            minorTraceResponseLine(name(), response);
    } else {
        DPRINTF(Fetch, "Got ITLB response\n");
    }

    response->state = FetchRequest::Translated;

    tryToSendToTransfers(response);
}

/* The request owns its packet once the response has been received (or
 * if it was never sent); tryToSend NULLs packet on a successful send so
 * an in-flight packet is not double-freed here */
Fetch1::FetchRequest::~FetchRequest()
{
    if (packet)
        delete packet;
}

/* Try to move the head-of-queue request onwards: discardable/faulting
 * requests skip memory and go straight to transfers as Complete;
 * translated requests are packetised and sent to the icache.  Requests
 * are only ever issued in queue order. */
void
Fetch1::tryToSendToTransfers(FetchRequestPtr request)
{
    if (!requests.empty() && requests.front() != request) {
        DPRINTF(Fetch, "Fetch not at front of requests queue, can't"
            " issue to memory\n");
        return;
    }

    if (request->state == FetchRequest::InTranslation) {
        DPRINTF(Fetch, "Fetch still in translation, not issuing to"
            " memory\n");
        return;
    }

    if (request->isDiscardable() || request->fault != NoFault) {
        /* Discarded and faulting requests carry on through transfers
         * as Complete/packet == NULL */

        request->state = FetchRequest::Complete;
        moveFromRequestsToTransfers(request);

        /* Wake up the pipeline next cycle as there will be no event
         * for this queue->queue transfer */
        cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
    } else if (request->state == FetchRequest::Translated) {
        if (!request->packet)
            request->makePacket();

        /* Ensure that the packet won't delete the request */
        assert(request->packet->needsResponse());

        if (tryToSend(request))
            moveFromRequestsToTransfers(request);
    } else {
        DPRINTF(Fetch, "Not advancing line fetch\n");
    }
}

/* Move the given (head) request from the requests queue onto the
 * transfers queue; the transfers slot was reserved in fetchLine */
void
Fetch1::moveFromRequestsToTransfers(FetchRequestPtr request)
{
    assert(!requests.empty() && requests.front() == request);

    requests.pop();
    transfers.push(request);
}

/* Attempt the timing send to the icache.  On failure, the icache state
 * machine drops to IcacheNeedsRetry and we wait for recvReqRetry. */
bool
Fetch1::tryToSend(FetchRequestPtr request)
{
    bool ret = false;

    if (icachePort.sendTimingReq(request->packet)) {
        /* Invalidate the fetch_requests packet so we don't
         * accidentally fail to deallocate it (or use it!)
         * later by overwriting it */
        request->packet = NULL;
        request->state = FetchRequest::RequestIssuing;
        numFetchesInMemorySystem++;

        ret = true;

        DPRINTF(Fetch, "Issued fetch request to memory: %s\n",
            request->id);
    } else {
        /* Needs to be resent, wait for that */
        icacheState = IcacheNeedsRetry;

        DPRINTF(Fetch, "Line fetch needs to retry: %s\n",
            request->id);
    }

    return ret;
}

/* Called each cycle from evaluate(): while running, push the head ITLB
 * result towards memory; while awaiting a retry, do nothing */
void
Fetch1::stepQueues()
{
    IcacheState old_icache_state = icacheState;

    switch (icacheState) {
      case IcacheRunning:
        /* Move ITLB results on to the memory system */
        if (!requests.empty()) {
            tryToSendToTransfers(requests.front());
        }
        break;
      case IcacheNeedsRetry:
        break;
    }

    if (icacheState != old_icache_state) {
        DPRINTF(Fetch, "Step in state %s moving to state %s\n",
            old_icache_state, icacheState);
    }
}

/* Pop and delete the head request of the given queue (no-op if empty) */
void
Fetch1::popAndDiscard(FetchQueue &queue)
{
    if (!queue.empty()) {
        delete queue.front();
        queue.pop();
    }
}

/* Fetches currently owned by this stage: queued for memory plus
 * awaiting handover to Fetch2 */
unsigned int
Fetch1::numInFlightFetches()
{
    return requests.occupiedSpace() +
        transfers.occupiedSpace();
}

/** Print the appropriate MinorLine line for a fetch response */
void
Fetch1::minorTraceResponseLine(const std::string &name,
    Fetch1::FetchRequestPtr response) const
{
    Request &request M5_VAR_USED = response->request;

    if (response->packet && response->packet->isError()) {
        MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"error packet\"\n",
            response->id, request.getVaddr());
    } else if (response->fault != NoFault) {
        MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"%s\"\n",
            response->id, request.getVaddr(), response->fault->name());
    } else {
        MINORLINE(this, "id=%s size=%d vaddr=0x%x paddr=0x%x\n",
            response->id, request.getSize(),
            request.getVaddr(), request.getPaddr());
    }
}
/* Receive a fetch response from the icache.  The originating
 * FetchRequest rides on the packet as SenderState; re-attach the packet
 * to it, mark it Complete and schedule a wakeup so evaluate() can move
 * it on.  Always returns true: this port never applies back-pressure to
 * responses. */
bool
Fetch1::recvTimingResp(PacketPtr response)
{
    DPRINTF(Fetch, "recvTimingResp %d\n", numFetchesInMemorySystem);

    /* Only push the response if we didn't change stream?  No, all responses
     * should hit the responses queue.  It's the job of 'step' to throw them
     * away. */
    FetchRequestPtr fetch_request = safe_cast<FetchRequestPtr>
        (response->popSenderState());

    /* Fixup packet in fetch_request as this may have changed */
    assert(!fetch_request->packet);
    fetch_request->packet = response;

    numFetchesInMemorySystem--;
    fetch_request->state = FetchRequest::Complete;

    if (DTRACE(MinorTrace))
        minorTraceResponseLine(name(), fetch_request);

    if (response->isError()) {
        DPRINTF(Fetch, "Received error response packet: %s\n",
            fetch_request->id);
    }

    /* We go to idle even if there are more things to do on the queues as
     * it's the job of step to actually step us on to the next transaction */

    /* Let's try and wake up the processor for the next cycle to move on
     * queues */
    cpu.wakeupOnEvent(Pipeline::Fetch1StageId);

    /* Never busy */
    return true;
}

/* The icache is ready again after a failed sendTimingReq: retry the
 * head request (which must still be at the front of requests) */
void
Fetch1::recvReqRetry()
{
    DPRINTF(Fetch, "recvRetry\n");
    assert(icacheState == IcacheNeedsRetry);
    assert(!requests.empty());

    FetchRequestPtr retryRequest = requests.front();

    icacheState = IcacheRunning;

    if (tryToSend(retryRequest))
        moveFromRequestsToTransfers(retryRequest);
}

/* Pretty-print a thread's fetch state for debug output */
std::ostream &
operator <<(std::ostream &os, Fetch1::FetchState state)
{
    switch (state) {
      case Fetch1::FetchHalted:
        os << "FetchHalted";
        break;
      case Fetch1::FetchWaitingForPC:
        os << "FetchWaitingForPC";
        break;
      case Fetch1::FetchRunning:
        os << "FetchRunning";
        break;
      default:
        os << "FetchState-" << static_cast<int>(state);
        break;
    }
    return os;
}

/* Redirect a thread's fetch to a new stream: bump its expected sequence
 * numbers, set the thread's fetch state from the branch reason (suspend
 * unless the wakeup guard is set, halt, or resume running) and adopt
 * the branch target as the new fetch PC */
void
Fetch1::changeStream(const BranchData &branch)
{
    Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];

    updateExpectedSeqNums(branch);

    /* Start fetching again if we were stopped */
    switch (branch.reason) {
      case BranchData::SuspendThread:
        {
            if (thread.wakeupGuard) {
                DPRINTF(Fetch, "Not suspending fetch due to guard: %s\n",
                    branch);
            } else {
                DPRINTF(Fetch, "Suspending fetch: %s\n", branch);
                thread.state = FetchWaitingForPC;
            }
        }
        break;
      case BranchData::HaltFetch:
        DPRINTF(Fetch, "Halting fetch\n");
        thread.state = FetchHalted;
        break;
      default:
        DPRINTF(Fetch, "Changing stream on branch: %s\n", branch);
        thread.state = FetchRunning;
        break;
    }
    thread.pc = branch.target;
}

/* Adopt the branch's stream/prediction sequence numbers as the ones new
 * fetches will be tagged with (and against which in-flight lines are
 * judged discardable) */
void
Fetch1::updateExpectedSeqNums(const BranchData &branch)
{
    Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];

    DPRINTF(Fetch, "Updating streamSeqNum from: %d to %d,"
        " predictionSeqNum from: %d to %d\n",
        thread.streamSeqNum, branch.newStreamSeqNum,
        thread.predictionSeqNum, branch.newPredictionSeqNum);

    /* Change the stream */
    thread.streamSeqNum = branch.newStreamSeqNum;
    /* Update the prediction.  Note that it's possible for this to
     * actually set the prediction to an *older* value if new
     * predictions have been discarded by execute */
    thread.predictionSeqNum = branch.newPredictionSeqNum;
}

/* Convert a completed fetch response into the ForwardLineData handed to
 * Fetch2.  On fault the fault is forwarded (no data) and the thread
 * stops fetching until a new PC arrives; otherwise the packet's data is
 * adopted by the line and ownership leaves the response. */
void
Fetch1::processResponse(Fetch1::FetchRequestPtr response,
    ForwardLineData &line)
{
    Fetch1ThreadInfo &thread = fetchInfo[response->id.threadId];
    PacketPtr packet = response->packet;

    /* Pass the prefetch abort (if any) on to Fetch2 in a ForwardLineData
     * structure */
    line.setFault(response->fault);
    /* Make sequence numbers valid in return */
    line.id = response->id;
    /* Set PC to virtual address */
    line.pc = response->pc;
    /* Set the lineBase, which is a sizeof(MachInst) aligned address <=
     * pc.instAddr() */
    line.lineBaseAddr = response->request.getVaddr();

    if (response->fault != NoFault) {
        /* Stop fetching if there was a fault */
        /* Should probably try to flush the queues as well, but we
         * can't be sure that this fault will actually reach Execute, and we
         * can't (currently) selectively remove this stream from the queues */
        DPRINTF(Fetch, "Stopping line fetch because of fault: %s\n",
            response->fault->name());
        thread.state = Fetch1::FetchWaitingForPC;
    } else {
        line.adoptPacketData(packet);
        /* Null the response's packet to prevent the response from trying to
         * deallocate the packet */
        response->packet = NULL;
    }
}

/* Cycle function for the stage: handle incoming branches from Execute
 * and predictions from Fetch2, issue a new line fetch for the scheduled
 * thread, step the memory-side queues, and forward (or discard) a
 * completed line to Fetch2.  Statement order here is the stage's
 * behaviour — branches are applied before the fetch for this cycle is
 * generated, and wakeup guards are cleared last. */
void
Fetch1::evaluate()
{
    const BranchData &execute_branch = *inp.outputWire;
    const BranchData &fetch2_branch = *prediction.outputWire;
    ForwardLineData &line_out = *out.inputWire;

    assert(line_out.isBubble());

    for (ThreadID tid = 0; tid < cpu.numThreads; tid++)
        fetchInfo[tid].blocked = !nextStageReserve[tid].canReserve();

    /** Are both branches from later stages valid and for the same thread? */
    if (execute_branch.threadId != InvalidThreadID &&
        execute_branch.threadId == fetch2_branch.threadId) {

        Fetch1ThreadInfo &thread = fetchInfo[execute_branch.threadId];

        /* Are we changing stream?  Look to the Execute branches first, then
         * to predicted changes of stream from Fetch2 */
        if (execute_branch.isStreamChange()) {
            if (thread.state == FetchHalted) {
                DPRINTF(Fetch, "Halted, ignoring branch: %s\n", execute_branch);
            } else {
                changeStream(execute_branch);
            }

            if (!fetch2_branch.isBubble()) {
                DPRINTF(Fetch, "Ignoring simultaneous prediction: %s\n",
                    fetch2_branch);
            }

            /* The streamSeqNum tagging in request/response ->req should handle
             * discarding those requests when we get to them. */
        } else if (thread.state != FetchHalted && fetch2_branch.isStreamChange()) {
            /* Handle branch predictions by changing the instruction source
             * if we're still processing the same stream (as set by streamSeqNum)
             * as the one of the prediction.
             */
            if (fetch2_branch.newStreamSeqNum != thread.streamSeqNum) {
                DPRINTF(Fetch, "Not changing stream on prediction: %s,"
                    " streamSeqNum mismatch\n",
                    fetch2_branch);
            } else {
                changeStream(fetch2_branch);
            }
        }
    } else {
        /* Fetch2 and Execute branches are for different threads */
        if (execute_branch.threadId != InvalidThreadID &&
            execute_branch.isStreamChange()) {

            if (fetchInfo[execute_branch.threadId].state == FetchHalted) {
                DPRINTF(Fetch, "Halted, ignoring branch: %s\n", execute_branch);
            } else {
                changeStream(execute_branch);
            }
        }

        if (fetch2_branch.threadId != InvalidThreadID &&
            fetch2_branch.isStreamChange()) {

            if (fetchInfo[fetch2_branch.threadId].state == FetchHalted) {
                DPRINTF(Fetch, "Halted, ignoring branch: %s\n", fetch2_branch);
            } else if (fetch2_branch.newStreamSeqNum != fetchInfo[fetch2_branch.threadId].streamSeqNum) {
                DPRINTF(Fetch, "Not changing stream on prediction: %s,"
                    " streamSeqNum mismatch\n", fetch2_branch);
            } else {
                changeStream(fetch2_branch);
            }
        }
    }

    if (numInFlightFetches() < fetchLimit) {
        ThreadID fetch_tid = getScheduledThread();

        if (fetch_tid != InvalidThreadID) {
            DPRINTF(Fetch, "Fetching from thread %d\n", fetch_tid);

            /* Generate fetch to selected thread */
            fetchLine(fetch_tid);
            /* Take up a slot in the fetch queue */
            nextStageReserve[fetch_tid].reserve();
        } else {
            DPRINTF(Fetch, "No active threads available to fetch from\n");
        }
    }


    /* Halting shouldn't prevent fetches in flight from being processed */
    /* Step fetches through the icachePort queues and memory system */
    stepQueues();

    /* As we've thrown away early lines, if there is a line, it must
     * be from the right stream */
    if (!transfers.empty() &&
        transfers.front()->isComplete())
    {
        Fetch1::FetchRequestPtr response = transfers.front();

        if (response->isDiscardable()) {
            nextStageReserve[response->id.threadId].freeReservation();

            DPRINTF(Fetch, "Discarding translated fetch as it's for"
                " an old stream\n");

            /* Wake up next cycle just in case there was some other
             * action to do */
            cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
        } else {
            DPRINTF(Fetch, "Processing fetched line: %s\n",
                response->id);

            processResponse(response, line_out);
        }

        popAndDiscard(transfers);
    }

    /* If we generated output, and mark the stage as being active
     * to encourage that output on to the next stage */
    if (!line_out.isBubble())
        cpu.activityRecorder->activity();

    /* Fetch1 has no inputBuffer so the only activity we can have is to
     * generate a line output (tested just above) or to initiate a memory
     * fetch which will signal activity when it returns/needs stepping
     * between queues */


    /* This looks hackish.  And it is, but there doesn't seem to be a better
     * way to do this.  The signal from commit to suspend fetch takes 1
     * clock cycle to propagate to fetch.  However, a legitimate wakeup
     * may occur between cycles from the memory system.  Thus wakeup guard
     * prevents us from suspending in that case. */

    for (auto& thread : fetchInfo) {
        thread.wakeupGuard = false;
    }
}

/* Resume fetching for a thread at its context's current PC.  The
 * wakeupGuard protects this wakeup from being undone by a one-cycle-old
 * suspend signal still in flight from commit (see evaluate()). */
void
Fetch1::wakeupFetch(ThreadID tid)
{
    ThreadContext *thread_ctx = cpu.getContext(tid);
    Fetch1ThreadInfo &thread = fetchInfo[tid];
    thread.pc = thread_ctx->pcState();
    thread.state = FetchRunning;
    thread.wakeupGuard = true;
    DPRINTF(Fetch, "[tid:%d]: Changing stream wakeup %s\n",
        tid, thread_ctx->pcState());

    cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
}

/* Drained when no fetches are in flight, no line is being output, and
 * no thread is still in FetchRunning state */
bool
Fetch1::isDrained()
{
    bool drained = numInFlightFetches() == 0 && (*out.inputWire).isBubble();
    for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
        Fetch1ThreadInfo &thread = fetchInfo[tid];
        DPRINTF(Drain, "isDrained[tid:%d]: %s %s%s\n",
            tid,
            thread.state == FetchHalted,
            (numInFlightFetches() == 0 ? "" : "inFlightFetches "),
            ((*out.inputWire).isBubble() ? "" : "outputtingLine"));

        drained = drained && (thread.state != FetchRunning);
    }

    return drained;
}

/* Report this request's id for queue tracing */
void
Fetch1::FetchRequest::reportData(std::ostream &os) const
{
    os << id;
}

/* A request can be discarded once it is out of the TLB/memory system
 * and its stream or prediction sequence number no longer matches the
 * thread's current expectation (i.e. the line is from a superseded
 * stream) */
bool Fetch1::FetchRequest::isDiscardable() const
{
    Fetch1ThreadInfo &thread = fetch.fetchInfo[id.threadId];

    /* Can't discard lines in TLB/memory */
    return state != InTranslation && state != RequestIssuing &&
        (id.streamSeqNum != thread.streamSeqNum ||
            id.predictionSeqNum != thread.predictionSeqNum);
}

/* Emit the MinorTrace state line for this stage (thread 0 only) */
void
Fetch1::minorTrace() const
{
    // TODO: Un-bork minorTrace for THREADS
    // bork bork bork
    const Fetch1ThreadInfo &thread = fetchInfo[0];

    std::ostringstream data;

    if (thread.blocked)
        data << 'B';
    else
        (*out.inputWire).reportData(data);

    MINORTRACE("state=%s icacheState=%s in_tlb_mem=%s/%s"
        " streamSeqNum=%d lines=%s\n", thread.state, icacheState,
        numFetchesInITLB, numFetchesInMemorySystem,
        thread.streamSeqNum, data.str());
    requests.minorTrace();
    transfers.minorTrace();
}

}
#include "cpu/minor/pipeline.hh"
#include "debug/Drain.hh"
#include "debug/Fetch.hh"
#include "debug/MinorTrace.hh"

namespace Minor
{

/* Construct the Fetch1 stage: wire up the latches to/from neighbouring
 * stages, the icache port and the per-thread fetch state.  The
 * requests/transfers queues are both sized by fetch1FetchLimit so a
 * transfers slot can always be reserved when a request is pushed. */
Fetch1::Fetch1(const std::string &name_,
    MinorCPU &cpu_,
    MinorCPUParams &params,
    Latch<BranchData>::Output inp_,
    Latch<ForwardLineData>::Input out_,
    Latch<BranchData>::Output prediction_,
    std::vector<InputBuffer<ForwardLineData>> &next_stage_input_buffer) :
    Named(name_),
    cpu(cpu_),
    inp(inp_),
    out(out_),
    prediction(prediction_),
    nextStageReserve(next_stage_input_buffer),
    icachePort(name_ + ".icache_port", *this, cpu_),
    lineSnap(params.fetch1LineSnapWidth),
    maxLineWidth(params.fetch1LineWidth),
    fetchLimit(params.fetch1FetchLimit),
    fetchInfo(params.numThreads),
    threadPriority(0),
    requests(name_ + ".requests", "lines", params.fetch1FetchLimit),
    transfers(name_ + ".transfers", "lines", params.fetch1FetchLimit),
    icacheState(IcacheRunning),
    lineSeqNum(InstId::firstLineSeqNum),
    numFetchesInMemorySystem(0),
    numFetchesInITLB(0)
{
    /* A width parameter of 0 means "use the cache line size" */
    if (lineSnap == 0) {
        lineSnap = cpu.cacheLineSize();
        DPRINTF(Fetch, "lineSnap set to cache line size of: %d\n",
            lineSnap);
    }

    if (maxLineWidth == 0) {
        maxLineWidth = cpu.cacheLineSize();
        DPRINTF(Fetch, "maxLineWidth set to cache line size of: %d\n",
            maxLineWidth);
    }

    /* These assertions should be copied to the Python config. as well */
    if ((lineSnap % sizeof(TheISA::MachInst)) != 0) {
        fatal("%s: fetch1LineSnapWidth must be a multiple "
            "of sizeof(TheISA::MachInst) (%d)\n", name_,
            sizeof(TheISA::MachInst));
    }

    if (!(maxLineWidth >= lineSnap &&
        (maxLineWidth % sizeof(TheISA::MachInst)) == 0))
    {
        fatal("%s: fetch1LineWidth must be a multiple of"
            " sizeof(TheISA::MachInst)"
            " (%d), and >= fetch1LineSnapWidth (%d)\n",
            name_, sizeof(TheISA::MachInst), lineSnap);
    }

    if (fetchLimit < 1) {
        fatal("%s: fetch1FetchLimit must be >= 1 (%d)\n", name_,
            fetchLimit);
    }
}

/* Pick the next thread to fetch from according to the CPU's thread
 * policy.  Returns InvalidThreadID when no thread is active, unblocked
 * and in FetchRunning state.  Records the chosen thread in
 * threadPriority so round-robin rotates from it next time. */
inline ThreadID
Fetch1::getScheduledThread()
{
    /* Select thread via policy. */
    std::vector<ThreadID> priority_list;

    switch (cpu.threadPolicy) {
      case Enums::SingleThreaded:
        priority_list.push_back(0);
        break;
      case Enums::RoundRobin:
        priority_list = cpu.roundRobinPriority(threadPriority);
        break;
      case Enums::Random:
        priority_list = cpu.randomPriority();
        break;
      default:
        panic("Unknown fetch policy");
    }

    for (auto tid : priority_list) {
        if (cpu.getContext(tid)->status() == ThreadContext::Active &&
            !fetchInfo[tid].blocked &&
            fetchInfo[tid].state == FetchRunning) {
            threadPriority = tid;
            return tid;
        }
    }

    return InvalidThreadID;
}

/* Build and issue a line-fetch request for the given thread: tag it
 * with the thread's current stream/prediction sequence numbers, reserve
 * a transfers slot, push it onto requests and start its ITLB
 * translation.  Finally advance the thread's PC to the next line. */
void
Fetch1::fetchLine(ThreadID tid)
{
    /* Reference the currently used thread state. */
    Fetch1ThreadInfo &thread = fetchInfo[tid];

    /* If line_offset != 0, a request is pushed for the remainder of the
     * line. */
    /* Use a lower, sizeof(MachInst) aligned address for the fetch */
    Addr aligned_pc = thread.pc.instAddr() & ~((Addr) lineSnap - 1);
    /* NOTE(review): aligned_pc is lineSnap-aligned so line_offset is
     * always 0 here and request_size is always maxLineWidth — presumably
     * intentional upstream behaviour; confirm before changing */
    unsigned int line_offset = aligned_pc % lineSnap;
    unsigned int request_size = maxLineWidth - line_offset;

    /* Fill in the line's id */
    InstId request_id(tid,
        thread.streamSeqNum, thread.predictionSeqNum,
        lineSeqNum);

    FetchRequestPtr request = new FetchRequest(*this, request_id, thread.pc);

    DPRINTF(Fetch, "Inserting fetch into the fetch queue "
        "%s addr: 0x%x pc: %s line_offset: %d request_size: %d\n",
        request_id, aligned_pc, thread.pc, line_offset, request_size);

    request->request.setContext(cpu.threads[tid]->getTC()->contextId());
    request->request.setVirt(0 /* asid */,
        aligned_pc, request_size, Request::INST_FETCH, cpu.instMasterId(),
        /* I've no idea why we need the PC, but give it */
        thread.pc.instAddr());

    DPRINTF(Fetch, "Submitting ITLB request\n");
    numFetchesInITLB++;

    request->state = FetchRequest::InTranslation;

    /* Reserve space in the queues upstream of requests for results */
    transfers.reserve();
    requests.push(request);

    /* Submit the translation request.  The response will come
     * through finish/markDelayed on this request as it bears
     * the Translation interface */
    cpu.threads[request->id.threadId]->itb->translateTiming(
        &request->request,
        cpu.getContext(request->id.threadId),
        request, BaseTLB::Execute);

    lineSeqNum++;

    /* Step the PC for the next line onto the line aligned next address.
     * Note that as instructions can span lines, this PC is only a
     * reliable 'new' PC if the next line has a new stream sequence number. */
#if THE_ISA == ALPHA_ISA
    /* Restore the low bits of the PC used as address space flags */
    Addr pc_low_bits = thread.pc.instAddr() &
        ((Addr) (1 << sizeof(TheISA::MachInst)) - 1);

    thread.pc.set(aligned_pc + request_size + pc_low_bits);
#else
    thread.pc.set(aligned_pc + request_size);
#endif
}

/* Pretty-print the icache state machine's state for debug output */
std::ostream &
operator <<(std::ostream &os, Fetch1::IcacheState state)
{
    switch (state) {
      case Fetch1::IcacheRunning:
        os << "IcacheRunning";
        break;
      case Fetch1::IcacheNeedsRetry:
        os << "IcacheNeedsRetry";
        break;
      default:
        os << "IcacheState-" << static_cast<int>(state);
        break;
    }
    return os;
}

/* Build the memory read packet for this (translated) request */
void
Fetch1::FetchRequest::makePacket()
{
    /* Make the necessary packet for a memory transaction */
    packet = new Packet(&request, MemCmd::ReadReq);
    packet->allocate();

    /* This FetchRequest becomes SenderState to allow the response to be
     * identified */
    packet->pushSenderState(this);
}

/* Translation-complete callback (this class is the Translation object
 * passed to translateTiming).  Records the fault (if any), marks the
 * request Translated and hands it back to the owning Fetch1 stage. */
void
Fetch1::FetchRequest::finish(const Fault &fault_, RequestPtr request_,
    ThreadContext *tc, BaseTLB::Mode mode)
{
    fault = fault_;

    state = Translated;
    fetch.handleTLBResponse(this);

    /* Let's try and wake up the processor for the next cycle */
    fetch.cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
}

/* Account for an ITLB response (faulting or not) and try to advance the
 * request towards the memory system */
void
Fetch1::handleTLBResponse(FetchRequestPtr response)
{
    numFetchesInITLB--;

    if (response->fault != NoFault) {
        DPRINTF(Fetch, "Fault in address ITLB translation: %s, "
            "paddr: 0x%x, vaddr: 0x%x\n",
            response->fault->name(),
            (response->request.hasPaddr() ?
                response->request.getPaddr() : 0),
            response->request.getVaddr());

        if (DTRACE(MinorTrace))
            minorTraceResponseLine(name(), response);
    } else {
        DPRINTF(Fetch, "Got ITLB response\n");
    }

    response->state = FetchRequest::Translated;

    tryToSendToTransfers(response);
}

/* The request owns its packet once the response has been received (or
 * if it was never sent); tryToSend NULLs packet on a successful send so
 * an in-flight packet is not double-freed here */
Fetch1::FetchRequest::~FetchRequest()
{
    if (packet)
        delete packet;
}

/* Try to move the head-of-queue request onwards: discardable/faulting
 * requests skip memory and go straight to transfers as Complete;
 * translated requests are packetised and sent to the icache.  Requests
 * are only ever issued in queue order. */
void
Fetch1::tryToSendToTransfers(FetchRequestPtr request)
{
    if (!requests.empty() && requests.front() != request) {
        DPRINTF(Fetch, "Fetch not at front of requests queue, can't"
            " issue to memory\n");
        return;
    }

    if (request->state == FetchRequest::InTranslation) {
        DPRINTF(Fetch, "Fetch still in translation, not issuing to"
            " memory\n");
        return;
    }

    if (request->isDiscardable() || request->fault != NoFault) {
        /* Discarded and faulting requests carry on through transfers
         * as Complete/packet == NULL */

        request->state = FetchRequest::Complete;
        moveFromRequestsToTransfers(request);

        /* Wake up the pipeline next cycle as there will be no event
         * for this queue->queue transfer */
        cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
    } else if (request->state == FetchRequest::Translated) {
        if (!request->packet)
            request->makePacket();

        /* Ensure that the packet won't delete the request */
        assert(request->packet->needsResponse());

        if (tryToSend(request))
            moveFromRequestsToTransfers(request);
    } else {
        DPRINTF(Fetch, "Not advancing line fetch\n");
    }
}

/* Move the given (head) request from the requests queue onto the
 * transfers queue; the transfers slot was reserved in fetchLine */
void
Fetch1::moveFromRequestsToTransfers(FetchRequestPtr request)
{
    assert(!requests.empty() && requests.front() == request);

    requests.pop();
    transfers.push(request);
}

/* Attempt the timing send to the icache.  On failure, the icache state
 * machine drops to IcacheNeedsRetry and we wait for recvReqRetry. */
bool
Fetch1::tryToSend(FetchRequestPtr request)
{
    bool ret = false;

    if (icachePort.sendTimingReq(request->packet)) {
        /* Invalidate the fetch_requests packet so we don't
         * accidentally fail to deallocate it (or use it!)
         * later by overwriting it */
        request->packet = NULL;
        request->state = FetchRequest::RequestIssuing;
        numFetchesInMemorySystem++;

        ret = true;

        DPRINTF(Fetch, "Issued fetch request to memory: %s\n",
            request->id);
    } else {
        /* Needs to be resent, wait for that */
        icacheState = IcacheNeedsRetry;

        DPRINTF(Fetch, "Line fetch needs to retry: %s\n",
            request->id);
    }

    return ret;
}

/* Called each cycle from evaluate(): while running, push the head ITLB
 * result towards memory; while awaiting a retry, do nothing */
void
Fetch1::stepQueues()
{
    IcacheState old_icache_state = icacheState;

    switch (icacheState) {
      case IcacheRunning:
        /* Move ITLB results on to the memory system */
        if (!requests.empty()) {
            tryToSendToTransfers(requests.front());
        }
        break;
      case IcacheNeedsRetry:
        break;
    }

    if (icacheState != old_icache_state) {
        DPRINTF(Fetch, "Step in state %s moving to state %s\n",
            old_icache_state, icacheState);
    }
}

/* Pop and delete the head request of the given queue (no-op if empty) */
void
Fetch1::popAndDiscard(FetchQueue &queue)
{
    if (!queue.empty()) {
        delete queue.front();
        queue.pop();
    }
}

/* Fetches currently owned by this stage: queued for memory plus
 * awaiting handover to Fetch2 */
unsigned int
Fetch1::numInFlightFetches()
{
    return requests.occupiedSpace() +
        transfers.occupiedSpace();
}

/** Print the appropriate MinorLine line for a fetch response */
void
Fetch1::minorTraceResponseLine(const std::string &name,
    Fetch1::FetchRequestPtr response) const
{
    Request &request M5_VAR_USED = response->request;

    if (response->packet && response->packet->isError()) {
        MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"error packet\"\n",
            response->id, request.getVaddr());
    } else if (response->fault != NoFault) {
        MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"%s\"\n",
            response->id, request.getVaddr(), response->fault->name());
    } else {
        MINORLINE(this, "id=%s size=%d vaddr=0x%x paddr=0x%x\n",
            response->id, request.getSize(),
            request.getVaddr(), request.getPaddr());
    }
}
/** Receive a fetched-line (or error) response from the memory system,
 *  reclaim the sender-state FetchRequest, and mark it Complete so that
 *  evaluate() can pick it up from the transfers queue. */
bool
Fetch1::recvTimingResp(PacketPtr response)
{
    DPRINTF(Fetch, "recvTimingResp %d\n", numFetchesInMemorySystem);

    /* Only push the response if we didn't change stream?  No, all responses
     * should hit the responses queue.  It's the job of 'step' to throw them
     * away. */
    FetchRequestPtr fetch_request = safe_cast<FetchRequestPtr>
        (response->popSenderState());

    /* Fixup packet in fetch_request as this may have changed */
    assert(!fetch_request->packet);
    fetch_request->packet = response;

    numFetchesInMemorySystem--;
    fetch_request->state = FetchRequest::Complete;

    if (DTRACE(MinorTrace))
        minorTraceResponseLine(name(), fetch_request);

    if (response->isError()) {
        DPRINTF(Fetch, "Received error response packet: %s\n",
            fetch_request->id);
    }

    /* We go to idle even if there are more things to do on the queues as
     * it's the job of step to actually step us on to the next transaction */

    /* Let's try and wake up the processor for the next cycle to move on
     * queues */
    cpu.wakeupOnEvent(Pipeline::Fetch1StageId);

    /* Never busy */
    return true;
}

/** The memory system is ready to accept the request that previously
 *  failed to send; resend it and resume running if accepted. */
void
Fetch1::recvReqRetry()
{
    DPRINTF(Fetch, "recvRetry\n");
    /* A retry can only arrive while blocked waiting for one, and the
     * request that failed must still be at the head of requests */
    assert(icacheState == IcacheNeedsRetry);
    assert(!requests.empty());

    FetchRequestPtr retryRequest = requests.front();

    icacheState = IcacheRunning;

    if (tryToSend(retryRequest))
        moveFromRequestsToTransfers(retryRequest);
}

/** Render a FetchState value for debug/trace output. */
std::ostream &
operator <<(std::ostream &os, Fetch1::FetchState state)
{
    switch (state) {
      case Fetch1::FetchHalted:
        os << "FetchHalted";
        break;
      case Fetch1::FetchWaitingForPC:
        os << "FetchWaitingForPC";
        break;
      case Fetch1::FetchRunning:
        os << "FetchRunning";
        break;
      default:
        os << "FetchState-" << static_cast<int>(state);
        break;
    }
    return os;
}

/** Act on a stream-changing branch: update the expected sequence
 *  numbers, set the thread's fetch state according to the branch
 *  reason, and redirect the thread's PC to the branch target. */
void
Fetch1::changeStream(const BranchData &branch)
{
    Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];

    updateExpectedSeqNums(branch);

    /* Start fetching again if we were stopped */
    switch (branch.reason) {
      case BranchData::SuspendThread:
        {
            /* wakeupGuard protects against suspending when a wakeup
             * arrived from the memory system this cycle (see the note
             * at the end of evaluate) */
            if (thread.wakeupGuard) {
                DPRINTF(Fetch, "Not suspending fetch due to guard: %s\n",
                    branch);
            } else {
                DPRINTF(Fetch, "Suspending fetch: %s\n", branch);
                thread.state = FetchWaitingForPC;
            }
        }
        break;
      case BranchData::HaltFetch:
        DPRINTF(Fetch, "Halting fetch\n");
        thread.state = FetchHalted;
        break;
      default:
        DPRINTF(Fetch, "Changing stream on branch: %s\n", branch);
        thread.state = FetchRunning;
        break;
    }
    thread.pc = branch.target;
}

/** Adopt the stream/prediction sequence numbers carried by a branch as
 *  the thread's new expected sequence numbers. */
void
Fetch1::updateExpectedSeqNums(const BranchData &branch)
{
    Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];

    DPRINTF(Fetch, "Updating streamSeqNum from: %d to %d,"
        " predictionSeqNum from: %d to %d\n",
        thread.streamSeqNum, branch.newStreamSeqNum,
        thread.predictionSeqNum, branch.newPredictionSeqNum);

    /* Change the stream */
    thread.streamSeqNum = branch.newStreamSeqNum;
    /* Update the prediction.  Note that it's possible for this to
     * actually set the prediction to an *older* value if new
     * predictions have been discarded by execute */
    thread.predictionSeqNum = branch.newPredictionSeqNum;
}

/** Convert a completed fetch response into the ForwardLineData passed
 *  on to Fetch2.  On a fault, fetch for the thread is stopped; on
 *  success, the packet's data is adopted by the line. */
void
Fetch1::processResponse(Fetch1::FetchRequestPtr response,
    ForwardLineData &line)
{
    Fetch1ThreadInfo &thread = fetchInfo[response->id.threadId];
    PacketPtr packet = response->packet;

    /* Pass the prefetch abort (if any) on to Fetch2 in a ForwardLineData
     * structure */
    line.setFault(response->fault);
    /* Make sequence numbers valid in return */
    line.id = response->id;
    /* Set PC to virtual address */
    line.pc = response->pc;
    /* Set the lineBase, which is a sizeof(MachInst) aligned address <=
     * pc.instAddr() */
    line.lineBaseAddr = response->request.getVaddr();

    if (response->fault != NoFault) {
        /* Stop fetching if there was a fault */
        /* Should probably try to flush the queues as well, but we
         * can't be sure that this fault will actually reach Execute, and we
         * can't (currently) selectively remove this stream from the queues */
        DPRINTF(Fetch, "Stopping line fetch because of fault: %s\n",
            response->fault->name());
        thread.state = Fetch1::FetchWaitingForPC;
    } else {
        line.adoptPacketData(packet);
        /* Null the response's packet to prevent the response from trying to
         * deallocate the packet */
        response->packet = NULL;
    }
}

/** Per-cycle step of Fetch1: act on branches from Execute and Fetch2,
 *  start a new line fetch for a scheduled thread, step the memory-side
 *  queues, and forward any completed line to Fetch2. */
void
Fetch1::evaluate()
{
    const BranchData &execute_branch = *inp.outputWire;
    const BranchData &fetch2_branch = *prediction.outputWire;
    ForwardLineData &line_out = *out.inputWire;

    assert(line_out.isBubble());

    /* A thread is blocked if Fetch2's input buffer has no space */
    for (ThreadID tid = 0; tid < cpu.numThreads; tid++)
        fetchInfo[tid].blocked = !nextStageReserve[tid].canReserve();

    /** Are both branches from later stages valid and for the same thread? */
    if (execute_branch.threadId != InvalidThreadID &&
        execute_branch.threadId == fetch2_branch.threadId) {

        Fetch1ThreadInfo &thread = fetchInfo[execute_branch.threadId];

        /* Are we changing stream?  Look to the Execute branches first, then
         * to predicted changes of stream from Fetch2 */
        if (execute_branch.isStreamChange()) {
            if (thread.state == FetchHalted) {
                DPRINTF(Fetch, "Halted, ignoring branch: %s\n", execute_branch);
            } else {
                changeStream(execute_branch);
            }

            if (!fetch2_branch.isBubble()) {
                DPRINTF(Fetch, "Ignoring simultaneous prediction: %s\n",
                    fetch2_branch);
            }

            /* The streamSeqNum tagging in request/response ->req should handle
             * discarding those requests when we get to them. */
        } else if (thread.state != FetchHalted && fetch2_branch.isStreamChange()) {
            /* Handle branch predictions by changing the instruction source
             * if we're still processing the same stream (as set by streamSeqNum)
             * as the one of the prediction.
             */
            if (fetch2_branch.newStreamSeqNum != thread.streamSeqNum) {
                DPRINTF(Fetch, "Not changing stream on prediction: %s,"
                    " streamSeqNum mismatch\n",
                    fetch2_branch);
            } else {
                changeStream(fetch2_branch);
            }
        }
    } else {
        /* Fetch2 and Execute branches are for different threads */
        if (execute_branch.threadId != InvalidThreadID &&
            execute_branch.isStreamChange()) {

            if (fetchInfo[execute_branch.threadId].state == FetchHalted) {
                DPRINTF(Fetch, "Halted, ignoring branch: %s\n", execute_branch);
            } else {
                changeStream(execute_branch);
            }
        }

        if (fetch2_branch.threadId != InvalidThreadID &&
            fetch2_branch.isStreamChange()) {

            if (fetchInfo[fetch2_branch.threadId].state == FetchHalted) {
                DPRINTF(Fetch, "Halted, ignoring branch: %s\n", fetch2_branch);
            } else if (fetch2_branch.newStreamSeqNum != fetchInfo[fetch2_branch.threadId].streamSeqNum) {
                DPRINTF(Fetch, "Not changing stream on prediction: %s,"
                    " streamSeqNum mismatch\n", fetch2_branch);
            } else {
                changeStream(fetch2_branch);
            }
        }
    }

    /* Start a new line fetch if there is room in flight */
    if (numInFlightFetches() < fetchLimit) {
        ThreadID fetch_tid = getScheduledThread();

        if (fetch_tid != InvalidThreadID) {
            DPRINTF(Fetch, "Fetching from thread %d\n", fetch_tid);

            /* Generate fetch to selected thread */
            fetchLine(fetch_tid);
            /* Take up a slot in the fetch queue */
            nextStageReserve[fetch_tid].reserve();
        } else {
            DPRINTF(Fetch, "No active threads available to fetch from\n");
        }
    }


    /* Halting shouldn't prevent fetches in flight from being processed */
    /* Step fetches through the icachePort queues and memory system */
    stepQueues();

    /* As we've thrown away early lines, if there is a line, it must
     * be from the right stream */
    if (!transfers.empty() &&
        transfers.front()->isComplete())
    {
        Fetch1::FetchRequestPtr response = transfers.front();

        if (response->isDiscardable()) {
            /* Give the reserved Fetch2 slot back; this line will never
             * reach Fetch2 */
            nextStageReserve[response->id.threadId].freeReservation();

            DPRINTF(Fetch, "Discarding translated fetch as it's for"
                " an old stream\n");

            /* Wake up next cycle just in case there was some other
             * action to do */
            cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
        } else {
            DPRINTF(Fetch, "Processing fetched line: %s\n",
                response->id);

            processResponse(response, line_out);
        }

        popAndDiscard(transfers);
    }

    /* If we generated output, and mark the stage as being active
     * to encourage that output on to the next stage */
    if (!line_out.isBubble())
        cpu.activityRecorder->activity();

    /* Fetch1 has no inputBuffer so the only activity we can have is to
     * generate a line output (tested just above) or to initiate a memory
     * fetch which will signal activity when it returns/needs stepping
     * between queues */


    /* This looks hackish.  And it is, but there doesn't seem to be a better
     * way to do this.  The signal from commit to suspend fetch takes 1
     * clock cycle to propagate to fetch.  However, a legitimate wakeup
     * may occur between cycles from the memory system.  Thus wakeup guard
     * prevents us from suspending in that case. */

    for (auto& thread : fetchInfo) {
        thread.wakeupGuard = false;
    }
}

/** Restart fetch for a thread at its context's current PC, guarding
 *  against a same-cycle suspend (see the wakeupGuard note in evaluate). */
void
Fetch1::wakeupFetch(ThreadID tid)
{
    ThreadContext *thread_ctx = cpu.getContext(tid);
    Fetch1ThreadInfo &thread = fetchInfo[tid];
    thread.pc = thread_ctx->pcState();
    thread.state = FetchRunning;
    thread.wakeupGuard = true;
    DPRINTF(Fetch, "[tid:%d]: Changing stream wakeup %s\n",
        tid, thread_ctx->pcState());

    cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
}

/** Drained when no fetches are in flight, no line is being output, and
 *  no thread is still in the FetchRunning state. */
bool
Fetch1::isDrained()
{
    bool drained = numInFlightFetches() == 0 && (*out.inputWire).isBubble();
    for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
        Fetch1ThreadInfo &thread = fetchInfo[tid];
        DPRINTF(Drain, "isDrained[tid:%d]: %s %s%s\n",
            tid,
            thread.state == FetchHalted,
            (numInFlightFetches() == 0 ? "" : "inFlightFetches "),
            ((*out.inputWire).isBubble() ? "" : "outputtingLine"));

        drained = drained && (thread.state != FetchRunning);
    }

    return drained;
}

/** Report this request's id for trace/debug purposes. */
void
Fetch1::FetchRequest::reportData(std::ostream &os) const
{
    os << id;
}

/** A request is discardable when it is no longer in the TLB or memory
 *  system and its stream/prediction sequence numbers no longer match
 *  the thread's expected ones. */
bool Fetch1::FetchRequest::isDiscardable() const
{
    Fetch1ThreadInfo &thread = fetch.fetchInfo[id.threadId];

    /* Can't discard lines in TLB/memory */
    return state != InTranslation && state != RequestIssuing &&
        (id.streamSeqNum != thread.streamSeqNum ||
            id.predictionSeqNum != thread.predictionSeqNum);
}

/** Emit MinorTrace state for this stage (currently thread 0 only). */
void
Fetch1::minorTrace() const
{
    // TODO: Un-bork minorTrace for THREADS
    // bork bork bork
    const Fetch1ThreadInfo &thread = fetchInfo[0];

    std::ostringstream data;

    if (thread.blocked)
        data << 'B';
    else
        (*out.inputWire).reportData(data);

    MINORTRACE("state=%s icacheState=%s in_tlb_mem=%s/%s"
        " streamSeqNum=%d lines=%s\n", thread.state, icacheState,
        numFetchesInITLB, numFetchesInMemorySystem,
        thread.streamSeqNum, data.str());
    requests.minorTrace();
    transfers.minorTrace();
}

} /* namespace Minor */
|