fetch1.cc revision 11793
/*
 * Copyright (c) 2013-2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andrew Bardsley
 */

#include "cpu/minor/fetch1.hh"

#include <cstring>
#include <iomanip>
#include <sstream>

#include "base/cast.hh"
#include "cpu/minor/pipeline.hh"
#include "debug/Drain.hh"
#include "debug/Fetch.hh"
#include "debug/MinorTrace.hh"

namespace Minor
{

Fetch1::Fetch1(const std::string &name_,
    MinorCPU &cpu_,
    MinorCPUParams &params,
    Latch<BranchData>::Output inp_,
    Latch<ForwardLineData>::Input out_,
    Latch<BranchData>::Output prediction_,
    std::vector<InputBuffer<ForwardLineData>> &next_stage_input_buffer) :
    Named(name_),
    cpu(cpu_),
    inp(inp_),
    out(out_),
    prediction(prediction_),
    nextStageReserve(next_stage_input_buffer),
    icachePort(name_ + ".icache_port", *this, cpu_),
    lineSnap(params.fetch1LineSnapWidth),
    maxLineWidth(params.fetch1LineWidth),
    fetchLimit(params.fetch1FetchLimit),
    fetchInfo(params.numThreads),
    threadPriority(0),
    requests(name_ + ".requests", "lines", params.fetch1FetchLimit),
    transfers(name_ + ".transfers", "lines", params.fetch1FetchLimit),
    icacheState(IcacheRunning),
    lineSeqNum(InstId::firstLineSeqNum),
    numFetchesInMemorySystem(0),
    numFetchesInITLB(0)
{
    if (lineSnap == 0) {
        lineSnap = cpu.cacheLineSize();
        DPRINTF(Fetch, "lineSnap set to cache line size of: %d\n",
            lineSnap);
    }

    if (maxLineWidth == 0) {
        maxLineWidth = cpu.cacheLineSize();
        DPRINTF(Fetch, "maxLineWidth set to cache line size of: %d\n",
            maxLineWidth);
    }

    /* These assertions should be copied to the Python config. as well */
    if ((lineSnap % sizeof(TheISA::MachInst)) != 0) {
        fatal("%s: fetch1LineSnapWidth must be a multiple "
            "of sizeof(TheISA::MachInst) (%d)\n", name_,
            sizeof(TheISA::MachInst));
    }

    if (!(maxLineWidth >= lineSnap &&
        (maxLineWidth % sizeof(TheISA::MachInst)) == 0))
    {
        fatal("%s: fetch1LineWidth must be a multiple of"
            " sizeof(TheISA::MachInst)"
            " (%d), and >= fetch1LineSnapWidth (%d)\n",
            name_, sizeof(TheISA::MachInst), lineSnap);
    }

    if (fetchLimit < 1) {
        fatal("%s: fetch1FetchLimit must be >= 1 (%d)\n", name_,
            fetchLimit);
    }
}

inline ThreadID
Fetch1::getScheduledThread()
{
    /* Select thread via policy. */
    std::vector<ThreadID> priority_list;

    switch (cpu.threadPolicy) {
      case Enums::SingleThreaded:
        priority_list.push_back(0);
        break;
      case Enums::RoundRobin:
        priority_list = cpu.roundRobinPriority(threadPriority);
        break;
      case Enums::Random:
        priority_list = cpu.randomPriority();
        break;
      default:
        panic("Unknown fetch policy");
    }

    for (auto tid : priority_list) {
        if (cpu.getContext(tid)->status() == ThreadContext::Active &&
            !fetchInfo[tid].blocked &&
            fetchInfo[tid].state == FetchRunning) {
            threadPriority = tid;
            return tid;
        }
    }

    return InvalidThreadID;
}
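
/** Issue a line fetch for the given thread: build a FetchRequest for the
 *  lineSnap-aligned block at or below the thread's fetch PC, tag it with
 *  the thread's current stream/prediction sequence numbers, start its ITLB
 *  translation, and step the thread's PC on to the address just after the
 *  requested block. */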
void
Fetch1::fetchLine(ThreadID tid)
{
    /* Reference the currently used thread state. */
    Fetch1ThreadInfo &thread = fetchInfo[tid];

    /* If line_offset != 0, a request is pushed for the remainder of the
     *  line. */
    /* Use a lower, sizeof(MachInst) aligned address for the fetch */
    Addr aligned_pc = thread.pc.instAddr() & ~((Addr) lineSnap - 1);
    unsigned int line_offset = aligned_pc % lineSnap;
    unsigned int request_size = maxLineWidth - line_offset;

    /* Fill in the line's id */
    InstId request_id(tid,
        thread.streamSeqNum, thread.predictionSeqNum,
        lineSeqNum);

    FetchRequestPtr request = new FetchRequest(*this, request_id, thread.pc);

    DPRINTF(Fetch, "Inserting fetch into the fetch queue "
        "%s addr: 0x%x pc: %s line_offset: %d request_size: %d\n",
        request_id, aligned_pc, thread.pc, line_offset, request_size);

    request->request.setContext(cpu.threads[tid]->getTC()->contextId());
    request->request.setVirt(0 /* asid */,
        aligned_pc, request_size, Request::INST_FETCH, cpu.instMasterId(),
        /* I've no idea why we need the PC, but give it */
        thread.pc.instAddr());

    DPRINTF(Fetch, "Submitting ITLB request\n");
    numFetchesInITLB++;

    request->state = FetchRequest::InTranslation;

    /* Reserve space in the queues upstream of requests for results */
    transfers.reserve();
    requests.push(request);

    /* Submit the translation request.  The response will come
     *  through finish/markDelayed on this request as it bears
     *  the Translation interface */
    cpu.threads[request->id.threadId]->itb->translateTiming(
        &request->request,
        cpu.getContext(request->id.threadId),
        request, BaseTLB::Execute);

    lineSeqNum++;

    /* Step the PC for the next line onto the line aligned next address.
     * Note that as instructions can span lines, this PC is only a
     * reliable 'new' PC if the next line has a new stream sequence number. */
#if THE_ISA == ALPHA_ISA
    /* Restore the low bits of the PC used as address space flags */
    Addr pc_low_bits = thread.pc.instAddr() &
        ((Addr) (1 << sizeof(TheISA::MachInst)) - 1);

    thread.pc.set(aligned_pc + request_size + pc_low_bits);
#else
    thread.pc.set(aligned_pc + request_size);
#endif
}

std::ostream &
operator <<(std::ostream &os, Fetch1::IcacheState state)
{
    switch (state) {
      case Fetch1::IcacheRunning:
        os << "IcacheRunning";
        break;
      case Fetch1::IcacheNeedsRetry:
        os << "IcacheNeedsRetry";
        break;
      default:
        os << "IcacheState-" << static_cast<int>(state);
        break;
    }
    return os;
}

void
Fetch1::FetchRequest::makePacket()
{
    /* Make the necessary packet for a memory transaction */
    packet = new Packet(&request, MemCmd::ReadReq);
    packet->allocate();

    /* This FetchRequest becomes SenderState to allow the response to be
     *  identified */
    packet->pushSenderState(this);
}

void
Fetch1::FetchRequest::finish(const Fault &fault_, RequestPtr request_,
    ThreadContext *tc, BaseTLB::Mode mode)
{
    fault = fault_;

    state = Translated;
    fetch.handleTLBResponse(this);

    /* Let's try and wake up the processor for the next cycle */
    fetch.cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
}

void
Fetch1::handleTLBResponse(FetchRequestPtr response)
{
    numFetchesInITLB--;

    if (response->fault != NoFault) {
        DPRINTF(Fetch, "Fault in address ITLB translation: %s, "
            "paddr: 0x%x, vaddr: 0x%x\n",
            response->fault->name(),
            (response->request.hasPaddr() ?
                response->request.getPaddr() : 0),
            response->request.getVaddr());

        if (DTRACE(MinorTrace))
            minorTraceResponseLine(name(), response);
    } else {
        DPRINTF(Fetch, "Got ITLB response\n");
    }

    response->state = FetchRequest::Translated;

    tryToSendToTransfers(response);
}

Fetch1::FetchRequest::~FetchRequest()
{
    if (packet)
        delete packet;
}
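
/** Move a request on from the requests queue once it reaches the queue's
 *  head: discarded or faulting requests pass straight through to transfers
 *  as Complete (with no packet), translated requests are turned into
 *  packets and sent to the I-cache, and requests still in translation are
 *  left where they are. */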
void
Fetch1::tryToSendToTransfers(FetchRequestPtr request)
{
    if (!requests.empty() && requests.front() != request) {
        DPRINTF(Fetch, "Fetch not at front of requests queue, can't"
            " issue to memory\n");
        return;
    }

    if (request->state == FetchRequest::InTranslation) {
        DPRINTF(Fetch, "Fetch still in translation, not issuing to"
            " memory\n");
        return;
    }

    if (request->isDiscardable() || request->fault != NoFault) {
        /* Discarded and faulting requests carry on through transfers
         *  as Complete/packet == NULL */

        request->state = FetchRequest::Complete;
        moveFromRequestsToTransfers(request);

        /* Wake up the pipeline next cycle as there will be no event
         *  for this queue->queue transfer */
        cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
    } else if (request->state == FetchRequest::Translated) {
        if (!request->packet)
            request->makePacket();

        /* Ensure that the packet won't delete the request */
        assert(request->packet->needsResponse());

        if (tryToSend(request))
            moveFromRequestsToTransfers(request);
    } else {
        DPRINTF(Fetch, "Not advancing line fetch\n");
    }
}

void
Fetch1::moveFromRequestsToTransfers(FetchRequestPtr request)
{
    assert(!requests.empty() && requests.front() == request);

    requests.pop();
    transfers.push(request);
}

bool
Fetch1::tryToSend(FetchRequestPtr request)
{
    bool ret = false;

    if (icachePort.sendTimingReq(request->packet)) {
        /* Invalidate the fetch_requests packet so we don't
         *  accidentally fail to deallocate it (or use it!)
         *  later by overwriting it */
        request->packet = NULL;
        request->state = FetchRequest::RequestIssuing;
        numFetchesInMemorySystem++;

        ret = true;

        DPRINTF(Fetch, "Issued fetch request to memory: %s\n",
            request->id);
    } else {
        /* Needs to be resent, wait for that */
        icacheState = IcacheNeedsRetry;

        DPRINTF(Fetch, "Line fetch needs to retry: %s\n",
            request->id);
    }

    return ret;
}

void
Fetch1::stepQueues()
{
    IcacheState old_icache_state = icacheState;

    switch (icacheState) {
      case IcacheRunning:
        /* Move ITLB results on to the memory system */
        if (!requests.empty()) {
            tryToSendToTransfers(requests.front());
        }
        break;
      case IcacheNeedsRetry:
        break;
    }

    if (icacheState != old_icache_state) {
        DPRINTF(Fetch, "Step in state %s moving to state %s\n",
            old_icache_state, icacheState);
    }
}

void
Fetch1::popAndDiscard(FetchQueue &queue)
{
    if (!queue.empty()) {
        delete queue.front();
        queue.pop();
    }
}

unsigned int
Fetch1::numInFlightFetches()
{
    return requests.occupiedSpace() +
        transfers.occupiedSpace();
}

/** Print the appropriate MinorLine line for a fetch response */
void
Fetch1::minorTraceResponseLine(const std::string &name,
    Fetch1::FetchRequestPtr response) const
{
    Request &request M5_VAR_USED = response->request;

    if (response->packet && response->packet->isError()) {
        MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"error packet\"\n",
            response->id, request.getVaddr());
    } else if (response->fault != NoFault) {
        MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"%s\"\n",
            response->id, request.getVaddr(), response->fault->name());
    } else {
        MINORLINE(this, "id=%s size=%d vaddr=0x%x paddr=0x%x\n",
            response->id, request.getSize(),
            request.getVaddr(), request.getPaddr());
    }
}
bool
Fetch1::recvTimingResp(PacketPtr response)
{
    DPRINTF(Fetch, "recvTimingResp %d\n", numFetchesInMemorySystem);

    /* Only push the response if we didn't change stream?  No, all responses
     *  should hit the responses queue.  It's the job of 'step' to throw them
     *  away. */
    FetchRequestPtr fetch_request = safe_cast<FetchRequestPtr>
        (response->popSenderState());

    /* Fixup packet in fetch_request as this may have changed */
    assert(!fetch_request->packet);
    fetch_request->packet = response;

    numFetchesInMemorySystem--;
    fetch_request->state = FetchRequest::Complete;

    if (DTRACE(MinorTrace))
        minorTraceResponseLine(name(), fetch_request);

    if (response->isError()) {
        DPRINTF(Fetch, "Received error response packet: %s\n",
            fetch_request->id);
    }

    /* We go to idle even if there are more things to do on the queues as
     *  it's the job of step to actually step us on to the next transaction */

    /* Let's try and wake up the processor for the next cycle to move on
     *  queues */
    cpu.wakeupOnEvent(Pipeline::Fetch1StageId);

    /* Never busy */
    return true;
}

void
Fetch1::recvReqRetry()
{
    DPRINTF(Fetch, "recvRetry\n");
    assert(icacheState == IcacheNeedsRetry);
    assert(!requests.empty());

    FetchRequestPtr retryRequest = requests.front();

    icacheState = IcacheRunning;

    if (tryToSend(retryRequest))
        moveFromRequestsToTransfers(retryRequest);
}

std::ostream &
operator <<(std::ostream &os, Fetch1::FetchState state)
{
    switch (state) {
      case Fetch1::FetchHalted:
        os << "FetchHalted";
        break;
      case Fetch1::FetchWaitingForPC:
        os << "FetchWaitingForPC";
        break;
      case Fetch1::FetchRunning:
        os << "FetchRunning";
        break;
      default:
        os << "FetchState-" << static_cast<int>(state);
        break;
    }
    return os;
}

void
Fetch1::changeStream(const BranchData &branch)
{
    Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];

    updateExpectedSeqNums(branch);

    /* Start fetching again if we were stopped */
    switch (branch.reason) {
      case BranchData::SuspendThread:
        {
            if (thread.wakeupGuard) {
                DPRINTF(Fetch, "Not suspending fetch due to guard: %s\n",
                    branch);
            } else {
                DPRINTF(Fetch, "Suspending fetch: %s\n", branch);
                thread.state = FetchWaitingForPC;
            }
        }
        break;
      case BranchData::HaltFetch:
        DPRINTF(Fetch, "Halting fetch\n");
        thread.state = FetchHalted;
        break;
      default:
        DPRINTF(Fetch, "Changing stream on branch: %s\n", branch);
        thread.state = FetchRunning;
        break;
    }
    thread.pc = branch.target;
}

void
Fetch1::updateExpectedSeqNums(const BranchData &branch)
{
    Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];

    DPRINTF(Fetch, "Updating streamSeqNum from: %d to %d,"
        " predictionSeqNum from: %d to %d\n",
        thread.streamSeqNum, branch.newStreamSeqNum,
        thread.predictionSeqNum, branch.newPredictionSeqNum);

    /* Change the stream */
    thread.streamSeqNum = branch.newStreamSeqNum;
    /* Update the prediction.  Note that it's possible for this to
     *  actually set the prediction to an *older* value if new
     *  predictions have been discarded by execute */
    thread.predictionSeqNum = branch.newPredictionSeqNum;
}

void
Fetch1::processResponse(Fetch1::FetchRequestPtr response,
    ForwardLineData &line)
{
    Fetch1ThreadInfo &thread = fetchInfo[response->id.threadId];
    PacketPtr packet = response->packet;

    /* Pass the prefetch abort (if any) on to Fetch2 in a ForwardLineData
     * structure */
    line.setFault(response->fault);
    /* Make sequence numbers valid in return */
    line.id = response->id;
    /* Set PC to virtual address */
    line.pc = response->pc;
    /* Set the lineBase, which is a sizeof(MachInst) aligned address <=
     *  pc.instAddr() */
    line.lineBaseAddr = response->request.getVaddr();

    if (response->fault != NoFault) {
        /* Stop fetching if there was a fault */
        /* Should probably try to flush the queues as well, but we
         * can't be sure that this fault will actually reach Execute, and we
         * can't (currently) selectively remove this stream from the queues */
        DPRINTF(Fetch, "Stopping line fetch because of fault: %s\n",
            response->fault->name());
        thread.state = Fetch1::FetchWaitingForPC;
    } else {
        line.adoptPacketData(packet);
        /* Null the response's packet to prevent the response from trying to
         *  deallocate the packet */
        response->packet = NULL;
    }
}
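
/** Cycle step for the stage: apply stream changes from Execute and
 *  predictions from Fetch2, issue a new line fetch for the scheduled
 *  thread if there is room, step requests through the ITLB/I-cache
 *  queues, and forward the oldest completed line to Fetch2 (discarding
 *  lines belonging to superseded streams). */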
void
Fetch1::evaluate()
{
    const BranchData &execute_branch = *inp.outputWire;
    const BranchData &fetch2_branch = *prediction.outputWire;
    ForwardLineData &line_out = *out.inputWire;

    assert(line_out.isBubble());

    for (ThreadID tid = 0; tid < cpu.numThreads; tid++)
        fetchInfo[tid].blocked = !nextStageReserve[tid].canReserve();

    /** Are both branches from later stages valid and for the same thread? */
    if (execute_branch.threadId != InvalidThreadID &&
        execute_branch.threadId == fetch2_branch.threadId) {

        Fetch1ThreadInfo &thread = fetchInfo[execute_branch.threadId];

        /* Are we changing stream?  Look to the Execute branches first, then
         * to predicted changes of stream from Fetch2 */
        if (execute_branch.isStreamChange()) {
            if (thread.state == FetchHalted) {
                DPRINTF(Fetch, "Halted, ignoring branch: %s\n",
                    execute_branch);
            } else {
                changeStream(execute_branch);
            }

            if (!fetch2_branch.isBubble()) {
                DPRINTF(Fetch, "Ignoring simultaneous prediction: %s\n",
                    fetch2_branch);
            }

            /* The streamSeqNum tagging in request/response ->req should
             *  handle discarding those requests when we get to them. */
        } else if (thread.state != FetchHalted &&
            fetch2_branch.isStreamChange()) {
            /* Handle branch predictions by changing the instruction source
             * if we're still processing the same stream (as set by
             * streamSeqNum) as the one of the prediction.
             */
            if (fetch2_branch.newStreamSeqNum != thread.streamSeqNum) {
                DPRINTF(Fetch, "Not changing stream on prediction: %s,"
                    " streamSeqNum mismatch\n",
                    fetch2_branch);
            } else {
                changeStream(fetch2_branch);
            }
        }
    } else {
        /* Fetch2 and Execute branches are for different threads */
        if (execute_branch.threadId != InvalidThreadID &&
            execute_branch.isStreamChange()) {

            if (fetchInfo[execute_branch.threadId].state == FetchHalted) {
                DPRINTF(Fetch, "Halted, ignoring branch: %s\n",
                    execute_branch);
            } else {
                changeStream(execute_branch);
            }
        }

        if (fetch2_branch.threadId != InvalidThreadID &&
            fetch2_branch.isStreamChange()) {

            if (fetchInfo[fetch2_branch.threadId].state == FetchHalted) {
                DPRINTF(Fetch, "Halted, ignoring branch: %s\n",
                    fetch2_branch);
            } else if (fetch2_branch.newStreamSeqNum !=
                fetchInfo[fetch2_branch.threadId].streamSeqNum) {
                DPRINTF(Fetch, "Not changing stream on prediction: %s,"
                    " streamSeqNum mismatch\n", fetch2_branch);
            } else {
                changeStream(fetch2_branch);
            }
        }
    }
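
    /* If there is spare capacity in the request/transfer queues, pick a
     *  thread via the fetch policy and issue a new line fetch for it */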
    if (numInFlightFetches() < fetchLimit) {
        ThreadID fetch_tid = getScheduledThread();

        if (fetch_tid != InvalidThreadID) {
            DPRINTF(Fetch, "Fetching from thread %d\n", fetch_tid);

            /* Generate fetch to selected thread */
            fetchLine(fetch_tid);
            /* Take up a slot in the fetch queue */
            nextStageReserve[fetch_tid].reserve();
        } else {
            DPRINTF(Fetch, "No active threads available to fetch from\n");
        }
    }


    /* Halting shouldn't prevent fetches in flight from being processed */
    /* Step fetches through the icachePort queues and memory system */
    stepQueues();

    /* As we've thrown away early lines, if there is a line, it must
     *  be from the right stream */
    if (!transfers.empty() &&
        transfers.front()->isComplete())
    {
        Fetch1::FetchRequestPtr response = transfers.front();

        if (response->isDiscardable()) {
            nextStageReserve[response->id.threadId].freeReservation();

            DPRINTF(Fetch, "Discarding translated fetch as it's for"
                " an old stream\n");

            /* Wake up next cycle just in case there was some other
             *  action to do */
            cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
        } else {
            DPRINTF(Fetch, "Processing fetched line: %s\n",
                response->id);

            processResponse(response, line_out);
        }

        popAndDiscard(transfers);
    }

    /* If we generated output, mark the stage as being active
     *  to encourage that output on to the next stage */
    if (!line_out.isBubble())
        cpu.activityRecorder->activity();

    /* Fetch1 has no inputBuffer so the only activity we can have is to
     *  generate a line output (tested just above) or to initiate a memory
     *  fetch which will signal activity when it returns/needs stepping
     *  between queues */


    /* This looks hackish.  And it is, but there doesn't seem to be a better
     *  way to do this.  The signal from commit to suspend fetch takes 1
     *  clock cycle to propagate to fetch.  However, a legitimate wakeup
     *  may occur between cycles from the memory system.  Thus wakeup guard
     *  prevents us from suspending in that case. */

    for (auto& thread : fetchInfo) {
        thread.wakeupGuard = false;
    }
}
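
/** Wake a suspended thread: resume fetching from the thread context's
 *  current PC and set wakeupGuard so that a suspend signal still in flight
 *  from a later stage doesn't immediately re-suspend it this cycle. */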
void
Fetch1::wakeupFetch(ThreadID tid)
{
    ThreadContext *thread_ctx = cpu.getContext(tid);
    Fetch1ThreadInfo &thread = fetchInfo[tid];
    thread.pc = thread_ctx->pcState();
    thread.state = FetchRunning;
    thread.wakeupGuard = true;
    DPRINTF(Fetch, "[tid:%d]: Changing stream wakeup %s\n",
        tid, thread_ctx->pcState());

    cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
}

bool
Fetch1::isDrained()
{
    bool drained = numInFlightFetches() == 0 && (*out.inputWire).isBubble();
    for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
        Fetch1ThreadInfo &thread = fetchInfo[tid];
        DPRINTF(Drain, "isDrained[tid:%d]: %s %s%s\n",
            tid,
            thread.state == FetchHalted,
            (numInFlightFetches() == 0 ? "" : "inFlightFetches "),
            ((*out.inputWire).isBubble() ? "" : "outputtingLine"));

        drained = drained && (thread.state != FetchRunning);
    }

    return drained;
}

void
Fetch1::FetchRequest::reportData(std::ostream &os) const
{
    os << id;
}

bool Fetch1::FetchRequest::isDiscardable() const
{
    Fetch1ThreadInfo &thread = fetch.fetchInfo[id.threadId];

    /* Can't discard lines in TLB/memory */
    return state != InTranslation && state != RequestIssuing &&
        (id.streamSeqNum != thread.streamSeqNum ||
            id.predictionSeqNum != thread.predictionSeqNum);
}

void
Fetch1::minorTrace() const
{
    // TODO: Un-bork minorTrace for THREADS
    // bork bork bork
    const Fetch1ThreadInfo &thread = fetchInfo[0];

    std::ostringstream data;

    if (thread.blocked)
        data << 'B';
    else
        (*out.inputWire).reportData(data);

    MINORTRACE("state=%s icacheState=%s in_tlb_mem=%s/%s"
        " streamSeqNum=%d lines=%s\n", thread.state, icacheState,
        numFetchesInITLB, numFetchesInMemorySystem,
        thread.streamSeqNum, data.str());
    requests.minorTrace();
    transfers.minorTrace();
}

}