fetch1.cc (11567:560d7fbbddd1) fetch1.cc (11568:91e95eb78191)
1/*
2 * Copyright (c) 2013-2014 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Authors: Andrew Bardsley
38 */
39
40#include <cstring>
41#include <iomanip>
42#include <sstream>
43
44#include "base/cast.hh"
45#include "cpu/minor/fetch1.hh"
46#include "cpu/minor/pipeline.hh"
47#include "debug/Drain.hh"
48#include "debug/Fetch.hh"
49#include "debug/MinorTrace.hh"
50
51namespace Minor
52{
53
/** Construct the Fetch1 stage: wire up latches to/from neighbouring
 *  stages, size the request/transfer queues from fetch1FetchLimit and
 *  validate the line-width parameters (0 means "use the cache line
 *  size"). */
Fetch1::Fetch1(const std::string &name_,
    MinorCPU &cpu_,
    MinorCPUParams &params,
    Latch<BranchData>::Output inp_,
    Latch<ForwardLineData>::Input out_,
    Latch<BranchData>::Output prediction_,
    std::vector<InputBuffer<ForwardLineData>> &next_stage_input_buffer) :
    Named(name_),
    cpu(cpu_),
    inp(inp_),
    out(out_),
    prediction(prediction_),
    nextStageReserve(next_stage_input_buffer),
    icachePort(name_ + ".icache_port", *this, cpu_),
    lineSnap(params.fetch1LineSnapWidth),
    maxLineWidth(params.fetch1LineWidth),
    fetchLimit(params.fetch1FetchLimit),
    fetchInfo(params.numThreads),
    threadPriority(0),
    requests(name_ + ".requests", "lines", params.fetch1FetchLimit),
    transfers(name_ + ".transfers", "lines", params.fetch1FetchLimit),
    icacheState(IcacheRunning),
    lineSeqNum(InstId::firstLineSeqNum),
    numFetchesInMemorySystem(0),
    numFetchesInITLB(0)
{
    /* A snap width of 0 means: snap fetches to the cache line size */
    if (lineSnap == 0) {
        lineSnap = cpu.cacheLineSize();
        DPRINTF(Fetch, "lineSnap set to cache line size of: %d\n",
            lineSnap);
    }

    /* A line width of 0 means: fetch whole cache lines */
    if (maxLineWidth == 0) {
        maxLineWidth = cpu.cacheLineSize();
        DPRINTF(Fetch, "maxLineWidth set to cache line size of: %d\n",
            maxLineWidth);
    }

    /* These assertions should be copied to the Python config. as well */
    if ((lineSnap % sizeof(TheISA::MachInst)) != 0) {
        fatal("%s: fetch1LineSnapWidth must be a multiple "
            "of sizeof(TheISA::MachInst) (%d)\n", name_,
            sizeof(TheISA::MachInst));
    }

    /* Lines must be at least a snap wide and instruction-aligned so a
     * fetched line always contains whole machine instructions */
    if (!(maxLineWidth >= lineSnap &&
        (maxLineWidth % sizeof(TheISA::MachInst)) == 0))
    {
        fatal("%s: fetch1LineWidth must be a multiple of"
            " sizeof(TheISA::MachInst)"
            " (%d), and >= fetch1LineSnapWidth (%d)\n",
            name_, sizeof(TheISA::MachInst), lineSnap);
    }

    /* Need at least one in-flight fetch slot for the stage to make
     * progress */
    if (fetchLimit < 1) {
        fatal("%s: fetch1FetchLimit must be >= 1 (%d)\n", name_,
            fetchLimit);
    }
}
113
114inline ThreadID
115Fetch1::getScheduledThread()
116{
117 /* Select thread via policy. */
118 std::vector<ThreadID> priority_list;
119
120 switch (cpu.threadPolicy) {
121 case Enums::SingleThreaded:
122 priority_list.push_back(0);
123 break;
124 case Enums::RoundRobin:
125 priority_list = cpu.roundRobinPriority(threadPriority);
126 break;
127 case Enums::Random:
128 priority_list = cpu.randomPriority();
129 break;
130 default:
131 panic("Unknown fetch policy");
132 }
133
134 for (auto tid : priority_list) {
135 if (cpu.getContext(tid)->status() == ThreadContext::Active &&
136 !fetchInfo[tid].blocked &&
137 fetchInfo[tid].state == FetchRunning) {
138 threadPriority = tid;
139 return tid;
140 }
141 }
142
143 return InvalidThreadID;
144}
145
/** Issue a new line fetch for thread tid: build a FetchRequest tagged
 *  with the thread's current stream/prediction sequence numbers, start
 *  its ITLB translation, and advance the thread's PC to the next
 *  line-aligned address. */
void
Fetch1::fetchLine(ThreadID tid)
{
    /* Reference the currently used thread state. */
    Fetch1ThreadInfo &thread = fetchInfo[tid];

    /* If line_offset != 0, a request is pushed for the remainder of the
     * line. */
    /* Use a lower, sizeof(MachInst) aligned address for the fetch */
    Addr aligned_pc = thread.pc.instAddr() & ~((Addr) lineSnap - 1);
    /* NOTE(review): aligned_pc is a multiple of lineSnap by construction,
     * so line_offset is always 0 and request_size always equals
     * maxLineWidth here -- confirm whether the offset was meant to be
     * derived from the unaligned PC instead */
    unsigned int line_offset = aligned_pc % lineSnap;
    unsigned int request_size = maxLineWidth - line_offset;

    /* Fill in the line's id */
    InstId request_id(tid,
        thread.streamSeqNum, thread.predictionSeqNum,
        lineSeqNum);

    FetchRequestPtr request = new FetchRequest(*this, request_id, thread.pc);

    DPRINTF(Fetch, "Inserting fetch into the fetch queue "
        "%s addr: 0x%x pc: %s line_offset: %d request_size: %d\n",
        request_id, aligned_pc, thread.pc, line_offset, request_size);

    request->request.setContext(cpu.threads[tid]->getTC()->contextId());
    request->request.setVirt(0 /* asid */,
        aligned_pc, request_size, Request::INST_FETCH, cpu.instMasterId(),
        /* I've no idea why we need the PC, but give it */
        thread.pc.instAddr());

    DPRINTF(Fetch, "Submitting ITLB request\n");
    numFetchesInITLB++;

    request->state = FetchRequest::InTranslation;

    /* Reserve space in the queues upstream of requests for results */
    transfers.reserve();
    requests.push(request);

    /* Submit the translation request.  The response will come
     * through finish/markDelayed on this request as it bears
     * the Translation interface */
    cpu.threads[request->id.threadId]->itb->translateTiming(
        &request->request,
        cpu.getContext(request->id.threadId),
        request, BaseTLB::Execute);

    lineSeqNum++;

    /* Step the PC for the next line onto the line aligned next address.
     * Note that as instructions can span lines, this PC is only a
     * reliable 'new' PC if the next line has a new stream sequence number. */
#if THE_ISA == ALPHA_ISA
    /* Restore the low bits of the PC used as address space flags */
    /* NOTE(review): the mask is (1 << sizeof(MachInst)) - 1, i.e. built
     * from the size, not the bit-width, of MachInst -- confirm intended */
    Addr pc_low_bits = thread.pc.instAddr() &
        ((Addr) (1 << sizeof(TheISA::MachInst)) - 1);

    thread.pc.set(aligned_pc + request_size + pc_low_bits);
#else
    thread.pc.set(aligned_pc + request_size);
#endif
}
208
209std::ostream &
210operator <<(std::ostream &os, Fetch1::IcacheState state)
211{
212 switch (state) {
213 case Fetch1::IcacheRunning:
214 os << "IcacheRunning";
215 break;
216 case Fetch1::IcacheNeedsRetry:
217 os << "IcacheNeedsRetry";
218 break;
219 default:
220 os << "IcacheState-" << static_cast<int>(state);
221 break;
222 }
223 return os;
224}
225
/** Build the read packet for this fetch's memory transaction.  The
 *  request itself (this object) rides along as SenderState so the
 *  response can be matched back up in recvTimingResp. */
void
Fetch1::FetchRequest::makePacket()
{
    /* Make the necessary packet for a memory transaction */
    packet = new Packet(&request, MemCmd::ReadReq);
    packet->allocate();

    /* This FetchRequest becomes SenderState to allow the response to be
     * identified */
    packet->pushSenderState(this);
}
237
/** Translation interface callback: called by the ITLB when this
 *  request's translation completes (possibly with a fault).  The
 *  request_, tc and mode parameters are unused here; the fault is
 *  recorded and the stage is notified. */
void
Fetch1::FetchRequest::finish(const Fault &fault_, RequestPtr request_,
    ThreadContext *tc, BaseTLB::Mode mode)
{
    fault = fault_;

    /* NOTE(review): handleTLBResponse also sets state = Translated, so
     * this assignment is redundant but harmless */
    state = Translated;
    fetch.handleTLBResponse(this);

    /* Let's try and wake up the processor for the next cycle */
    fetch.cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
}
250
/** Handle a completed ITLB translation: log/trace any fault, mark the
 *  request Translated (faulting requests still flow through transfers
 *  so the fault can reach later stages) and try to push it towards the
 *  memory system. */
void
Fetch1::handleTLBResponse(FetchRequestPtr response)
{
    numFetchesInITLB--;

    if (response->fault != NoFault) {
        DPRINTF(Fetch, "Fault in address ITLB translation: %s, "
            "paddr: 0x%x, vaddr: 0x%x\n",
            response->fault->name(),
            (response->request.hasPaddr() ? response->request.getPaddr() : 0),
            response->request.getVaddr());

        if (DTRACE(MinorTrace))
            minorTraceResponseLine(name(), response);
    } else {
        DPRINTF(Fetch, "Got ITLB response\n");
    }

    response->state = FetchRequest::Translated;

    tryToSendToTransfers(response);
}
273
274Fetch1::FetchRequest::~FetchRequest()
275{
276 if (packet)
277 delete packet;
278}
279
/** Try to advance a fetch request from the requests queue towards the
 *  transfers queue.  Requests must leave in order, so only the queue
 *  head can move; discarded/faulting requests skip the memory system
 *  and complete immediately. */
void
Fetch1::tryToSendToTransfers(FetchRequestPtr request)
{
    /* In-order: only the head of the requests queue may be issued */
    if (!requests.empty() && requests.front() != request) {
        DPRINTF(Fetch, "Fetch not at front of requests queue, can't"
            " issue to memory\n");
        return;
    }

    if (request->state == FetchRequest::InTranslation) {
        DPRINTF(Fetch, "Fetch still in translation, not issuing to"
            " memory\n");
        return;
    }

    if (request->isDiscardable() || request->fault != NoFault) {
        /* Discarded and faulting requests carry on through transfers
         * as Complete/packet == NULL */

        request->state = FetchRequest::Complete;
        moveFromRequestsToTransfers(request);

        /* Wake up the pipeline next cycle as there will be no event
         * for this queue->queue transfer */
        cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
    } else if (request->state == FetchRequest::Translated) {
        /* Lazily build the packet on first attempt; a failed send keeps
         * the packet for the retry */
        if (!request->packet)
            request->makePacket();

        /* Ensure that the packet won't delete the request */
        assert(request->packet->needsResponse());

        if (tryToSend(request))
            moveFromRequestsToTransfers(request);
    } else {
        DPRINTF(Fetch, "Not advancing line fetch\n");
    }
}
318
/** Pop a request from the head of the requests queue and append it to
 *  transfers.  Space in transfers was reserved when the fetch was
 *  created, so the push cannot fail. */
void
Fetch1::moveFromRequestsToTransfers(FetchRequestPtr request)
{
    /* Only the head of the queue may be moved */
    assert(!requests.empty() && requests.front() == request);

    requests.pop();
    transfers.push(request);
}
327
328bool
329Fetch1::tryToSend(FetchRequestPtr request)
330{
331 bool ret = false;
332
333 if (icachePort.sendTimingReq(request->packet)) {
334 /* Invalidate the fetch_requests packet so we don't
335 * accidentally fail to deallocate it (or use it!)
336 * later by overwriting it */
337 request->packet = NULL;
338 request->state = FetchRequest::RequestIssuing;
339 numFetchesInMemorySystem++;
340
341 ret = true;
342
343 DPRINTF(Fetch, "Issued fetch request to memory: %s\n",
344 request->id);
345 } else {
346 /* Needs to be resent, wait for that */
347 icacheState = IcacheNeedsRetry;
348
349 DPRINTF(Fetch, "Line fetch needs to retry: %s\n",
350 request->id);
351 }
352
353 return ret;
354}
355
356void
357Fetch1::stepQueues()
358{
359 IcacheState old_icache_state = icacheState;
360
361 switch (icacheState) {
362 case IcacheRunning:
363 /* Move ITLB results on to the memory system */
364 if (!requests.empty()) {
365 tryToSendToTransfers(requests.front());
366 }
367 break;
368 case IcacheNeedsRetry:
369 break;
370 }
371
372 if (icacheState != old_icache_state) {
373 DPRINTF(Fetch, "Step in state %s moving to state %s\n",
374 old_icache_state, icacheState);
375 }
376}
377
378void
379Fetch1::popAndDiscard(FetchQueue &queue)
380{
381 if (!queue.empty()) {
382 delete queue.front();
383 queue.pop();
384 }
385}
386
387unsigned int
388Fetch1::numInFlightFetches()
389{
390 return requests.occupiedSpace() +
391 transfers.occupiedSpace();
392}
393
/** Print the appropriate MinorLine line for a fetch response */
void
Fetch1::minorTraceResponseLine(const std::string &name,
    Fetch1::FetchRequestPtr response) const
{
    /* M5_VAR_USED: request is only read inside the MINORLINE macro,
     * which compiles away in some builds */
    Request &request M5_VAR_USED = response->request;

    if (response->packet && response->packet->isError()) {
        /* Memory system returned an error packet */
        MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"error packet\"\n",
            response->id, request.getVaddr());
    } else if (response->fault != NoFault) {
        /* Translation fault: no physical address to report */
        MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"%s\"\n",
            response->id, request.getVaddr(), response->fault->name());
    } else {
        /* Successful fetch line */
        MINORLINE(this, "id=%s size=%d vaddr=0x%x paddr=0x%x\n",
            response->id, request.getSize(),
            request.getVaddr(), request.getPaddr());
    }
}
413
/** Memory-system callback: receive a fetch response packet, match it
 *  back to its FetchRequest via SenderState, mark it Complete and wake
 *  the stage so evaluate() can process it.  Always accepts (returns
 *  true). */
bool
Fetch1::recvTimingResp(PacketPtr response)
{
    DPRINTF(Fetch, "recvTimingResp %d\n", numFetchesInMemorySystem);

    /* Only push the response if we didn't change stream?  No, all responses
     * should hit the responses queue.  It's the job of 'step' to throw them
     * away. */
    FetchRequestPtr fetch_request = safe_cast<FetchRequestPtr>
        (response->popSenderState());

    /* Fixup packet in fetch_request as this may have changed */
    assert(!fetch_request->packet);
    fetch_request->packet = response;

    numFetchesInMemorySystem--;
    fetch_request->state = FetchRequest::Complete;

    if (DTRACE(MinorTrace))
        minorTraceResponseLine(name(), fetch_request);

    if (response->isError()) {
        DPRINTF(Fetch, "Received error response packet: %s\n",
            fetch_request->id);
    }

    /* We go to idle even if there are more things to do on the queues as
     * it's the job of step to actually step us on to the next transaction */

    /* Let's try and wake up the processor for the next cycle to move on
     * queues */
    cpu.wakeupOnEvent(Pipeline::Fetch1StageId);

    /* Never busy */
    return true;
}
450
451void
452Fetch1::recvReqRetry()
453{
454 DPRINTF(Fetch, "recvRetry\n");
455 assert(icacheState == IcacheNeedsRetry);
456 assert(!requests.empty());
457
458 FetchRequestPtr retryRequest = requests.front();
459
460 icacheState = IcacheRunning;
461
462 if (tryToSend(retryRequest))
463 moveFromRequestsToTransfers(retryRequest);
464}
465
466std::ostream &
467operator <<(std::ostream &os, Fetch1::FetchState state)
468{
469 switch (state) {
470 case Fetch1::FetchHalted:
471 os << "FetchHalted";
472 break;
473 case Fetch1::FetchWaitingForPC:
474 os << "FetchWaitingForPC";
475 break;
476 case Fetch1::FetchRunning:
477 os << "FetchRunning";
478 break;
479 default:
480 os << "FetchState-" << static_cast<int>(state);
481 break;
482 }
483 return os;
484}
485
/** Act on a stream-changing branch: adopt its sequence numbers, move
 *  the thread to the appropriate fetch state (suspend/halt/run) and
 *  redirect the thread's PC to the branch target. */
void
Fetch1::changeStream(const BranchData &branch)
{
    Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];

    updateExpectedSeqNums(branch);

    /* Start fetching again if we were stopped */
    switch (branch.reason) {
      case BranchData::SuspendThread:
        {
            /* wakeupGuard is set by wakeupFetch when a wakeup raced this
             * suspend; in that case keep fetching */
            if (thread.wakeupGuard) {
                DPRINTF(Fetch, "Not suspending fetch due to guard: %s\n",
                    branch);
            } else {
                DPRINTF(Fetch, "Suspending fetch: %s\n", branch);
                thread.state = FetchWaitingForPC;
            }
        }
        break;
      case BranchData::HaltFetch:
        DPRINTF(Fetch, "Halting fetch\n");
        thread.state = FetchHalted;
        break;
      default:
        DPRINTF(Fetch, "Changing stream on branch: %s\n", branch);
        thread.state = FetchRunning;
        break;
    }
    /* Fetch resumes (or will resume) from the branch target */
    thread.pc = branch.target;
}
517
/** Adopt the stream/prediction sequence numbers carried by a branch so
 *  that subsequently fetched lines are tagged with the new stream and
 *  stale in-flight lines can be recognised and discarded. */
void
Fetch1::updateExpectedSeqNums(const BranchData &branch)
{
    Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];

    DPRINTF(Fetch, "Updating streamSeqNum from: %d to %d,"
        " predictionSeqNum from: %d to %d\n",
        thread.streamSeqNum, branch.newStreamSeqNum,
        thread.predictionSeqNum, branch.newPredictionSeqNum);

    /* Change the stream */
    thread.streamSeqNum = branch.newStreamSeqNum;
    /* Update the prediction.  Note that it's possible for this to
     * actually set the prediction to an *older* value if new
     * predictions have been discarded by execute */
    thread.predictionSeqNum = branch.newPredictionSeqNum;
}
535
/** Turn a completed fetch response into the ForwardLineData passed to
 *  Fetch2.  On success the packet's data buffer is adopted by the line
 *  (ownership transfer); on a fault the thread stops fetching until
 *  redirected with a new PC. */
void
Fetch1::processResponse(Fetch1::FetchRequestPtr response,
    ForwardLineData &line)
{
    Fetch1ThreadInfo &thread = fetchInfo[response->id.threadId];
    PacketPtr packet = response->packet;

    /* Pass the prefetch abort (if any) on to Fetch2 in a ForwardLineData
     * structure */
    line.setFault(response->fault);
    /* Make sequence numbers valid in return */
    line.id = response->id;
    /* Set PC to virtual address */
    line.pc = response->pc;
    /* Set the lineBase, which is a sizeof(MachInst) aligned address <=
     * pc.instAddr() */
    line.lineBaseAddr = response->request.getVaddr();

    if (response->fault != NoFault) {
        /* Stop fetching if there was a fault */
        /* Should probably try to flush the queues as well, but we
         * can't be sure that this fault will actually reach Execute, and we
         * can't (currently) selectively remove this stream from the queues */
        DPRINTF(Fetch, "Stopping line fetch because of fault: %s\n",
            response->fault->name());
        thread.state = Fetch1::FetchWaitingForPC;
    } else {
        line.adoptPacketData(packet);
        /* Null the response's packet to prevent the response from trying to
         * deallocate the packet */
        response->packet = NULL;
    }
}
569
/** Per-cycle body of the stage: apply incoming branches from Execute
 *  and Fetch2 (Execute wins on a same-thread clash), start a new line
 *  fetch for the scheduled thread if there is capacity, step requests
 *  through the memory system, and forward (or discard) the head of the
 *  transfers queue to Fetch2. */
void
Fetch1::evaluate()
{
    const BranchData &execute_branch = *inp.outputWire;
    const BranchData &fetch2_branch = *prediction.outputWire;
    ForwardLineData &line_out = *out.inputWire;

    assert(line_out.isBubble());

    /* A thread is blocked while Fetch2 has no input-buffer space for it */
    for (ThreadID tid = 0; tid < cpu.numThreads; tid++)
        fetchInfo[tid].blocked = !nextStageReserve[tid].canReserve();

    /** Are both branches from later stages valid and for the same thread? */
    if (execute_branch.threadId != InvalidThreadID &&
        execute_branch.threadId == fetch2_branch.threadId) {

        Fetch1ThreadInfo &thread = fetchInfo[execute_branch.threadId];

        /* Are we changing stream?  Look to the Execute branches first, then
         * to predicted changes of stream from Fetch2 */
        if (execute_branch.isStreamChange()) {
            if (thread.state == FetchHalted) {
                DPRINTF(Fetch, "Halted, ignoring branch: %s\n", execute_branch);
            } else {
                changeStream(execute_branch);
            }

            if (!fetch2_branch.isBubble()) {
                DPRINTF(Fetch, "Ignoring simultaneous prediction: %s\n",
                    fetch2_branch);
            }

            /* The streamSeqNum tagging in request/response ->req should handle
             * discarding those requests when we get to them. */
        } else if (thread.state != FetchHalted && fetch2_branch.isStreamChange()) {
            /* Handle branch predictions by changing the instruction source
             * if we're still processing the same stream (as set by streamSeqNum)
             * as the one of the prediction.
             */
            if (fetch2_branch.newStreamSeqNum != thread.streamSeqNum) {
                DPRINTF(Fetch, "Not changing stream on prediction: %s,"
                    " streamSeqNum mismatch\n",
                    fetch2_branch);
            } else {
                changeStream(fetch2_branch);
            }
        }
    } else {
        /* Fetch2 and Execute branches are for different threads */
        if (execute_branch.threadId != InvalidThreadID &&
            execute_branch.isStreamChange()) {

            if (fetchInfo[execute_branch.threadId].state == FetchHalted) {
                DPRINTF(Fetch, "Halted, ignoring branch: %s\n", execute_branch);
            } else {
                changeStream(execute_branch);
            }
        }

        if (fetch2_branch.threadId != InvalidThreadID &&
            fetch2_branch.isStreamChange()) {

            if (fetchInfo[fetch2_branch.threadId].state == FetchHalted) {
                DPRINTF(Fetch, "Halted, ignoring branch: %s\n", fetch2_branch);
            } else if (fetch2_branch.newStreamSeqNum != fetchInfo[fetch2_branch.threadId].streamSeqNum) {
                DPRINTF(Fetch, "Not changing stream on prediction: %s,"
                    " streamSeqNum mismatch\n", fetch2_branch);
            } else {
                changeStream(fetch2_branch);
            }
        }
    }

    /* Start at most one new line fetch per cycle, bounded by fetchLimit */
    if (numInFlightFetches() < fetchLimit) {
        ThreadID fetch_tid = getScheduledThread();

        if (fetch_tid != InvalidThreadID) {
            DPRINTF(Fetch, "Fetching from thread %d\n", fetch_tid);

            /* Generate fetch to selected thread */
            fetchLine(fetch_tid);
            /* Take up a slot in the fetch queue */
            nextStageReserve[fetch_tid].reserve();
        } else {
            DPRINTF(Fetch, "No active threads available to fetch from\n");
        }
    }


    /* Halting shouldn't prevent fetches in flight from being processed */
    /* Step fetches through the icachePort queues and memory system */
    stepQueues();

    /* As we've thrown away early lines, if there is a line, it must
     * be from the right stream */
    if (!transfers.empty() &&
        transfers.front()->isComplete())
    {
        Fetch1::FetchRequestPtr response = transfers.front();

        if (response->isDiscardable()) {
            /* Give the reserved Fetch2 slot back: no line will arrive */
            nextStageReserve[response->id.threadId].freeReservation();

            DPRINTF(Fetch, "Discarding translated fetch as it's for"
                " an old stream\n");

            /* Wake up next cycle just in case there was some other
             * action to do */
            cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
        } else {
            DPRINTF(Fetch, "Processing fetched line: %s\n",
                response->id);

            processResponse(response, line_out);
        }

        popAndDiscard(transfers);
    }

    /* If we generated output, and mark the stage as being active
     * to encourage that output on to the next stage */
    if (!line_out.isBubble())
        cpu.activityRecorder->activity();

    /* Fetch1 has no inputBuffer so the only activity we can have is to
     * generate a line output (tested just above) or to initiate a memory
     * fetch which will signal activity when it returns/needs stepping
     * between queues */


    /* This looks hackish.  And it is, but there doesn't seem to be a better
     * way to do this.  The signal from commit to suspend fetch takes 1
     * clock cycle to propagate to fetch.  However, a legitimate wakeup
     * may occur between cycles from the memory system.  Thus wakeup guard
     * prevents us from suspending in that case. */

    /* Guards only protect against a suspend arriving this same cycle */
    for (auto& thread : fetchInfo) {
        thread.wakeupGuard = false;
    }
}
710
/** Wake a thread for fetching: restart from the thread context's
 *  current PC and raise wakeupGuard so a same-cycle in-flight suspend
 *  (see evaluate/changeStream) cannot immediately re-suspend it. */
void
Fetch1::wakeupFetch(ThreadID tid)
{
    ThreadContext *thread_ctx = cpu.getContext(tid);
    Fetch1ThreadInfo &thread = fetchInfo[tid];
    thread.pc = thread_ctx->pcState();
    thread.state = FetchRunning;
    thread.wakeupGuard = true;
    DPRINTF(Fetch, "[tid:%d]: Changing stream wakeup %s\n",
        tid, thread_ctx->pcState());

    /* Make sure the stage is evaluated next cycle */
    cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
}
724
725bool
726Fetch1::isDrained()
727{
728 bool drained = numInFlightFetches() == 0 && (*out.inputWire).isBubble();
729 for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
730 Fetch1ThreadInfo &thread = fetchInfo[tid];
731 DPRINTF(Drain, "isDrained[tid:%d]: %s %s%s\n",
732 tid,
733 thread.state == FetchHalted,
734 (numInFlightFetches() == 0 ? "" : "inFlightFetches "),
735 ((*out.inputWire).isBubble() ? "" : "outputtingLine"));
736
1/*
2 * Copyright (c) 2013-2014 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Authors: Andrew Bardsley
38 */
39
40#include <cstring>
41#include <iomanip>
42#include <sstream>
43
44#include "base/cast.hh"
45#include "cpu/minor/fetch1.hh"
46#include "cpu/minor/pipeline.hh"
47#include "debug/Drain.hh"
48#include "debug/Fetch.hh"
49#include "debug/MinorTrace.hh"
50
51namespace Minor
52{
53
/** Construct the Fetch1 stage: wire up latches to/from neighbouring
 *  stages, size the request/transfer queues from fetch1FetchLimit and
 *  validate the line-width parameters (0 means "use the cache line
 *  size"). */
Fetch1::Fetch1(const std::string &name_,
    MinorCPU &cpu_,
    MinorCPUParams &params,
    Latch<BranchData>::Output inp_,
    Latch<ForwardLineData>::Input out_,
    Latch<BranchData>::Output prediction_,
    std::vector<InputBuffer<ForwardLineData>> &next_stage_input_buffer) :
    Named(name_),
    cpu(cpu_),
    inp(inp_),
    out(out_),
    prediction(prediction_),
    nextStageReserve(next_stage_input_buffer),
    icachePort(name_ + ".icache_port", *this, cpu_),
    lineSnap(params.fetch1LineSnapWidth),
    maxLineWidth(params.fetch1LineWidth),
    fetchLimit(params.fetch1FetchLimit),
    fetchInfo(params.numThreads),
    threadPriority(0),
    requests(name_ + ".requests", "lines", params.fetch1FetchLimit),
    transfers(name_ + ".transfers", "lines", params.fetch1FetchLimit),
    icacheState(IcacheRunning),
    lineSeqNum(InstId::firstLineSeqNum),
    numFetchesInMemorySystem(0),
    numFetchesInITLB(0)
{
    /* A snap width of 0 means: snap fetches to the cache line size */
    if (lineSnap == 0) {
        lineSnap = cpu.cacheLineSize();
        DPRINTF(Fetch, "lineSnap set to cache line size of: %d\n",
            lineSnap);
    }

    /* A line width of 0 means: fetch whole cache lines */
    if (maxLineWidth == 0) {
        maxLineWidth = cpu.cacheLineSize();
        DPRINTF(Fetch, "maxLineWidth set to cache line size of: %d\n",
            maxLineWidth);
    }

    /* These assertions should be copied to the Python config. as well */
    if ((lineSnap % sizeof(TheISA::MachInst)) != 0) {
        fatal("%s: fetch1LineSnapWidth must be a multiple "
            "of sizeof(TheISA::MachInst) (%d)\n", name_,
            sizeof(TheISA::MachInst));
    }

    /* Lines must be at least a snap wide and instruction-aligned so a
     * fetched line always contains whole machine instructions */
    if (!(maxLineWidth >= lineSnap &&
        (maxLineWidth % sizeof(TheISA::MachInst)) == 0))
    {
        fatal("%s: fetch1LineWidth must be a multiple of"
            " sizeof(TheISA::MachInst)"
            " (%d), and >= fetch1LineSnapWidth (%d)\n",
            name_, sizeof(TheISA::MachInst), lineSnap);
    }

    /* Need at least one in-flight fetch slot for the stage to make
     * progress */
    if (fetchLimit < 1) {
        fatal("%s: fetch1FetchLimit must be >= 1 (%d)\n", name_,
            fetchLimit);
    }
}
113
114inline ThreadID
115Fetch1::getScheduledThread()
116{
117 /* Select thread via policy. */
118 std::vector<ThreadID> priority_list;
119
120 switch (cpu.threadPolicy) {
121 case Enums::SingleThreaded:
122 priority_list.push_back(0);
123 break;
124 case Enums::RoundRobin:
125 priority_list = cpu.roundRobinPriority(threadPriority);
126 break;
127 case Enums::Random:
128 priority_list = cpu.randomPriority();
129 break;
130 default:
131 panic("Unknown fetch policy");
132 }
133
134 for (auto tid : priority_list) {
135 if (cpu.getContext(tid)->status() == ThreadContext::Active &&
136 !fetchInfo[tid].blocked &&
137 fetchInfo[tid].state == FetchRunning) {
138 threadPriority = tid;
139 return tid;
140 }
141 }
142
143 return InvalidThreadID;
144}
145
/** Issue a new line fetch for thread tid: build a FetchRequest tagged
 *  with the thread's current stream/prediction sequence numbers, start
 *  its ITLB translation, and advance the thread's PC to the next
 *  line-aligned address. */
void
Fetch1::fetchLine(ThreadID tid)
{
    /* Reference the currently used thread state. */
    Fetch1ThreadInfo &thread = fetchInfo[tid];

    /* If line_offset != 0, a request is pushed for the remainder of the
     * line. */
    /* Use a lower, sizeof(MachInst) aligned address for the fetch */
    Addr aligned_pc = thread.pc.instAddr() & ~((Addr) lineSnap - 1);
    /* NOTE(review): aligned_pc is a multiple of lineSnap by construction,
     * so line_offset is always 0 and request_size always equals
     * maxLineWidth here -- confirm whether the offset was meant to be
     * derived from the unaligned PC instead */
    unsigned int line_offset = aligned_pc % lineSnap;
    unsigned int request_size = maxLineWidth - line_offset;

    /* Fill in the line's id */
    InstId request_id(tid,
        thread.streamSeqNum, thread.predictionSeqNum,
        lineSeqNum);

    FetchRequestPtr request = new FetchRequest(*this, request_id, thread.pc);

    DPRINTF(Fetch, "Inserting fetch into the fetch queue "
        "%s addr: 0x%x pc: %s line_offset: %d request_size: %d\n",
        request_id, aligned_pc, thread.pc, line_offset, request_size);

    request->request.setContext(cpu.threads[tid]->getTC()->contextId());
    request->request.setVirt(0 /* asid */,
        aligned_pc, request_size, Request::INST_FETCH, cpu.instMasterId(),
        /* I've no idea why we need the PC, but give it */
        thread.pc.instAddr());

    DPRINTF(Fetch, "Submitting ITLB request\n");
    numFetchesInITLB++;

    request->state = FetchRequest::InTranslation;

    /* Reserve space in the queues upstream of requests for results */
    transfers.reserve();
    requests.push(request);

    /* Submit the translation request.  The response will come
     * through finish/markDelayed on this request as it bears
     * the Translation interface */
    cpu.threads[request->id.threadId]->itb->translateTiming(
        &request->request,
        cpu.getContext(request->id.threadId),
        request, BaseTLB::Execute);

    lineSeqNum++;

    /* Step the PC for the next line onto the line aligned next address.
     * Note that as instructions can span lines, this PC is only a
     * reliable 'new' PC if the next line has a new stream sequence number. */
#if THE_ISA == ALPHA_ISA
    /* Restore the low bits of the PC used as address space flags */
    /* NOTE(review): the mask is (1 << sizeof(MachInst)) - 1, i.e. built
     * from the size, not the bit-width, of MachInst -- confirm intended */
    Addr pc_low_bits = thread.pc.instAddr() &
        ((Addr) (1 << sizeof(TheISA::MachInst)) - 1);

    thread.pc.set(aligned_pc + request_size + pc_low_bits);
#else
    thread.pc.set(aligned_pc + request_size);
#endif
}
208
209std::ostream &
210operator <<(std::ostream &os, Fetch1::IcacheState state)
211{
212 switch (state) {
213 case Fetch1::IcacheRunning:
214 os << "IcacheRunning";
215 break;
216 case Fetch1::IcacheNeedsRetry:
217 os << "IcacheNeedsRetry";
218 break;
219 default:
220 os << "IcacheState-" << static_cast<int>(state);
221 break;
222 }
223 return os;
224}
225
/** Build the read packet for this fetch's memory transaction.  The
 *  request itself (this object) rides along as SenderState so the
 *  response can be matched back up in recvTimingResp. */
void
Fetch1::FetchRequest::makePacket()
{
    /* Make the necessary packet for a memory transaction */
    packet = new Packet(&request, MemCmd::ReadReq);
    packet->allocate();

    /* This FetchRequest becomes SenderState to allow the response to be
     * identified */
    packet->pushSenderState(this);
}
237
/** Translation interface callback: called by the ITLB when this
 *  request's translation completes (possibly with a fault).  The
 *  request_, tc and mode parameters are unused here; the fault is
 *  recorded and the stage is notified. */
void
Fetch1::FetchRequest::finish(const Fault &fault_, RequestPtr request_,
    ThreadContext *tc, BaseTLB::Mode mode)
{
    fault = fault_;

    /* NOTE(review): handleTLBResponse also sets state = Translated, so
     * this assignment is redundant but harmless */
    state = Translated;
    fetch.handleTLBResponse(this);

    /* Let's try and wake up the processor for the next cycle */
    fetch.cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
}
250
/** Handle a completed ITLB translation: log/trace any fault, mark the
 *  request Translated (faulting requests still flow through transfers
 *  so the fault can reach later stages) and try to push it towards the
 *  memory system. */
void
Fetch1::handleTLBResponse(FetchRequestPtr response)
{
    numFetchesInITLB--;

    if (response->fault != NoFault) {
        DPRINTF(Fetch, "Fault in address ITLB translation: %s, "
            "paddr: 0x%x, vaddr: 0x%x\n",
            response->fault->name(),
            (response->request.hasPaddr() ? response->request.getPaddr() : 0),
            response->request.getVaddr());

        if (DTRACE(MinorTrace))
            minorTraceResponseLine(name(), response);
    } else {
        DPRINTF(Fetch, "Got ITLB response\n");
    }

    response->state = FetchRequest::Translated;

    tryToSendToTransfers(response);
}
273
274Fetch1::FetchRequest::~FetchRequest()
275{
276 if (packet)
277 delete packet;
278}
279
void
Fetch1::tryToSendToTransfers(FetchRequestPtr request)
{
    /* Try to advance a request from the requests queue (ITLB side)
     * onto the transfers queue (memory side).  Requests must leave in
     * queue order, so only the head of the requests queue may move. */
    if (!requests.empty() && requests.front() != request) {
        DPRINTF(Fetch, "Fetch not at front of requests queue, can't"
            " issue to memory\n");
        return;
    }

    /* Still waiting for the ITLB callback; nothing to do yet */
    if (request->state == FetchRequest::InTranslation) {
        DPRINTF(Fetch, "Fetch still in translation, not issuing to"
            " memory\n");
        return;
    }

    if (request->isDiscardable() || request->fault != NoFault) {
        /* Discarded and faulting requests carry on through transfers
         * as Complete/packet == NULL */

        request->state = FetchRequest::Complete;
        moveFromRequestsToTransfers(request);

        /* Wake up the pipeline next cycle as there will be no event
         * for this queue->queue transfer */
        cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
    } else if (request->state == FetchRequest::Translated) {
        /* Lazily build the memory packet on first issue attempt */
        if (!request->packet)
            request->makePacket();

        /* Ensure that the packet won't delete the request */
        assert(request->packet->needsResponse());

        /* Only move to transfers if the memory system accepted it;
         * otherwise it stays at the head for the retry */
        if (tryToSend(request))
            moveFromRequestsToTransfers(request);
    } else {
        DPRINTF(Fetch, "Not advancing line fetch\n");
    }
}
318
319void
320Fetch1::moveFromRequestsToTransfers(FetchRequestPtr request)
321{
322 assert(!requests.empty() && requests.front() == request);
323
324 requests.pop();
325 transfers.push(request);
326}
327
328bool
329Fetch1::tryToSend(FetchRequestPtr request)
330{
331 bool ret = false;
332
333 if (icachePort.sendTimingReq(request->packet)) {
334 /* Invalidate the fetch_requests packet so we don't
335 * accidentally fail to deallocate it (or use it!)
336 * later by overwriting it */
337 request->packet = NULL;
338 request->state = FetchRequest::RequestIssuing;
339 numFetchesInMemorySystem++;
340
341 ret = true;
342
343 DPRINTF(Fetch, "Issued fetch request to memory: %s\n",
344 request->id);
345 } else {
346 /* Needs to be resent, wait for that */
347 icacheState = IcacheNeedsRetry;
348
349 DPRINTF(Fetch, "Line fetch needs to retry: %s\n",
350 request->id);
351 }
352
353 return ret;
354}
355
356void
357Fetch1::stepQueues()
358{
359 IcacheState old_icache_state = icacheState;
360
361 switch (icacheState) {
362 case IcacheRunning:
363 /* Move ITLB results on to the memory system */
364 if (!requests.empty()) {
365 tryToSendToTransfers(requests.front());
366 }
367 break;
368 case IcacheNeedsRetry:
369 break;
370 }
371
372 if (icacheState != old_icache_state) {
373 DPRINTF(Fetch, "Step in state %s moving to state %s\n",
374 old_icache_state, icacheState);
375 }
376}
377
378void
379Fetch1::popAndDiscard(FetchQueue &queue)
380{
381 if (!queue.empty()) {
382 delete queue.front();
383 queue.pop();
384 }
385}
386
387unsigned int
388Fetch1::numInFlightFetches()
389{
390 return requests.occupiedSpace() +
391 transfers.occupiedSpace();
392}
393
/** Print the appropriate MinorLine line for a fetch response */
void
Fetch1::minorTraceResponseLine(const std::string &name,
    Fetch1::FetchRequestPtr response) const
{
    /* M5_VAR_USED silences unused-variable warnings in builds where
     * tracing is compiled out */
    Request &request M5_VAR_USED = response->request;

    if (response->packet && response->packet->isError()) {
        /* Memory system returned an error packet */
        MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"error packet\"\n",
            response->id, request.getVaddr());
    } else if (response->fault != NoFault) {
        /* Translation (or other) fault attached to the request */
        MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"%s\"\n",
            response->id, request.getVaddr(), response->fault->name());
    } else {
        /* Successful fetch line */
        MINORLINE(this, "id=%s size=%d vaddr=0x%x paddr=0x%x\n",
            response->id, request.getSize(),
            request.getVaddr(), request.getPaddr());
    }
}
413
bool
Fetch1::recvTimingResp(PacketPtr response)
{
    /* Called by the icache port when a fetch response returns from the
     * memory system.  Re-attaches the packet to its originating
     * FetchRequest (recovered from SenderState) and marks the request
     * Complete; 'evaluate'/step decides what to do with it. */
    DPRINTF(Fetch, "recvTimingResp %d\n", numFetchesInMemorySystem);

    /* Only push the response if we didn't change stream? No, all responses
     * should hit the responses queue. It's the job of 'step' to throw them
     * away. */
    FetchRequestPtr fetch_request = safe_cast<FetchRequestPtr>
        (response->popSenderState());

    /* Fixup packet in fetch_request as this may have changed */
    assert(!fetch_request->packet);
    fetch_request->packet = response;

    numFetchesInMemorySystem--;
    fetch_request->state = FetchRequest::Complete;

    if (DTRACE(MinorTrace))
        minorTraceResponseLine(name(), fetch_request);

    if (response->isError()) {
        DPRINTF(Fetch, "Received error response packet: %s\n",
            fetch_request->id);
    }

    /* We go to idle even if there are more things to do on the queues as
     * it's the job of step to actually step us on to the next transaction */

    /* Let's try and wake up the processor for the next cycle to move on
     * queues */
    cpu.wakeupOnEvent(Pipeline::Fetch1StageId);

    /* Never busy */
    return true;
}
450
451void
452Fetch1::recvReqRetry()
453{
454 DPRINTF(Fetch, "recvRetry\n");
455 assert(icacheState == IcacheNeedsRetry);
456 assert(!requests.empty());
457
458 FetchRequestPtr retryRequest = requests.front();
459
460 icacheState = IcacheRunning;
461
462 if (tryToSend(retryRequest))
463 moveFromRequestsToTransfers(retryRequest);
464}
465
466std::ostream &
467operator <<(std::ostream &os, Fetch1::FetchState state)
468{
469 switch (state) {
470 case Fetch1::FetchHalted:
471 os << "FetchHalted";
472 break;
473 case Fetch1::FetchWaitingForPC:
474 os << "FetchWaitingForPC";
475 break;
476 case Fetch1::FetchRunning:
477 os << "FetchRunning";
478 break;
479 default:
480 os << "FetchState-" << static_cast<int>(state);
481 break;
482 }
483 return os;
484}
485
void
Fetch1::changeStream(const BranchData &branch)
{
    /* Act on a change of instruction stream: bump the expected stream/
     * prediction sequence numbers, set the thread's fetch state from
     * the branch reason, and retarget the thread's PC. */
    Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];

    updateExpectedSeqNums(branch);

    /* Start fetching again if we were stopped */
    switch (branch.reason) {
    case BranchData::SuspendThread:
        {
            if (thread.wakeupGuard) {
                /* A wakeup arrived this cycle; the suspend is stale */
                DPRINTF(Fetch, "Not suspending fetch due to guard: %s\n",
                    branch);
            } else {
                DPRINTF(Fetch, "Suspending fetch: %s\n", branch);
                thread.state = FetchWaitingForPC;
            }
        }
        break;
    case BranchData::HaltFetch:
        DPRINTF(Fetch, "Halting fetch\n");
        thread.state = FetchHalted;
        break;
    default:
        /* Any other stream change resumes/continues fetching */
        DPRINTF(Fetch, "Changing stream on branch: %s\n", branch);
        thread.state = FetchRunning;
        break;
    }
    thread.pc = branch.target;
}
517
void
Fetch1::updateExpectedSeqNums(const BranchData &branch)
{
    /* Adopt the branch's stream and prediction sequence numbers as the
     * thread's new expected values; requests/responses tagged with
     * older numbers become discardable. */
    Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];

    DPRINTF(Fetch, "Updating streamSeqNum from: %d to %d,"
        " predictionSeqNum from: %d to %d\n",
        thread.streamSeqNum, branch.newStreamSeqNum,
        thread.predictionSeqNum, branch.newPredictionSeqNum);

    /* Change the stream */
    thread.streamSeqNum = branch.newStreamSeqNum;
    /* Update the prediction.  Note that it's possible for this to
     * actually set the prediction to an *older* value if new
     * predictions have been discarded by execute */
    thread.predictionSeqNum = branch.newPredictionSeqNum;
}
535
void
Fetch1::processResponse(Fetch1::FetchRequestPtr response,
    ForwardLineData &line)
{
    /* Unpack a completed fetch response into the line passed on to
     * Fetch2.  On a fault, no data is forwarded and the thread stops
     * fetching until a new PC arrives. */
    Fetch1ThreadInfo &thread = fetchInfo[response->id.threadId];
    PacketPtr packet = response->packet;

    /* Pass the prefetch abort (if any) on to Fetch2 in a ForwardLineData
     * structure */
    line.setFault(response->fault);
    /* Make sequence numbers valid in return */
    line.id = response->id;
    /* Set PC to virtual address */
    line.pc = response->pc;
    /* Set the lineBase, which is a sizeof(MachInst) aligned address <=
     * pc.instAddr() */
    line.lineBaseAddr = response->request.getVaddr();

    if (response->fault != NoFault) {
        /* Stop fetching if there was a fault */
        /* Should probably try to flush the queues as well, but we
         * can't be sure that this fault will actually reach Execute, and we
         * can't (currently) selectively remove this stream from the queues */
        DPRINTF(Fetch, "Stopping line fetch because of fault: %s\n",
            response->fault->name());
        thread.state = Fetch1::FetchWaitingForPC;
    } else {
        line.adoptPacketData(packet);
        /* Null the response's packet to prevent the response from trying to
         * deallocate the packet */
        response->packet = NULL;
    }
}
569
void
Fetch1::evaluate()
{
    /* Per-cycle evaluation of the Fetch1 stage: apply branch redirects
     * from Execute and predictions from Fetch2, start a new line fetch
     * if allowed, step the icache-side queues, then forward a completed
     * line (if any) on to Fetch2. */
    const BranchData &execute_branch = *inp.outputWire;
    const BranchData &fetch2_branch = *prediction.outputWire;
    ForwardLineData &line_out = *out.inputWire;

    assert(line_out.isBubble());

    /* A thread is blocked when Fetch2 has no buffer space for it */
    for (ThreadID tid = 0; tid < cpu.numThreads; tid++)
        fetchInfo[tid].blocked = !nextStageReserve[tid].canReserve();

    /** Are both branches from later stages valid and for the same thread? */
    if (execute_branch.threadId != InvalidThreadID &&
        execute_branch.threadId == fetch2_branch.threadId) {

        Fetch1ThreadInfo &thread = fetchInfo[execute_branch.threadId];

        /* Are we changing stream?  Look to the Execute branches first, then
         * to predicted changes of stream from Fetch2 */
        if (execute_branch.isStreamChange()) {
            if (thread.state == FetchHalted) {
                DPRINTF(Fetch, "Halted, ignoring branch: %s\n", execute_branch);
            } else {
                changeStream(execute_branch);
            }

            /* Execute wins over a simultaneous Fetch2 prediction */
            if (!fetch2_branch.isBubble()) {
                DPRINTF(Fetch, "Ignoring simultaneous prediction: %s\n",
                    fetch2_branch);
            }

            /* The streamSeqNum tagging in request/response ->req should handle
             * discarding those requests when we get to them. */
        } else if (thread.state != FetchHalted && fetch2_branch.isStreamChange()) {
            /* Handle branch predictions by changing the instruction source
             * if we're still processing the same stream (as set by streamSeqNum)
             * as the one of the prediction.
             */
            if (fetch2_branch.newStreamSeqNum != thread.streamSeqNum) {
                DPRINTF(Fetch, "Not changing stream on prediction: %s,"
                    " streamSeqNum mismatch\n",
                    fetch2_branch);
            } else {
                changeStream(fetch2_branch);
            }
        }
    } else {
        /* Fetch2 and Execute branches are for different threads */
        if (execute_branch.threadId != InvalidThreadID &&
            execute_branch.isStreamChange()) {

            if (fetchInfo[execute_branch.threadId].state == FetchHalted) {
                DPRINTF(Fetch, "Halted, ignoring branch: %s\n", execute_branch);
            } else {
                changeStream(execute_branch);
            }
        }

        if (fetch2_branch.threadId != InvalidThreadID &&
            fetch2_branch.isStreamChange()) {

            /* Same stream-sequence check as above, but for the thread
             * the Fetch2 prediction belongs to */
            if (fetchInfo[fetch2_branch.threadId].state == FetchHalted) {
                DPRINTF(Fetch, "Halted, ignoring branch: %s\n", fetch2_branch);
            } else if (fetch2_branch.newStreamSeqNum != fetchInfo[fetch2_branch.threadId].streamSeqNum) {
                DPRINTF(Fetch, "Not changing stream on prediction: %s,"
                    " streamSeqNum mismatch\n", fetch2_branch);
            } else {
                changeStream(fetch2_branch);
            }
        }
    }

    /* Start a new line fetch if the in-flight limit allows and some
     * thread is scheduled to fetch */
    if (numInFlightFetches() < fetchLimit) {
        ThreadID fetch_tid = getScheduledThread();

        if (fetch_tid != InvalidThreadID) {
            DPRINTF(Fetch, "Fetching from thread %d\n", fetch_tid);

            /* Generate fetch to selected thread */
            fetchLine(fetch_tid);
            /* Take up a slot in the fetch queue */
            nextStageReserve[fetch_tid].reserve();
        } else {
            DPRINTF(Fetch, "No active threads available to fetch from\n");
        }
    }


    /* Halting shouldn't prevent fetches in flight from being processed */
    /* Step fetches through the icachePort queues and memory system */
    stepQueues();

    /* As we've thrown away early lines, if there is a line, it must
     * be from the right stream */
    if (!transfers.empty() &&
        transfers.front()->isComplete())
    {
        Fetch1::FetchRequestPtr response = transfers.front();

        if (response->isDiscardable()) {
            /* Give back the Fetch2 slot this line would have used */
            nextStageReserve[response->id.threadId].freeReservation();

            DPRINTF(Fetch, "Discarding translated fetch as it's for"
                " an old stream\n");

            /* Wake up next cycle just in case there was some other
             * action to do */
            cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
        } else {
            DPRINTF(Fetch, "Processing fetched line: %s\n",
                response->id);

            processResponse(response, line_out);
        }

        popAndDiscard(transfers);
    }

    /* If we generated output, and mark the stage as being active
     * to encourage that output on to the next stage */
    if (!line_out.isBubble())
        cpu.activityRecorder->activity();

    /* Fetch1 has no inputBuffer so the only activity we can have is to
     * generate a line output (tested just above) or to initiate a memory
     * fetch which will signal activity when it returns/needs stepping
     * between queues */


    /* This looks hackish. And it is, but there doesn't seem to be a better
     * way to do this. The signal from commit to suspend fetch takes 1
     * clock cycle to propagate to fetch. However, a legitimate wakeup
     * may occur between cycles from the memory system. Thus wakeup guard
     * prevents us from suspending in that case. */

    /* The guard only protects for the cycle it was raised in */
    for (auto& thread : fetchInfo) {
        thread.wakeupGuard = false;
    }
}
710
void
Fetch1::wakeupFetch(ThreadID tid)
{
    /* Resume fetching for thread tid from its current thread-context
     * PC.  wakeupGuard is raised so that a suspend signal already in
     * flight from commit does not immediately re-suspend this thread
     * (see the note at the end of evaluate()). */
    ThreadContext *thread_ctx = cpu.getContext(tid);
    Fetch1ThreadInfo &thread = fetchInfo[tid];
    thread.pc = thread_ctx->pcState();
    thread.state = FetchRunning;
    thread.wakeupGuard = true;
    DPRINTF(Fetch, "[tid:%d]: Changing stream wakeup %s\n",
        tid, thread_ctx->pcState());

    cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
}
724
725bool
726Fetch1::isDrained()
727{
728 bool drained = numInFlightFetches() == 0 && (*out.inputWire).isBubble();
729 for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
730 Fetch1ThreadInfo &thread = fetchInfo[tid];
731 DPRINTF(Drain, "isDrained[tid:%d]: %s %s%s\n",
732 tid,
733 thread.state == FetchHalted,
734 (numInFlightFetches() == 0 ? "" : "inFlightFetches "),
735 ((*out.inputWire).isBubble() ? "" : "outputtingLine"));
736
737 drained = drained && thread.state == FetchHalted;
737 drained = drained && (thread.state != FetchRunning);
738 }
739
740 return drained;
741}
742
743void
744Fetch1::FetchRequest::reportData(std::ostream &os) const
745{
746 os << id;
747}
748
749bool Fetch1::FetchRequest::isDiscardable() const
750{
751 Fetch1ThreadInfo &thread = fetch.fetchInfo[id.threadId];
752
753 /* Can't discard lines in TLB/memory */
754 return state != InTranslation && state != RequestIssuing &&
755 (id.streamSeqNum != thread.streamSeqNum ||
756 id.predictionSeqNum != thread.predictionSeqNum);
757}
758
759void
760Fetch1::minorTrace() const
761{
762 // TODO: Un-bork minorTrace for THREADS
763 // bork bork bork
764 const Fetch1ThreadInfo &thread = fetchInfo[0];
765
766 std::ostringstream data;
767
768 if (thread.blocked)
769 data << 'B';
770 else
771 (*out.inputWire).reportData(data);
772
773 MINORTRACE("state=%s icacheState=%s in_tlb_mem=%s/%s"
774 " streamSeqNum=%d lines=%s\n", thread.state, icacheState,
775 numFetchesInITLB, numFetchesInMemorySystem,
776 thread.streamSeqNum, data.str());
777 requests.minorTrace();
778 transfers.minorTrace();
779}
780
781}
738 }
739
740 return drained;
741}
742
743void
744Fetch1::FetchRequest::reportData(std::ostream &os) const
745{
746 os << id;
747}
748
749bool Fetch1::FetchRequest::isDiscardable() const
750{
751 Fetch1ThreadInfo &thread = fetch.fetchInfo[id.threadId];
752
753 /* Can't discard lines in TLB/memory */
754 return state != InTranslation && state != RequestIssuing &&
755 (id.streamSeqNum != thread.streamSeqNum ||
756 id.predictionSeqNum != thread.predictionSeqNum);
757}
758
759void
760Fetch1::minorTrace() const
761{
762 // TODO: Un-bork minorTrace for THREADS
763 // bork bork bork
764 const Fetch1ThreadInfo &thread = fetchInfo[0];
765
766 std::ostringstream data;
767
768 if (thread.blocked)
769 data << 'B';
770 else
771 (*out.inputWire).reportData(data);
772
773 MINORTRACE("state=%s icacheState=%s in_tlb_mem=%s/%s"
774 " streamSeqNum=%d lines=%s\n", thread.state, icacheState,
775 numFetchesInITLB, numFetchesInMemorySystem,
776 thread.streamSeqNum, data.str());
777 requests.minorTrace();
778 transfers.minorTrace();
779}
780
781}