/* fetch1.cc revision 10379:c00f6d7e2681 */
/*
 * Copyright (c) 2013-2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andrew Bardsley
 */
39
#include <cstring>
#include <iomanip>
#include <sstream>

#include "base/cast.hh"
#include "cpu/minor/fetch1.hh"
#include "cpu/minor/pipeline.hh"
#include "debug/Drain.hh"
#include "debug/Fetch.hh"
#include "debug/MinorTrace.hh"
50
51namespace Minor
52{
53
54Fetch1::Fetch1(const std::string &name_,
55    MinorCPU &cpu_,
56    MinorCPUParams &params,
57    Latch<BranchData>::Output inp_,
58    Latch<ForwardLineData>::Input out_,
59    Latch<BranchData>::Output prediction_,
60    Reservable &next_stage_input_buffer) :
61    Named(name_),
62    cpu(cpu_),
63    inp(inp_),
64    out(out_),
65    prediction(prediction_),
66    nextStageReserve(next_stage_input_buffer),
67    icachePort(name_ + ".icache_port", *this, cpu_),
68    lineSnap(params.fetch1LineSnapWidth),
69    maxLineWidth(params.fetch1LineWidth),
70    fetchLimit(params.fetch1FetchLimit),
71    state(FetchWaitingForPC),
72    pc(0),
73    streamSeqNum(InstId::firstStreamSeqNum),
74    predictionSeqNum(InstId::firstPredictionSeqNum),
75    blocked(false),
76    requests(name_ + ".requests", "lines", params.fetch1FetchLimit),
77    transfers(name_ + ".transfers", "lines", params.fetch1FetchLimit),
78    icacheState(IcacheRunning),
79    lineSeqNum(InstId::firstLineSeqNum),
80    numFetchesInMemorySystem(0),
81    numFetchesInITLB(0)
82{
83    if (lineSnap == 0) {
84        lineSnap = cpu.cacheLineSize();
85        DPRINTF(Fetch, "lineSnap set to cache line size of: %d\n",
86            lineSnap);
87    }
88
89    if (maxLineWidth == 0) {
90        maxLineWidth = cpu.cacheLineSize();
91        DPRINTF(Fetch, "maxLineWidth set to cache line size of: %d\n",
92            maxLineWidth);
93    }
94
95    /* These assertions should be copied to the Python config. as well */
96    if ((lineSnap % sizeof(TheISA::MachInst)) != 0) {
97        fatal("%s: fetch1LineSnapWidth must be a multiple "
98            "of sizeof(TheISA::MachInst) (%d)\n", name_,
99            sizeof(TheISA::MachInst));
100    }
101
102    if (!(maxLineWidth >= lineSnap &&
103        (maxLineWidth % sizeof(TheISA::MachInst)) == 0))
104    {
105        fatal("%s: fetch1LineWidth must be a multiple of"
106            " sizeof(TheISA::MachInst)"
107            " (%d), and >= fetch1LineSnapWidth (%d)\n",
108            name_, sizeof(TheISA::MachInst), lineSnap);
109    }
110
111    if (fetchLimit < 1) {
112        fatal("%s: fetch1FetchLimit must be >= 1 (%d)\n", name_,
113            fetchLimit);
114    }
115}
116
117void
118Fetch1::fetchLine()
119{
120    /* If line_offset != 0, a request is pushed for the remainder of the
121     * line. */
122    /* Use a lower, sizeof(MachInst) aligned address for the fetch */
123    Addr aligned_pc = pc.instAddr() & ~((Addr) lineSnap - 1);
124    unsigned int line_offset = aligned_pc % lineSnap;
125    unsigned int request_size = maxLineWidth - line_offset;
126
127    /* Fill in the line's id */
128    InstId request_id(0 /* thread */,
129        streamSeqNum, predictionSeqNum,
130        lineSeqNum);
131
132    FetchRequestPtr request = new FetchRequest(*this, request_id, pc);
133
134    DPRINTF(Fetch, "Inserting fetch into the fetch queue "
135        "%s addr: 0x%x pc: %s line_offset: %d request_size: %d\n",
136        request_id, aligned_pc, pc, line_offset, request_size);
137
138    request->request.setThreadContext(cpu.cpuId(), /* thread id */ 0);
139    request->request.setVirt(0 /* asid */,
140        aligned_pc, request_size, Request::INST_FETCH, cpu.instMasterId(),
141        /* I've no idea why we need the PC, but give it */
142        pc.instAddr());
143
144    DPRINTF(Fetch, "Submitting ITLB request\n");
145    numFetchesInITLB++;
146
147    request->state = FetchRequest::InTranslation;
148
149    /* Reserve space in the queues upstream of requests for results */
150    transfers.reserve();
151    requests.push(request);
152
153    /* Submit the translation request.  The response will come
154     *  through finish/markDelayed on this request as it bears
155     *  the Translation interface */
156    cpu.threads[request->id.threadId]->itb->translateTiming(
157        &request->request,
158        cpu.getContext(request->id.threadId),
159        request, BaseTLB::Execute);
160
161    lineSeqNum++;
162
163    /* Step the PC for the next line onto the line aligned next address.
164     * Note that as instructions can span lines, this PC is only a
165     * reliable 'new' PC if the next line has a new stream sequence number. */
166#if THE_ISA == ALPHA_ISA
167    /* Restore the low bits of the PC used as address space flags */
168    Addr pc_low_bits = pc.instAddr() &
169        ((Addr) (1 << sizeof(TheISA::MachInst)) - 1);
170
171    pc.set(aligned_pc + request_size + pc_low_bits);
172#else
173    pc.set(aligned_pc + request_size);
174#endif
175}
176
177std::ostream &
178operator <<(std::ostream &os, Fetch1::IcacheState state)
179{
180    switch (state) {
181      case Fetch1::IcacheRunning:
182        os << "IcacheRunning";
183        break;
184      case Fetch1::IcacheNeedsRetry:
185        os << "IcacheNeedsRetry";
186        break;
187      default:
188        os << "IcacheState-" << static_cast<int>(state);
189        break;
190    }
191    return os;
192}
193
194void
195Fetch1::FetchRequest::makePacket()
196{
197    /* Make the necessary packet for a memory transaction */
198    packet = new Packet(&request, MemCmd::ReadReq);
199    packet->allocate();
200
201    /* This FetchRequest becomes SenderState to allow the response to be
202     *  identified */
203    packet->pushSenderState(this);
204}
205
206void
207Fetch1::FetchRequest::finish(const Fault &fault_, RequestPtr request_,
208                             ThreadContext *tc, BaseTLB::Mode mode)
209{
210    fault = fault_;
211
212    state = Translated;
213    fetch.handleTLBResponse(this);
214
215    /* Let's try and wake up the processor for the next cycle */
216    fetch.cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
217}
218
219void
220Fetch1::handleTLBResponse(FetchRequestPtr response)
221{
222    numFetchesInITLB--;
223
224    if (response->fault != NoFault) {
225        DPRINTF(Fetch, "Fault in address ITLB translation: %s, "
226            "paddr: 0x%x, vaddr: 0x%x\n",
227            response->fault->name(),
228            (response->request.hasPaddr() ? response->request.getPaddr() : 0),
229            response->request.getVaddr());
230
231        if (DTRACE(MinorTrace))
232            minorTraceResponseLine(name(), response);
233    } else {
234        DPRINTF(Fetch, "Got ITLB response\n");
235    }
236
237    response->state = FetchRequest::Translated;
238
239    tryToSendToTransfers(response);
240}
241
242Fetch1::FetchRequest::~FetchRequest()
243{
244    if (packet)
245        delete packet;
246}
247
248void
249Fetch1::tryToSendToTransfers(FetchRequestPtr request)
250{
251    if (!requests.empty() && requests.front() != request) {
252        DPRINTF(Fetch, "Fetch not at front of requests queue, can't"
253            " issue to memory\n");
254        return;
255    }
256
257    if (request->state == FetchRequest::InTranslation) {
258        DPRINTF(Fetch, "Fetch still in translation, not issuing to"
259            " memory\n");
260        return;
261    }
262
263    if (request->isDiscardable() || request->fault != NoFault) {
264        /* Discarded and faulting requests carry on through transfers
265         *  as Complete/packet == NULL */
266
267        request->state = FetchRequest::Complete;
268        moveFromRequestsToTransfers(request);
269
270        /* Wake up the pipeline next cycle as there will be no event
271         *  for this queue->queue transfer */
272        cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
273    } else if (request->state == FetchRequest::Translated) {
274        if (!request->packet)
275            request->makePacket();
276
277        /* Ensure that the packet won't delete the request */
278        assert(request->packet->needsResponse());
279
280        if (tryToSend(request))
281            moveFromRequestsToTransfers(request);
282    } else {
283        DPRINTF(Fetch, "Not advancing line fetch\n");
284    }
285}
286
287void
288Fetch1::moveFromRequestsToTransfers(FetchRequestPtr request)
289{
290    assert(!requests.empty() && requests.front() == request);
291
292    requests.pop();
293    transfers.push(request);
294}
295
296bool
297Fetch1::tryToSend(FetchRequestPtr request)
298{
299    bool ret = false;
300
301    if (icachePort.sendTimingReq(request->packet)) {
302        /* Invalidate the fetch_requests packet so we don't
303         *  accidentally fail to deallocate it (or use it!)
304         *  later by overwriting it */
305        request->packet = NULL;
306        request->state = FetchRequest::RequestIssuing;
307        numFetchesInMemorySystem++;
308
309        ret = true;
310
311        DPRINTF(Fetch, "Issued fetch request to memory: %s\n",
312            request->id);
313    } else {
314        /* Needs to be resent, wait for that */
315        icacheState = IcacheNeedsRetry;
316
317        DPRINTF(Fetch, "Line fetch needs to retry: %s\n",
318            request->id);
319    }
320
321    return ret;
322}
323
324void
325Fetch1::stepQueues()
326{
327    IcacheState old_icache_state = icacheState;
328
329    switch (icacheState) {
330      case IcacheRunning:
331        /* Move ITLB results on to the memory system */
332        if (!requests.empty()) {
333            tryToSendToTransfers(requests.front());
334        }
335        break;
336      case IcacheNeedsRetry:
337        break;
338    }
339
340    if (icacheState != old_icache_state) {
341        DPRINTF(Fetch, "Step in state %s moving to state %s\n",
342            old_icache_state, icacheState);
343    }
344}
345
346void
347Fetch1::popAndDiscard(FetchQueue &queue)
348{
349    if (!queue.empty()) {
350        delete queue.front();
351        queue.pop();
352    }
353}
354
355unsigned int
356Fetch1::numInFlightFetches()
357{
358    return requests.occupiedSpace() +
359        transfers.occupiedSpace();
360}
361
362/** Print the appropriate MinorLine line for a fetch response */
363void
364Fetch1::minorTraceResponseLine(const std::string &name,
365    Fetch1::FetchRequestPtr response) const
366{
367    Request &request M5_VAR_USED = response->request;
368
369    if (response->packet && response->packet->isError()) {
370        MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"error packet\"\n",
371            response->id, request.getVaddr());
372    } else if (response->fault != NoFault) {
373        MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"%s\"\n",
374            response->id, request.getVaddr(), response->fault->name());
375    } else {
376        MINORLINE(this, "id=%s size=%d vaddr=0x%x paddr=0x%x\n",
377            response->id, request.getSize(),
378            request.getVaddr(), request.getPaddr());
379    }
380}
381
382bool
383Fetch1::recvTimingResp(PacketPtr response)
384{
385    DPRINTF(Fetch, "recvTimingResp %d\n", numFetchesInMemorySystem);
386
387    /* Only push the response if we didn't change stream?  No,  all responses
388     *  should hit the responses queue.  It's the job of 'step' to throw them
389     *  away. */
390    FetchRequestPtr fetch_request = safe_cast<FetchRequestPtr>
391        (response->popSenderState());
392
393    /* Fixup packet in fetch_request as this may have changed */
394    assert(!fetch_request->packet);
395    fetch_request->packet = response;
396
397    numFetchesInMemorySystem--;
398    fetch_request->state = FetchRequest::Complete;
399
400    if (DTRACE(MinorTrace))
401        minorTraceResponseLine(name(), fetch_request);
402
403    if (response->isError()) {
404        DPRINTF(Fetch, "Received error response packet: %s\n",
405            fetch_request->id);
406    }
407
408    /* We go to idle even if there are more things to do on the queues as
409     *  it's the job of step to actually step us on to the next transaction */
410
411    /* Let's try and wake up the processor for the next cycle to move on
412     *  queues */
413    cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
414
415    /* Never busy */
416    return true;
417}
418
419void
420Fetch1::recvRetry()
421{
422    DPRINTF(Fetch, "recvRetry\n");
423    assert(icacheState == IcacheNeedsRetry);
424    assert(!requests.empty());
425
426    FetchRequestPtr retryRequest = requests.front();
427
428    icacheState = IcacheRunning;
429
430    if (tryToSend(retryRequest))
431        moveFromRequestsToTransfers(retryRequest);
432}
433
434std::ostream &
435operator <<(std::ostream &os, Fetch1::FetchState state)
436{
437    switch (state) {
438      case Fetch1::FetchHalted:
439        os << "FetchHalted";
440        break;
441      case Fetch1::FetchWaitingForPC:
442        os << "FetchWaitingForPC";
443        break;
444      case Fetch1::FetchRunning:
445        os << "FetchRunning";
446        break;
447      default:
448        os << "FetchState-" << static_cast<int>(state);
449        break;
450    }
451    return os;
452}
453
454void
455Fetch1::changeStream(const BranchData &branch)
456{
457    updateExpectedSeqNums(branch);
458
459    /* Start fetching again if we were stopped */
460    switch (branch.reason) {
461      case BranchData::SuspendThread:
462        DPRINTF(Fetch, "Suspending fetch: %s\n", branch);
463        state = FetchWaitingForPC;
464        break;
465      case BranchData::HaltFetch:
466        DPRINTF(Fetch, "Halting fetch\n");
467        state = FetchHalted;
468        break;
469      default:
470        DPRINTF(Fetch, "Changing stream on branch: %s\n", branch);
471        state = FetchRunning;
472        break;
473    }
474    pc = branch.target;
475}
476
477void
478Fetch1::updateExpectedSeqNums(const BranchData &branch)
479{
480    DPRINTF(Fetch, "Updating streamSeqNum from: %d to %d,"
481        " predictionSeqNum from: %d to %d\n",
482        streamSeqNum, branch.newStreamSeqNum,
483        predictionSeqNum, branch.newPredictionSeqNum);
484
485    /* Change the stream */
486    streamSeqNum = branch.newStreamSeqNum;
487    /* Update the prediction.  Note that it's possible for this to
488     *  actually set the prediction to an *older* value if new
489     *  predictions have been discarded by execute */
490    predictionSeqNum = branch.newPredictionSeqNum;
491}
492
493void
494Fetch1::processResponse(Fetch1::FetchRequestPtr response,
495    ForwardLineData &line)
496{
497    PacketPtr packet = response->packet;
498
499    /* Pass the prefetch abort (if any) on to Fetch2 in a ForwardLineData
500     * structure */
501    line.setFault(response->fault);
502    /* Make sequence numbers valid in return */
503    line.id = response->id;
504    /* Set PC to virtual address */
505    line.pc = response->pc;
506    /* Set the lineBase, which is a sizeof(MachInst) aligned address <=
507     *  pc.instAddr() */
508    line.lineBaseAddr = response->request.getVaddr();
509
510    if (response->fault != NoFault) {
511        /* Stop fetching if there was a fault */
512        /* Should probably try to flush the queues as well, but we
513         * can't be sure that this fault will actually reach Execute, and we
514         * can't (currently) selectively remove this stream from the queues */
515        DPRINTF(Fetch, "Stopping line fetch because of fault: %s\n",
516            response->fault->name());
517        state = Fetch1::FetchWaitingForPC;
518    } else {
519        line.adoptPacketData(packet);
520        /* Null the response's packet to prevent the response from trying to
521         *  deallocate the packet */
522        response->packet = NULL;
523    }
524}
525
526void
527Fetch1::evaluate()
528{
529    const BranchData &execute_branch = *inp.outputWire;
530    const BranchData &fetch2_branch = *prediction.outputWire;
531    ForwardLineData &line_out = *out.inputWire;
532
533    assert(line_out.isBubble());
534
535    blocked = !nextStageReserve.canReserve();
536
537    /* Are we changing stream?  Look to the Execute branches first, then
538     * to predicted changes of stream from Fetch2 */
539    /* @todo, find better way to express ignoring branch predictions */
540    if (execute_branch.isStreamChange() &&
541        execute_branch.reason != BranchData::BranchPrediction)
542    {
543        if (state == FetchHalted) {
544            if (execute_branch.reason == BranchData::WakeupFetch) {
545                DPRINTF(Fetch, "Waking up fetch: %s\n", execute_branch);
546                changeStream(execute_branch);
547            } else {
548                DPRINTF(Fetch, "Halted, ignoring branch: %s\n",
549                    execute_branch);
550            }
551        } else {
552            changeStream(execute_branch);
553        }
554
555        if (!fetch2_branch.isBubble()) {
556            DPRINTF(Fetch, "Ignoring simultaneous prediction: %s\n",
557                fetch2_branch);
558        }
559
560        /* The streamSeqNum tagging in request/response ->req should handle
561         *  discarding those requests when we get to them. */
562    } else if (state != FetchHalted && fetch2_branch.isStreamChange()) {
563        /* Handle branch predictions by changing the instruction source
564         * if we're still processing the same stream (as set by streamSeqNum)
565         * as the one of the prediction.
566         */
567        if (fetch2_branch.newStreamSeqNum != streamSeqNum) {
568            DPRINTF(Fetch, "Not changing stream on prediction: %s,"
569                " streamSeqNum mismatch\n",
570                fetch2_branch);
571        } else {
572            changeStream(fetch2_branch);
573        }
574    }
575
576    /* Can we fetch? */
577    /* The bare minimum requirements for initiating a fetch */
578    /* THREAD need to handle multiple threads */
579    if (state == FetchRunning && /* We are actually fetching */
580        !blocked && /* Space in the Fetch2 inputBuffer */
581        /* The thread we're going to fetch for (thread 0), is active */
582        cpu.getContext(0)->status() == ThreadContext::Active &&
583        numInFlightFetches() < fetchLimit)
584    {
585        fetchLine();
586        /* Take up a slot in the fetch queue */
587        nextStageReserve.reserve();
588    }
589
590    /* Halting shouldn't prevent fetches in flight from being processed */
591    /* Step fetches through the icachePort queues and memory system */
592    stepQueues();
593
594    /* As we've thrown away early lines, if there is a line, it must
595     *  be from the right stream */
596    if (!transfers.empty() &&
597        transfers.front()->isComplete())
598    {
599        Fetch1::FetchRequestPtr response = transfers.front();
600
601        if (response->isDiscardable()) {
602            nextStageReserve.freeReservation();
603
604            DPRINTF(Fetch, "Discarding translated fetch at it's for"
605                " an old stream\n");
606
607            /* Wake up next cycle just in case there was some other
608             *  action to do */
609            cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
610        } else {
611            DPRINTF(Fetch, "Processing fetched line: %s\n",
612                response->id);
613
614            processResponse(response, line_out);
615        }
616
617        popAndDiscard(transfers);
618    }
619
620    /* If we generated output, and mark the stage as being active
621     *  to encourage that output on to the next stage */
622    if (!line_out.isBubble())
623        cpu.activityRecorder->activity();
624
625    /* Fetch1 has no inputBuffer so the only activity we can have is to
626     *  generate a line output (tested just above) or to initiate a memory
627     *  fetch which will signal activity when it returns/needs stepping
628     *  between queues */
629}
630
631bool
632Fetch1::isDrained()
633{
634    DPRINTF(Drain, "isDrained %s %s%s%s\n",
635        state == FetchHalted,
636        (numInFlightFetches() == 0 ? "" : "inFlightFetches "),
637        ((*out.inputWire).isBubble() ? "" : "outputtingLine"));
638
639    return state == FetchHalted &&
640        numInFlightFetches() == 0 &&
641        (*out.inputWire).isBubble();
642}
643
644void
645Fetch1::FetchRequest::reportData(std::ostream &os) const
646{
647    os << id;
648}
649
650bool Fetch1::FetchRequest::isDiscardable() const
651{
652    /* Can't discard lines in TLB/memory */
653    return state != InTranslation && state != RequestIssuing &&
654        (id.streamSeqNum != fetch.streamSeqNum ||
655        id.predictionSeqNum != fetch.predictionSeqNum);
656}
657
658void
659Fetch1::minorTrace() const
660{
661    std::ostringstream data;
662
663    if (blocked)
664        data << 'B';
665    else
666        (*out.inputWire).reportData(data);
667
668    MINORTRACE("state=%s icacheState=%s in_tlb_mem=%s/%s"
669        " streamSeqNum=%d lines=%s\n", state, icacheState,
670        numFetchesInITLB, numFetchesInMemorySystem,
671        streamSeqNum, data.str());
672    requests.minorTrace();
673    transfers.minorTrace();
674}
675
676}
677