/*
 * Copyright (c) 2013-2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andrew Bardsley
 */

/**
 * @file
 *
 *  Fetch2 receives lines of data from Fetch1, separates them into
 *  instructions and passes them to Decode
 */
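/* A sketch of Fetch2's connectivity, as implied by the port declarations
 * in the class below:
 *
 *             lines                    instructions
 *   Fetch1 -----------> [ Fetch2 ] -----------------> Decode
 *     ^                   |      ^
 *     |    predictions    |      |   branches (snooped from Execute)
 *     +-------------------+      +----------------------------------
 */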

#ifndef __CPU_MINOR_FETCH2_HH__
#define __CPU_MINOR_FETCH2_HH__

#include "cpu/minor/buffers.hh"
#include "cpu/minor/cpu.hh"
#include "cpu/minor/pipe_data.hh"
#include "cpu/pred/bpred_unit.hh"
#include "params/MinorCPU.hh"

namespace Minor
{

/** This stage receives lines of data from Fetch1, separates them into
 *  instructions and passes them to Decode */
class Fetch2 : public Named
{
  protected:
    /** Pointer back to the containing CPU */
    MinorCPU &cpu;

    /** Input port carrying lines from Fetch1 */
    Latch<ForwardLineData>::Output inp;

    /** Input port carrying branches from Execute.  This is a snoop of the
     *  data provided to Fetch1. */
    Latch<BranchData>::Output branchInp;

    /** Output port carrying predictions back to Fetch1 */
    Latch<BranchData>::Input predictionOut;

    /** Output port carrying instructions into Decode */
    Latch<ForwardInstData>::Input out;

    /** Interface to reserve space in the next stage */
    std::vector<InputBuffer<ForwardInstData>> &nextStageReserve;
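
    /* A sketch of the reservation protocol (an illustration; the
     * authoritative logic lives in Fetch2::evaluate in fetch2.cc): when a
     * non-bubble instruction vector is pushed to `out', space is reserved
     * in Decode's input buffer so the push cannot overflow it:
     *
     *   *out.inputWire = insts_out;
     *   if (!insts_out.isBubble())
     *       nextStageReserve[tid].reserve();
     */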

    /** Width of output of this stage/input of next in instructions */
    unsigned int outputWidth;

    /** If true, more than one input word can be processed each cycle if
     *  there is room in the output to contain its processed data */
    bool processMoreThanOneInput;

    /** Branch predictor passed from Python configuration */
    BPredUnit &branchPredictor;

  public:
    /* Public so that Pipeline can pass it to Fetch1 */
    std::vector<InputBuffer<ForwardLineData>> inputBuffer;

  protected:
    /** Data members after this line are cycle-to-cycle state */

    struct Fetch2ThreadInfo {

        /** Default constructor */
        Fetch2ThreadInfo() :
            inputIndex(0),
            pc(TheISA::PCState(0)),
            havePC(false),
            lastStreamSeqNum(InstId::firstStreamSeqNum),
            fetchSeqNum(InstId::firstFetchSeqNum),
            expectedStreamSeqNum(InstId::firstStreamSeqNum),
            predictionSeqNum(InstId::firstPredictionSeqNum),
            blocked(false)
        { }

        Fetch2ThreadInfo(const Fetch2ThreadInfo& other) :
            inputIndex(other.inputIndex),
            pc(other.pc),
            havePC(other.havePC),
            lastStreamSeqNum(other.lastStreamSeqNum),
            fetchSeqNum(other.fetchSeqNum),
            expectedStreamSeqNum(other.expectedStreamSeqNum),
            predictionSeqNum(other.predictionSeqNum),
            blocked(other.blocked)
        { }

        /** Index into an incompletely processed input line from which
         *  instructions are to be extracted */
        unsigned int inputIndex;

        /** Remembered program counter value.  Between contiguous lines,
         *  this is just updated with advancePC.  For lines following
         *  changes of stream, a new PC must be loaded and havePC set.
         *  havePC is needed to accommodate instructions which span lines,
         *  meaning that Fetch2 and the decoder need to remember a PC
         *  value and a partially-offered instruction from the previous
         *  line */
        TheISA::PCState pc;

        /** PC is currently valid.  Initially false, gets set to true when
         *  a change-of-stream line is received and false again when lines
         *  are discarded for any reason */
        bool havePC;
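
        /* A sketch of how pc and havePC are intended to be used (an
         * illustration only; the real logic is in Fetch2::evaluate in
         * fetch2.cc, and `thread' here is a hypothetical local name):
         *
         *   if (!thread.havePC) {    // first line after change of stream
         *       thread.pc = line->pc;
         *       thread.havePC = true;
         *   }
         *   // ...decode an instruction at thread.pc...
         *   inst->staticInst->advancePC(thread.pc);  // step past the inst
         */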

        /** Stream sequence number of the last seen line, used to identify
         *  changes of instruction stream */
        InstSeqNum lastStreamSeqNum;

        /** Fetch2 is the source of fetch sequence numbers.  These number
         *  instructions in the order they are extracted from fetched
         *  lines. */
        InstSeqNum fetchSeqNum;

        /** Stream sequence number remembered from the last time
         *  predictionSeqNum changed.  Lines should only be discarded when
         *  their predictionSeqNums disagree with Fetch2::predictionSeqNum
         *  *and* they are from the same stream that bore that prediction
         *  number (see the sketch after this struct) */
        InstSeqNum expectedStreamSeqNum;

        /** Fetch2 is the source of prediction sequence numbers.  These
         *  number the changes of control flow predicted by branch
         *  prediction in Fetch2. */
        InstSeqNum predictionSeqNum;

        /** Blocked indication for report */
        bool blocked;
    };
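
    /* An illustrative restatement of the discard rule described for
     * expectedStreamSeqNum above (a sketch; the authoritative check is
     * made in Fetch2::evaluate in fetch2.cc):
     *
     *   bool discard_line =
     *       line->id.streamSeqNum == thread.expectedStreamSeqNum &&
     *       line->id.predictionSeqNum != thread.predictionSeqNum;
     */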

    std::vector<Fetch2ThreadInfo> fetchInfo;
    ThreadID threadPriority;

    /** Stats */
    Stats::Scalar intInstructions;
    Stats::Scalar fpInstructions;
    Stats::Scalar vecInstructions;
    Stats::Scalar loadInstructions;
    Stats::Scalar storeInstructions;
    Stats::Scalar amoInstructions;

  protected:
    /** Get a piece of data to work on from the inputBuffer, or NULL if
     *  there is no data. */
    const ForwardLineData *getInput(ThreadID tid);

    /** Pop an element off the input buffer, if there are any */
    void popInput(ThreadID tid);

    /** Dump the whole contents of the input buffer.  Useful after a
     *  prediction changes control flow */
    void dumpAllInput(ThreadID tid);

    /** Update local branch prediction structures from feedback from
     *  Execute. */
    void updateBranchPrediction(const BranchData &branch);

    /** Predicts branches for the given instruction.  Updates the
     *  instruction's predicted... fields and also the branch which
     *  carries the prediction to Fetch1 */
    void predictBranch(MinorDynInstPtr inst, BranchData &branch);
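
    /* A sketch of the prediction flow (an illustration; see fetch2.cc for
     * the exact sequence): for each instruction produced by evaluate():
     *
     *   BranchData prediction;
     *   predictBranch(inst, prediction);  // fills the predicted... fields
     *   *predictionOut.inputWire = prediction;  // carried back to Fetch1
     */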

    /** Use the current threading policy to determine the next thread to
     *  fetch from. */
    ThreadID getScheduledThread();
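
    /* For example (an assumption based on the other Minor pipeline
     * stages, not something this header guarantees): a round-robin policy
     * would consult cpu.roundRobinPriority(threadPriority) and a random
     * policy cpu.randomPriority(), taking the first thread in the
     * returned order that has input available. */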

  public:
    Fetch2(const std::string &name,
        MinorCPU &cpu_,
        MinorCPUParams &params,
        Latch<ForwardLineData>::Output inp_,
        Latch<BranchData>::Output branchInp_,
        Latch<BranchData>::Input predictionOut_,
        Latch<ForwardInstData>::Input out_,
        std::vector<InputBuffer<ForwardInstData>> &next_stage_input_buffer);

  public:
    /** Pass on input/buffer data to the output if you can */
    void evaluate();

    /** Report cycle-by-cycle state for MinorTrace */
    void minorTrace() const;

    /** Register this stage's statistics with the simulator */
    void regStats();

    /** Is this stage drained?  For Fetch2, draining is initiated by
     *  Execute halting Fetch1, causing Fetch2 to drain naturally.
     *  Branch predictions are ignored by Fetch1 during halt */
    bool isDrained();
};

} // namespace Minor

#endif /* __CPU_MINOR_FETCH2_HH__ */