/* fetch_impl.hh — merged from revisions 2696:30b38e36ff54 and 2698:d5f35d41e017 */
1/*
2 * Copyright (c) 2004-2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Kevin Lim
29 */
30
31#include "arch/isa_traits.hh"
32#include "arch/utility.hh"
33#include "cpu/checker/cpu.hh"
34#include "cpu/exetrace.hh"
35#include "cpu/o3/fetch.hh"
36#include "mem/packet.hh"
37#include "mem/request.hh"
38#include "sim/byteswap.hh"
39#include "sim/host.hh"
40#include "sim/root.hh"
41
42#if FULL_SYSTEM
43#include "arch/tlb.hh"
44#include "arch/vtophys.hh"
45#include "base/remote_gdb.hh"
46#include "sim/system.hh"
47#endif // FULL_SYSTEM
48
49#include <algorithm>
50
51using namespace std;
52using namespace TheISA;
53
54template<class Impl>
55Tick
56DefaultFetch<Impl>::IcachePort::recvAtomic(PacketPtr pkt)
57{
58 panic("DefaultFetch doesn't expect recvAtomic callback!");
59 return curTick;
60}
61
62template<class Impl>
63void
64DefaultFetch<Impl>::IcachePort::recvFunctional(PacketPtr pkt)
65{
66 panic("DefaultFetch doesn't expect recvFunctional callback!");
67}
68
69template<class Impl>
70void
71DefaultFetch<Impl>::IcachePort::recvStatusChange(Status status)
72{
73 if (status == RangeChange)
74 return;
75
76 panic("DefaultFetch doesn't expect recvStatusChange callback!");
77}
78
79template<class Impl>
80bool
81DefaultFetch<Impl>::IcachePort::recvTiming(Packet *pkt)
82{
83 fetch->processCacheCompletion(pkt);
84 return true;
85}
86
87template<class Impl>
88void
89DefaultFetch<Impl>::IcachePort::recvRetry()
90{
91 fetch->recvRetry();
92}
93
94template<class Impl>
95DefaultFetch<Impl>::DefaultFetch(Params *params)
96 : mem(params->mem),
97 branchPred(params),
98 decodeToFetchDelay(params->decodeToFetchDelay),
99 renameToFetchDelay(params->renameToFetchDelay),
100 iewToFetchDelay(params->iewToFetchDelay),
101 commitToFetchDelay(params->commitToFetchDelay),
102 fetchWidth(params->fetchWidth),
103 cacheBlocked(false),
104 retryPkt(NULL),
105 retryTid(-1),
106 numThreads(params->numberOfThreads),
107 numFetchingThreads(params->smtNumFetchingThreads),
108 interruptPending(false),
109 switchedOut(false)
110{
111 if (numThreads > Impl::MaxThreads)
112 fatal("numThreads is not a valid value\n");
113
114 DPRINTF(Fetch, "Fetch constructor called\n");
115
116 // Set fetch stage's status to inactive.
117 _status = Inactive;
118
119 string policy = params->smtFetchPolicy;
120
121 // Convert string to lowercase
122 std::transform(policy.begin(), policy.end(), policy.begin(),
123 (int(*)(int)) tolower);
124
125 // Figure out fetch policy
126 if (policy == "singlethread") {
127 fetchPolicy = SingleThread;
128 } else if (policy == "roundrobin") {
129 fetchPolicy = RoundRobin;
130 DPRINTF(Fetch, "Fetch policy set to Round Robin\n");
131 } else if (policy == "branch") {
132 fetchPolicy = Branch;
133 DPRINTF(Fetch, "Fetch policy set to Branch Count\n");
134 } else if (policy == "iqcount") {
135 fetchPolicy = IQ;
136 DPRINTF(Fetch, "Fetch policy set to IQ count\n");
137 } else if (policy == "lsqcount") {
138 fetchPolicy = LSQ;
139 DPRINTF(Fetch, "Fetch policy set to LSQ count\n");
140 } else {
141 fatal("Invalid Fetch Policy. Options Are: {SingleThread,"
142 " RoundRobin,LSQcount,IQcount}\n");
143 }
144
145 // Size of cache block.
146 cacheBlkSize = 64;
147
148 // Create mask to get rid of offset bits.
149 cacheBlkMask = (cacheBlkSize - 1);
150
151 for (int tid=0; tid < numThreads; tid++) {
152
153 fetchStatus[tid] = Running;
154
155 priorityList.push_back(tid);
156
157 memReq[tid] = NULL;
158
159 // Create space to store a cache line.
160 cacheData[tid] = new uint8_t[cacheBlkSize];
161
162 stalls[tid].decode = 0;
163 stalls[tid].rename = 0;
164 stalls[tid].iew = 0;
165 stalls[tid].commit = 0;
166 }
167
168 // Get the size of an instruction.
169 instSize = sizeof(MachInst);
170}
171
172template <class Impl>
173std::string
174DefaultFetch<Impl>::name() const
175{
176 return cpu->name() + ".fetch";
177}
178
179template <class Impl>
180void
181DefaultFetch<Impl>::regStats()
182{
183 icacheStallCycles
184 .name(name() + ".icacheStallCycles")
185 .desc("Number of cycles fetch is stalled on an Icache miss")
186 .prereq(icacheStallCycles);
187
188 fetchedInsts
189 .name(name() + ".Insts")
190 .desc("Number of instructions fetch has processed")
191 .prereq(fetchedInsts);
192
193 fetchedBranches
194 .name(name() + ".Branches")
195 .desc("Number of branches that fetch encountered")
196 .prereq(fetchedBranches);
197
198 predictedBranches
199 .name(name() + ".predictedBranches")
200 .desc("Number of branches that fetch has predicted taken")
201 .prereq(predictedBranches);
202
203 fetchCycles
204 .name(name() + ".Cycles")
205 .desc("Number of cycles fetch has run and was not squashing or"
206 " blocked")
207 .prereq(fetchCycles);
208
209 fetchSquashCycles
210 .name(name() + ".SquashCycles")
211 .desc("Number of cycles fetch has spent squashing")
212 .prereq(fetchSquashCycles);
213
214 fetchIdleCycles
215 .name(name() + ".IdleCycles")
216 .desc("Number of cycles fetch was idle")
217 .prereq(fetchIdleCycles);
218
219 fetchBlockedCycles
220 .name(name() + ".BlockedCycles")
221 .desc("Number of cycles fetch has spent blocked")
222 .prereq(fetchBlockedCycles);
223
224 fetchedCacheLines
225 .name(name() + ".CacheLines")
226 .desc("Number of cache lines fetched")
227 .prereq(fetchedCacheLines);
228
229 fetchMiscStallCycles
230 .name(name() + ".MiscStallCycles")
231 .desc("Number of cycles fetch has spent waiting on interrupts, or "
232 "bad addresses, or out of MSHRs")
233 .prereq(fetchMiscStallCycles);
234
235 fetchIcacheSquashes
236 .name(name() + ".IcacheSquashes")
237 .desc("Number of outstanding Icache misses that were squashed")
238 .prereq(fetchIcacheSquashes);
239
240 fetchNisnDist
241 .init(/* base value */ 0,
242 /* last value */ fetchWidth,
243 /* bucket size */ 1)
244 .name(name() + ".rateDist")
245 .desc("Number of instructions fetched each cycle (Total)")
246 .flags(Stats::pdf);
247
248 idleRate
249 .name(name() + ".idleRate")
250 .desc("Percent of cycles fetch was idle")
251 .prereq(idleRate);
252 idleRate = fetchIdleCycles * 100 / cpu->numCycles;
253
254 branchRate
255 .name(name() + ".branchRate")
256 .desc("Number of branch fetches per cycle")
257 .flags(Stats::total);
258 branchRate = fetchedBranches / cpu->numCycles;
259
260 fetchRate
261 .name(name() + ".rate")
262 .desc("Number of inst fetches per cycle")
263 .flags(Stats::total);
264 fetchRate = fetchedInsts / cpu->numCycles;
265
266 branchPred.regStats();
267}
268
269template<class Impl>
270void
271DefaultFetch<Impl>::setCPU(FullCPU *cpu_ptr)
272{
273 DPRINTF(Fetch, "Setting the CPU pointer.\n");
274 cpu = cpu_ptr;
275
276 // Name is finally available, so create the port.
277 icachePort = new IcachePort(this);
278
279 Port *mem_dport = mem->getPort("");
280 icachePort->setPeer(mem_dport);
281 mem_dport->setPeer(icachePort);
282
283 if (cpu->checker) {
284 cpu->checker->setIcachePort(icachePort);
285 }
286
287 // Fetch needs to start fetching instructions at the very beginning,
288 // so it must start up in active state.
289 switchToActive();
290}
291
292template<class Impl>
293void
294DefaultFetch<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *time_buffer)
295{
296 DPRINTF(Fetch, "Setting the time buffer pointer.\n");
297 timeBuffer = time_buffer;
298
299 // Create wires to get information from proper places in time buffer.
300 fromDecode = timeBuffer->getWire(-decodeToFetchDelay);
301 fromRename = timeBuffer->getWire(-renameToFetchDelay);
302 fromIEW = timeBuffer->getWire(-iewToFetchDelay);
303 fromCommit = timeBuffer->getWire(-commitToFetchDelay);
304}
305
306template<class Impl>
307void
308DefaultFetch<Impl>::setActiveThreads(list<unsigned> *at_ptr)
309{
310 DPRINTF(Fetch, "Setting active threads list pointer.\n");
311 activeThreads = at_ptr;
312}
313
314template<class Impl>
315void
316DefaultFetch<Impl>::setFetchQueue(TimeBuffer<FetchStruct> *fq_ptr)
317{
318 DPRINTF(Fetch, "Setting the fetch queue pointer.\n");
319 fetchQueue = fq_ptr;
320
321 // Create wire to write information to proper place in fetch queue.
322 toDecode = fetchQueue->getWire(0);
323}
324
1/*
2 * Copyright (c) 2004-2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Kevin Lim
29 */
30
31#include "arch/isa_traits.hh"
32#include "arch/utility.hh"
33#include "cpu/checker/cpu.hh"
34#include "cpu/exetrace.hh"
35#include "cpu/o3/fetch.hh"
36#include "mem/packet.hh"
37#include "mem/request.hh"
38#include "sim/byteswap.hh"
39#include "sim/host.hh"
40#include "sim/root.hh"
41
42#if FULL_SYSTEM
43#include "arch/tlb.hh"
44#include "arch/vtophys.hh"
45#include "base/remote_gdb.hh"
46#include "sim/system.hh"
47#endif // FULL_SYSTEM
48
49#include <algorithm>
50
51using namespace std;
52using namespace TheISA;
53
54template<class Impl>
55Tick
56DefaultFetch<Impl>::IcachePort::recvAtomic(PacketPtr pkt)
57{
58 panic("DefaultFetch doesn't expect recvAtomic callback!");
59 return curTick;
60}
61
62template<class Impl>
63void
64DefaultFetch<Impl>::IcachePort::recvFunctional(PacketPtr pkt)
65{
66 panic("DefaultFetch doesn't expect recvFunctional callback!");
67}
68
69template<class Impl>
70void
71DefaultFetch<Impl>::IcachePort::recvStatusChange(Status status)
72{
73 if (status == RangeChange)
74 return;
75
76 panic("DefaultFetch doesn't expect recvStatusChange callback!");
77}
78
79template<class Impl>
80bool
81DefaultFetch<Impl>::IcachePort::recvTiming(Packet *pkt)
82{
83 fetch->processCacheCompletion(pkt);
84 return true;
85}
86
87template<class Impl>
88void
89DefaultFetch<Impl>::IcachePort::recvRetry()
90{
91 fetch->recvRetry();
92}
93
94template<class Impl>
95DefaultFetch<Impl>::DefaultFetch(Params *params)
96 : mem(params->mem),
97 branchPred(params),
98 decodeToFetchDelay(params->decodeToFetchDelay),
99 renameToFetchDelay(params->renameToFetchDelay),
100 iewToFetchDelay(params->iewToFetchDelay),
101 commitToFetchDelay(params->commitToFetchDelay),
102 fetchWidth(params->fetchWidth),
103 cacheBlocked(false),
104 retryPkt(NULL),
105 retryTid(-1),
106 numThreads(params->numberOfThreads),
107 numFetchingThreads(params->smtNumFetchingThreads),
108 interruptPending(false),
109 switchedOut(false)
110{
111 if (numThreads > Impl::MaxThreads)
112 fatal("numThreads is not a valid value\n");
113
114 DPRINTF(Fetch, "Fetch constructor called\n");
115
116 // Set fetch stage's status to inactive.
117 _status = Inactive;
118
119 string policy = params->smtFetchPolicy;
120
121 // Convert string to lowercase
122 std::transform(policy.begin(), policy.end(), policy.begin(),
123 (int(*)(int)) tolower);
124
125 // Figure out fetch policy
126 if (policy == "singlethread") {
127 fetchPolicy = SingleThread;
128 } else if (policy == "roundrobin") {
129 fetchPolicy = RoundRobin;
130 DPRINTF(Fetch, "Fetch policy set to Round Robin\n");
131 } else if (policy == "branch") {
132 fetchPolicy = Branch;
133 DPRINTF(Fetch, "Fetch policy set to Branch Count\n");
134 } else if (policy == "iqcount") {
135 fetchPolicy = IQ;
136 DPRINTF(Fetch, "Fetch policy set to IQ count\n");
137 } else if (policy == "lsqcount") {
138 fetchPolicy = LSQ;
139 DPRINTF(Fetch, "Fetch policy set to LSQ count\n");
140 } else {
141 fatal("Invalid Fetch Policy. Options Are: {SingleThread,"
142 " RoundRobin,LSQcount,IQcount}\n");
143 }
144
145 // Size of cache block.
146 cacheBlkSize = 64;
147
148 // Create mask to get rid of offset bits.
149 cacheBlkMask = (cacheBlkSize - 1);
150
151 for (int tid=0; tid < numThreads; tid++) {
152
153 fetchStatus[tid] = Running;
154
155 priorityList.push_back(tid);
156
157 memReq[tid] = NULL;
158
159 // Create space to store a cache line.
160 cacheData[tid] = new uint8_t[cacheBlkSize];
161
162 stalls[tid].decode = 0;
163 stalls[tid].rename = 0;
164 stalls[tid].iew = 0;
165 stalls[tid].commit = 0;
166 }
167
168 // Get the size of an instruction.
169 instSize = sizeof(MachInst);
170}
171
172template <class Impl>
173std::string
174DefaultFetch<Impl>::name() const
175{
176 return cpu->name() + ".fetch";
177}
178
179template <class Impl>
180void
181DefaultFetch<Impl>::regStats()
182{
183 icacheStallCycles
184 .name(name() + ".icacheStallCycles")
185 .desc("Number of cycles fetch is stalled on an Icache miss")
186 .prereq(icacheStallCycles);
187
188 fetchedInsts
189 .name(name() + ".Insts")
190 .desc("Number of instructions fetch has processed")
191 .prereq(fetchedInsts);
192
193 fetchedBranches
194 .name(name() + ".Branches")
195 .desc("Number of branches that fetch encountered")
196 .prereq(fetchedBranches);
197
198 predictedBranches
199 .name(name() + ".predictedBranches")
200 .desc("Number of branches that fetch has predicted taken")
201 .prereq(predictedBranches);
202
203 fetchCycles
204 .name(name() + ".Cycles")
205 .desc("Number of cycles fetch has run and was not squashing or"
206 " blocked")
207 .prereq(fetchCycles);
208
209 fetchSquashCycles
210 .name(name() + ".SquashCycles")
211 .desc("Number of cycles fetch has spent squashing")
212 .prereq(fetchSquashCycles);
213
214 fetchIdleCycles
215 .name(name() + ".IdleCycles")
216 .desc("Number of cycles fetch was idle")
217 .prereq(fetchIdleCycles);
218
219 fetchBlockedCycles
220 .name(name() + ".BlockedCycles")
221 .desc("Number of cycles fetch has spent blocked")
222 .prereq(fetchBlockedCycles);
223
224 fetchedCacheLines
225 .name(name() + ".CacheLines")
226 .desc("Number of cache lines fetched")
227 .prereq(fetchedCacheLines);
228
229 fetchMiscStallCycles
230 .name(name() + ".MiscStallCycles")
231 .desc("Number of cycles fetch has spent waiting on interrupts, or "
232 "bad addresses, or out of MSHRs")
233 .prereq(fetchMiscStallCycles);
234
235 fetchIcacheSquashes
236 .name(name() + ".IcacheSquashes")
237 .desc("Number of outstanding Icache misses that were squashed")
238 .prereq(fetchIcacheSquashes);
239
240 fetchNisnDist
241 .init(/* base value */ 0,
242 /* last value */ fetchWidth,
243 /* bucket size */ 1)
244 .name(name() + ".rateDist")
245 .desc("Number of instructions fetched each cycle (Total)")
246 .flags(Stats::pdf);
247
248 idleRate
249 .name(name() + ".idleRate")
250 .desc("Percent of cycles fetch was idle")
251 .prereq(idleRate);
252 idleRate = fetchIdleCycles * 100 / cpu->numCycles;
253
254 branchRate
255 .name(name() + ".branchRate")
256 .desc("Number of branch fetches per cycle")
257 .flags(Stats::total);
258 branchRate = fetchedBranches / cpu->numCycles;
259
260 fetchRate
261 .name(name() + ".rate")
262 .desc("Number of inst fetches per cycle")
263 .flags(Stats::total);
264 fetchRate = fetchedInsts / cpu->numCycles;
265
266 branchPred.regStats();
267}
268
269template<class Impl>
270void
271DefaultFetch<Impl>::setCPU(FullCPU *cpu_ptr)
272{
273 DPRINTF(Fetch, "Setting the CPU pointer.\n");
274 cpu = cpu_ptr;
275
276 // Name is finally available, so create the port.
277 icachePort = new IcachePort(this);
278
279 Port *mem_dport = mem->getPort("");
280 icachePort->setPeer(mem_dport);
281 mem_dport->setPeer(icachePort);
282
283 if (cpu->checker) {
284 cpu->checker->setIcachePort(icachePort);
285 }
286
287 // Fetch needs to start fetching instructions at the very beginning,
288 // so it must start up in active state.
289 switchToActive();
290}
291
292template<class Impl>
293void
294DefaultFetch<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *time_buffer)
295{
296 DPRINTF(Fetch, "Setting the time buffer pointer.\n");
297 timeBuffer = time_buffer;
298
299 // Create wires to get information from proper places in time buffer.
300 fromDecode = timeBuffer->getWire(-decodeToFetchDelay);
301 fromRename = timeBuffer->getWire(-renameToFetchDelay);
302 fromIEW = timeBuffer->getWire(-iewToFetchDelay);
303 fromCommit = timeBuffer->getWire(-commitToFetchDelay);
304}
305
306template<class Impl>
307void
308DefaultFetch<Impl>::setActiveThreads(list<unsigned> *at_ptr)
309{
310 DPRINTF(Fetch, "Setting active threads list pointer.\n");
311 activeThreads = at_ptr;
312}
313
314template<class Impl>
315void
316DefaultFetch<Impl>::setFetchQueue(TimeBuffer<FetchStruct> *fq_ptr)
317{
318 DPRINTF(Fetch, "Setting the fetch queue pointer.\n");
319 fetchQueue = fq_ptr;
320
321 // Create wire to write information to proper place in fetch queue.
322 toDecode = fetchQueue->getWire(0);
323}
324
325#if 0
326template<class Impl>
327void
325template<class Impl>
326void
328DefaultFetch<Impl>::setPageTable(PageTable *pt_ptr)
329{
330 DPRINTF(Fetch, "Setting the page table pointer.\n");
331#if !FULL_SYSTEM
332 pTable = pt_ptr;
333#endif
334}
335#endif
336
337template<class Impl>
338void
339DefaultFetch<Impl>::initStage()
340{
341 // Setup PC and nextPC with initial state.
342 for (int tid = 0; tid < numThreads; tid++) {
343 PC[tid] = cpu->readPC(tid);
344 nextPC[tid] = cpu->readNextPC(tid);
345 }
346}
347
348template<class Impl>
349void
350DefaultFetch<Impl>::processCacheCompletion(PacketPtr pkt)
351{
352 unsigned tid = pkt->req->getThreadNum();
353
354 DPRINTF(Fetch, "[tid:%u] Waking up from cache miss.\n",tid);
355
356 // Only change the status if it's still waiting on the icache access
357 // to return.
358 if (fetchStatus[tid] != IcacheWaitResponse ||
359 pkt->req != memReq[tid] ||
360 isSwitchedOut()) {
361 ++fetchIcacheSquashes;
362 delete pkt->req;
363 delete pkt;
364 memReq[tid] = NULL;
365 return;
366 }
367
368 // Wake up the CPU (if it went to sleep and was waiting on this completion
369 // event).
370 cpu->wakeCPU();
371
372 DPRINTF(Activity, "[tid:%u] Activating fetch due to cache completion\n",
373 tid);
374
375 switchToActive();
376
377 // Only switch to IcacheAccessComplete if we're not stalled as well.
378 if (checkStall(tid)) {
379 fetchStatus[tid] = Blocked;
380 } else {
381 fetchStatus[tid] = IcacheAccessComplete;
382 }
383
327DefaultFetch<Impl>::initStage()
328{
329 // Setup PC and nextPC with initial state.
330 for (int tid = 0; tid < numThreads; tid++) {
331 PC[tid] = cpu->readPC(tid);
332 nextPC[tid] = cpu->readNextPC(tid);
333 }
334}
335
336template<class Impl>
337void
338DefaultFetch<Impl>::processCacheCompletion(PacketPtr pkt)
339{
340 unsigned tid = pkt->req->getThreadNum();
341
342 DPRINTF(Fetch, "[tid:%u] Waking up from cache miss.\n",tid);
343
344 // Only change the status if it's still waiting on the icache access
345 // to return.
346 if (fetchStatus[tid] != IcacheWaitResponse ||
347 pkt->req != memReq[tid] ||
348 isSwitchedOut()) {
349 ++fetchIcacheSquashes;
350 delete pkt->req;
351 delete pkt;
352 memReq[tid] = NULL;
353 return;
354 }
355
356 // Wake up the CPU (if it went to sleep and was waiting on this completion
357 // event).
358 cpu->wakeCPU();
359
360 DPRINTF(Activity, "[tid:%u] Activating fetch due to cache completion\n",
361 tid);
362
363 switchToActive();
364
365 // Only switch to IcacheAccessComplete if we're not stalled as well.
366 if (checkStall(tid)) {
367 fetchStatus[tid] = Blocked;
368 } else {
369 fetchStatus[tid] = IcacheAccessComplete;
370 }
371
384// memcpy(cacheData[tid], memReq[tid]->data, memReq[tid]->size);
385
386 // Reset the mem req to NULL.
387 delete pkt->req;
388 delete pkt;
389 memReq[tid] = NULL;
390}
391
392template <class Impl>
393void
394DefaultFetch<Impl>::switchOut()
395{
396 // Fetch is ready to switch out at any time.
397 switchedOut = true;
398 cpu->signalSwitched();
399}
400
401template <class Impl>
402void
403DefaultFetch<Impl>::doSwitchOut()
404{
405 // Branch predictor needs to have its state cleared.
406 branchPred.switchOut();
407}
408
409template <class Impl>
410void
411DefaultFetch<Impl>::takeOverFrom()
412{
413 // Reset all state
414 for (int i = 0; i < Impl::MaxThreads; ++i) {
415 stalls[i].decode = 0;
416 stalls[i].rename = 0;
417 stalls[i].iew = 0;
418 stalls[i].commit = 0;
419 PC[i] = cpu->readPC(i);
420 nextPC[i] = cpu->readNextPC(i);
421 fetchStatus[i] = Running;
422 }
423 numInst = 0;
424 wroteToTimeBuffer = false;
425 _status = Inactive;
426 switchedOut = false;
427 branchPred.takeOverFrom();
428}
429
430template <class Impl>
431void
432DefaultFetch<Impl>::wakeFromQuiesce()
433{
434 DPRINTF(Fetch, "Waking up from quiesce\n");
435 // Hopefully this is safe
436 // @todo: Allow other threads to wake from quiesce.
437 fetchStatus[0] = Running;
438}
439
440template <class Impl>
441inline void
442DefaultFetch<Impl>::switchToActive()
443{
444 if (_status == Inactive) {
445 DPRINTF(Activity, "Activating stage.\n");
446
447 cpu->activateStage(FullCPU::FetchIdx);
448
449 _status = Active;
450 }
451}
452
453template <class Impl>
454inline void
455DefaultFetch<Impl>::switchToInactive()
456{
457 if (_status == Active) {
458 DPRINTF(Activity, "Deactivating stage.\n");
459
460 cpu->deactivateStage(FullCPU::FetchIdx);
461
462 _status = Inactive;
463 }
464}
465
466template <class Impl>
467bool
468DefaultFetch<Impl>::lookupAndUpdateNextPC(DynInstPtr &inst, Addr &next_PC)
469{
470 // Do branch prediction check here.
471 // A bit of a misnomer...next_PC is actually the current PC until
472 // this function updates it.
473 bool predict_taken;
474
475 if (!inst->isControl()) {
476 next_PC = next_PC + instSize;
477 inst->setPredTarg(next_PC);
478 return false;
479 }
480
481 predict_taken = branchPred.predict(inst, next_PC, inst->threadNumber);
482
483 ++fetchedBranches;
484
485 if (predict_taken) {
486 ++predictedBranches;
487 }
488
489 return predict_taken;
490}
491
492template <class Impl>
493bool
494DefaultFetch<Impl>::fetchCacheLine(Addr fetch_PC, Fault &ret_fault, unsigned tid)
495{
496 Fault fault = NoFault;
497
498#if FULL_SYSTEM
499 // Flag to say whether or not address is physical addr.
500 unsigned flags = cpu->inPalMode(fetch_PC) ? PHYSICAL : 0;
501#else
502 unsigned flags = 0;
503#endif // FULL_SYSTEM
504
505 if (cacheBlocked || (interruptPending && flags == 0) || switchedOut) {
506 // Hold off fetch from getting new instructions when:
507 // Cache is blocked, or
508 // while an interrupt is pending and we're not in PAL mode, or
509 // fetch is switched out.
510 return false;
511 }
512
513 // Align the fetch PC so it's at the start of a cache block.
514 fetch_PC = icacheBlockAlignPC(fetch_PC);
515
516 // Setup the memReq to do a read of the first instruction's address.
517 // Set the appropriate read size and flags as well.
518 // Build request here.
519 RequestPtr mem_req = new Request(tid, fetch_PC, cacheBlkSize, flags,
520 fetch_PC, cpu->readCpuId(), tid);
521
522 memReq[tid] = mem_req;
523
524 // Translate the instruction request.
525 fault = cpu->translateInstReq(mem_req, cpu->thread[tid]);
526
527 // In the case of faults, the fetch stage may need to stall and wait
528 // for the ITB miss to be handled.
529
530 // If translation was successful, attempt to read the first
531 // instruction.
532 if (fault == NoFault) {
533#if 0
534 if (cpu->system->memctrl->badaddr(memReq[tid]->paddr) ||
535 memReq[tid]->flags & UNCACHEABLE) {
536 DPRINTF(Fetch, "Fetch: Bad address %#x (hopefully on a "
537 "misspeculating path)!",
538 memReq[tid]->paddr);
539 ret_fault = TheISA::genMachineCheckFault();
540 return false;
541 }
542#endif
543
544 // Build packet here.
545 PacketPtr data_pkt = new Packet(mem_req,
546 Packet::ReadReq, Packet::Broadcast);
547 data_pkt->dataStatic(cacheData[tid]);
548
549 DPRINTF(Fetch, "Fetch: Doing instruction read.\n");
550
551 fetchedCacheLines++;
552
553 // Now do the timing access to see whether or not the instruction
554 // exists within the cache.
555 if (!icachePort->sendTiming(data_pkt)) {
556 assert(retryPkt == NULL);
557 assert(retryTid == -1);
558 DPRINTF(Fetch, "[tid:%i] Out of MSHRs!\n", tid);
559 fetchStatus[tid] = IcacheWaitRetry;
560 retryPkt = data_pkt;
561 retryTid = tid;
562 cacheBlocked = true;
563 return false;
564 }
565
566 DPRINTF(Fetch, "Doing cache access.\n");
567
568 lastIcacheStall[tid] = curTick;
569
570 DPRINTF(Activity, "[tid:%i]: Activity: Waiting on I-cache "
571 "response.\n", tid);
572
573 fetchStatus[tid] = IcacheWaitResponse;
574 } else {
575 delete mem_req;
576 memReq[tid] = NULL;
577 }
578
579 ret_fault = fault;
580 return true;
581}
582
583template <class Impl>
584inline void
585DefaultFetch<Impl>::doSquash(const Addr &new_PC, unsigned tid)
586{
587 DPRINTF(Fetch, "[tid:%i]: Squashing, setting PC to: %#x.\n",
588 tid, new_PC);
589
590 PC[tid] = new_PC;
591 nextPC[tid] = new_PC + instSize;
592
593 // Clear the icache miss if it's outstanding.
594 if (fetchStatus[tid] == IcacheWaitResponse) {
595 DPRINTF(Fetch, "[tid:%i]: Squashing outstanding Icache miss.\n",
596 tid);
372 // Reset the mem req to NULL.
373 delete pkt->req;
374 delete pkt;
375 memReq[tid] = NULL;
376}
377
378template <class Impl>
379void
380DefaultFetch<Impl>::switchOut()
381{
382 // Fetch is ready to switch out at any time.
383 switchedOut = true;
384 cpu->signalSwitched();
385}
386
387template <class Impl>
388void
389DefaultFetch<Impl>::doSwitchOut()
390{
391 // Branch predictor needs to have its state cleared.
392 branchPred.switchOut();
393}
394
395template <class Impl>
396void
397DefaultFetch<Impl>::takeOverFrom()
398{
399 // Reset all state
400 for (int i = 0; i < Impl::MaxThreads; ++i) {
401 stalls[i].decode = 0;
402 stalls[i].rename = 0;
403 stalls[i].iew = 0;
404 stalls[i].commit = 0;
405 PC[i] = cpu->readPC(i);
406 nextPC[i] = cpu->readNextPC(i);
407 fetchStatus[i] = Running;
408 }
409 numInst = 0;
410 wroteToTimeBuffer = false;
411 _status = Inactive;
412 switchedOut = false;
413 branchPred.takeOverFrom();
414}
415
416template <class Impl>
417void
418DefaultFetch<Impl>::wakeFromQuiesce()
419{
420 DPRINTF(Fetch, "Waking up from quiesce\n");
421 // Hopefully this is safe
422 // @todo: Allow other threads to wake from quiesce.
423 fetchStatus[0] = Running;
424}
425
426template <class Impl>
427inline void
428DefaultFetch<Impl>::switchToActive()
429{
430 if (_status == Inactive) {
431 DPRINTF(Activity, "Activating stage.\n");
432
433 cpu->activateStage(FullCPU::FetchIdx);
434
435 _status = Active;
436 }
437}
438
439template <class Impl>
440inline void
441DefaultFetch<Impl>::switchToInactive()
442{
443 if (_status == Active) {
444 DPRINTF(Activity, "Deactivating stage.\n");
445
446 cpu->deactivateStage(FullCPU::FetchIdx);
447
448 _status = Inactive;
449 }
450}
451
452template <class Impl>
453bool
454DefaultFetch<Impl>::lookupAndUpdateNextPC(DynInstPtr &inst, Addr &next_PC)
455{
456 // Do branch prediction check here.
457 // A bit of a misnomer...next_PC is actually the current PC until
458 // this function updates it.
459 bool predict_taken;
460
461 if (!inst->isControl()) {
462 next_PC = next_PC + instSize;
463 inst->setPredTarg(next_PC);
464 return false;
465 }
466
467 predict_taken = branchPred.predict(inst, next_PC, inst->threadNumber);
468
469 ++fetchedBranches;
470
471 if (predict_taken) {
472 ++predictedBranches;
473 }
474
475 return predict_taken;
476}
477
478template <class Impl>
479bool
480DefaultFetch<Impl>::fetchCacheLine(Addr fetch_PC, Fault &ret_fault, unsigned tid)
481{
482 Fault fault = NoFault;
483
484#if FULL_SYSTEM
485 // Flag to say whether or not address is physical addr.
486 unsigned flags = cpu->inPalMode(fetch_PC) ? PHYSICAL : 0;
487#else
488 unsigned flags = 0;
489#endif // FULL_SYSTEM
490
491 if (cacheBlocked || (interruptPending && flags == 0) || switchedOut) {
492 // Hold off fetch from getting new instructions when:
493 // Cache is blocked, or
494 // while an interrupt is pending and we're not in PAL mode, or
495 // fetch is switched out.
496 return false;
497 }
498
499 // Align the fetch PC so it's at the start of a cache block.
500 fetch_PC = icacheBlockAlignPC(fetch_PC);
501
502 // Setup the memReq to do a read of the first instruction's address.
503 // Set the appropriate read size and flags as well.
504 // Build request here.
505 RequestPtr mem_req = new Request(tid, fetch_PC, cacheBlkSize, flags,
506 fetch_PC, cpu->readCpuId(), tid);
507
508 memReq[tid] = mem_req;
509
510 // Translate the instruction request.
511 fault = cpu->translateInstReq(mem_req, cpu->thread[tid]);
512
513 // In the case of faults, the fetch stage may need to stall and wait
514 // for the ITB miss to be handled.
515
516 // If translation was successful, attempt to read the first
517 // instruction.
518 if (fault == NoFault) {
519#if 0
520 if (cpu->system->memctrl->badaddr(memReq[tid]->paddr) ||
521 memReq[tid]->flags & UNCACHEABLE) {
522 DPRINTF(Fetch, "Fetch: Bad address %#x (hopefully on a "
523 "misspeculating path)!",
524 memReq[tid]->paddr);
525 ret_fault = TheISA::genMachineCheckFault();
526 return false;
527 }
528#endif
529
530 // Build packet here.
531 PacketPtr data_pkt = new Packet(mem_req,
532 Packet::ReadReq, Packet::Broadcast);
533 data_pkt->dataStatic(cacheData[tid]);
534
535 DPRINTF(Fetch, "Fetch: Doing instruction read.\n");
536
537 fetchedCacheLines++;
538
539 // Now do the timing access to see whether or not the instruction
540 // exists within the cache.
541 if (!icachePort->sendTiming(data_pkt)) {
542 assert(retryPkt == NULL);
543 assert(retryTid == -1);
544 DPRINTF(Fetch, "[tid:%i] Out of MSHRs!\n", tid);
545 fetchStatus[tid] = IcacheWaitRetry;
546 retryPkt = data_pkt;
547 retryTid = tid;
548 cacheBlocked = true;
549 return false;
550 }
551
552 DPRINTF(Fetch, "Doing cache access.\n");
553
554 lastIcacheStall[tid] = curTick;
555
556 DPRINTF(Activity, "[tid:%i]: Activity: Waiting on I-cache "
557 "response.\n", tid);
558
559 fetchStatus[tid] = IcacheWaitResponse;
560 } else {
561 delete mem_req;
562 memReq[tid] = NULL;
563 }
564
565 ret_fault = fault;
566 return true;
567}
568
569template <class Impl>
570inline void
571DefaultFetch<Impl>::doSquash(const Addr &new_PC, unsigned tid)
572{
573 DPRINTF(Fetch, "[tid:%i]: Squashing, setting PC to: %#x.\n",
574 tid, new_PC);
575
576 PC[tid] = new_PC;
577 nextPC[tid] = new_PC + instSize;
578
579 // Clear the icache miss if it's outstanding.
580 if (fetchStatus[tid] == IcacheWaitResponse) {
581 DPRINTF(Fetch, "[tid:%i]: Squashing outstanding Icache miss.\n",
582 tid);
597 // Should I delete this here or when it comes back from the cache?
598// delete memReq[tid];
599 memReq[tid] = NULL;
600 }
601
602 // Get rid of the retrying packet if it was from this thread.
603 if (retryTid == tid) {
604 assert(cacheBlocked);
605 cacheBlocked = false;
606 retryTid = -1;
607 retryPkt = NULL;
608 delete retryPkt->req;
609 delete retryPkt;
610 }
611
612 fetchStatus[tid] = Squashing;
613
614 ++fetchSquashCycles;
615}
616
617template<class Impl>
618void
619DefaultFetch<Impl>::squashFromDecode(const Addr &new_PC,
620 const InstSeqNum &seq_num,
621 unsigned tid)
622{
623 DPRINTF(Fetch, "[tid:%i]: Squashing from decode.\n",tid);
624
625 doSquash(new_PC, tid);
626
627 // Tell the CPU to remove any instructions that are in flight between
628 // fetch and decode.
629 cpu->removeInstsUntil(seq_num, tid);
630}
631
632template<class Impl>
633bool
634DefaultFetch<Impl>::checkStall(unsigned tid) const
635{
636 bool ret_val = false;
637
638 if (cpu->contextSwitch) {
639 DPRINTF(Fetch,"[tid:%i]: Stalling for a context switch.\n",tid);
640 ret_val = true;
641 } else if (stalls[tid].decode) {
642 DPRINTF(Fetch,"[tid:%i]: Stall from Decode stage detected.\n",tid);
643 ret_val = true;
644 } else if (stalls[tid].rename) {
645 DPRINTF(Fetch,"[tid:%i]: Stall from Rename stage detected.\n",tid);
646 ret_val = true;
647 } else if (stalls[tid].iew) {
648 DPRINTF(Fetch,"[tid:%i]: Stall from IEW stage detected.\n",tid);
649 ret_val = true;
650 } else if (stalls[tid].commit) {
651 DPRINTF(Fetch,"[tid:%i]: Stall from Commit stage detected.\n",tid);
652 ret_val = true;
653 }
654
655 return ret_val;
656}
657
658template<class Impl>
659typename DefaultFetch<Impl>::FetchStatus
660DefaultFetch<Impl>::updateFetchStatus()
661{
662 //Check Running
663 list<unsigned>::iterator threads = (*activeThreads).begin();
664
665 while (threads != (*activeThreads).end()) {
666
667 unsigned tid = *threads++;
668
669 if (fetchStatus[tid] == Running ||
670 fetchStatus[tid] == Squashing ||
671 fetchStatus[tid] == IcacheAccessComplete) {
672
673 if (_status == Inactive) {
674 DPRINTF(Activity, "[tid:%i]: Activating stage.\n",tid);
675
676 if (fetchStatus[tid] == IcacheAccessComplete) {
677 DPRINTF(Activity, "[tid:%i]: Activating fetch due to cache"
678 "completion\n",tid);
679 }
680
681 cpu->activateStage(FullCPU::FetchIdx);
682 }
683
684 return Active;
685 }
686 }
687
688 // Stage is switching from active to inactive, notify CPU of it.
689 if (_status == Active) {
690 DPRINTF(Activity, "Deactivating stage.\n");
691
692 cpu->deactivateStage(FullCPU::FetchIdx);
693 }
694
695 return Inactive;
696}
697
698template <class Impl>
699void
700DefaultFetch<Impl>::squash(const Addr &new_PC, unsigned tid)
701{
702 DPRINTF(Fetch, "[tid:%u]: Squash from commit.\n",tid);
703
704 doSquash(new_PC, tid);
705
706 // Tell the CPU to remove any instructions that are not in the ROB.
707 cpu->removeInstsNotInROB(tid);
708}
709
710template <class Impl>
711void
712DefaultFetch<Impl>::tick()
713{
714 list<unsigned>::iterator threads = (*activeThreads).begin();
715 bool status_change = false;
716
717 wroteToTimeBuffer = false;
718
719 while (threads != (*activeThreads).end()) {
720 unsigned tid = *threads++;
721
722 // Check the signals for each thread to determine the proper status
723 // for each thread.
724 bool updated_status = checkSignalsAndUpdate(tid);
725 status_change = status_change || updated_status;
726 }
727
728 DPRINTF(Fetch, "Running stage.\n");
729
730 // Reset the number of the instruction we're fetching.
731 numInst = 0;
732
733 if (fromCommit->commitInfo[0].interruptPending) {
734 interruptPending = true;
735 }
736 if (fromCommit->commitInfo[0].clearInterrupt) {
737 interruptPending = false;
738 }
739
740 for (threadFetched = 0; threadFetched < numFetchingThreads;
741 threadFetched++) {
742 // Fetch each of the actively fetching threads.
743 fetch(status_change);
744 }
745
746 // Record number of instructions fetched this cycle for distribution.
747 fetchNisnDist.sample(numInst);
748
749 if (status_change) {
750 // Change the fetch stage status if there was a status change.
751 _status = updateFetchStatus();
752 }
753
754 // If there was activity this cycle, inform the CPU of it.
755 if (wroteToTimeBuffer || cpu->contextSwitch) {
756 DPRINTF(Activity, "Activity this cycle.\n");
757
758 cpu->activityThisCycle();
759 }
760}
761
762template <class Impl>
763bool
764DefaultFetch<Impl>::checkSignalsAndUpdate(unsigned tid)
765{
766 // Update the per thread stall statuses.
767 if (fromDecode->decodeBlock[tid]) {
768 stalls[tid].decode = true;
769 }
770
771 if (fromDecode->decodeUnblock[tid]) {
772 assert(stalls[tid].decode);
773 assert(!fromDecode->decodeBlock[tid]);
774 stalls[tid].decode = false;
775 }
776
777 if (fromRename->renameBlock[tid]) {
778 stalls[tid].rename = true;
779 }
780
781 if (fromRename->renameUnblock[tid]) {
782 assert(stalls[tid].rename);
783 assert(!fromRename->renameBlock[tid]);
784 stalls[tid].rename = false;
785 }
786
787 if (fromIEW->iewBlock[tid]) {
788 stalls[tid].iew = true;
789 }
790
791 if (fromIEW->iewUnblock[tid]) {
792 assert(stalls[tid].iew);
793 assert(!fromIEW->iewBlock[tid]);
794 stalls[tid].iew = false;
795 }
796
797 if (fromCommit->commitBlock[tid]) {
798 stalls[tid].commit = true;
799 }
800
801 if (fromCommit->commitUnblock[tid]) {
802 assert(stalls[tid].commit);
803 assert(!fromCommit->commitBlock[tid]);
804 stalls[tid].commit = false;
805 }
806
807 // Check squash signals from commit.
808 if (fromCommit->commitInfo[tid].squash) {
809
810 DPRINTF(Fetch, "[tid:%u]: Squashing instructions due to squash "
811 "from commit.\n",tid);
812
813 // In any case, squash.
814 squash(fromCommit->commitInfo[tid].nextPC,tid);
815
816 // Also check if there's a mispredict that happened.
817 if (fromCommit->commitInfo[tid].branchMispredict) {
818 branchPred.squash(fromCommit->commitInfo[tid].doneSeqNum,
819 fromCommit->commitInfo[tid].nextPC,
820 fromCommit->commitInfo[tid].branchTaken,
821 tid);
822 } else {
823 branchPred.squash(fromCommit->commitInfo[tid].doneSeqNum,
824 tid);
825 }
826
827 return true;
828 } else if (fromCommit->commitInfo[tid].doneSeqNum) {
829 // Update the branch predictor if it wasn't a squashed instruction
830 // that was broadcasted.
831 branchPred.update(fromCommit->commitInfo[tid].doneSeqNum, tid);
832 }
833
834 // Check ROB squash signals from commit.
835 if (fromCommit->commitInfo[tid].robSquashing) {
836 DPRINTF(Fetch, "[tid:%u]: ROB is still squashing Thread %u.\n", tid);
837
838 // Continue to squash.
839 fetchStatus[tid] = Squashing;
840
841 return true;
842 }
843
844 // Check squash signals from decode.
845 if (fromDecode->decodeInfo[tid].squash) {
846 DPRINTF(Fetch, "[tid:%u]: Squashing instructions due to squash "
847 "from decode.\n",tid);
848
849 // Update the branch predictor.
850 if (fromDecode->decodeInfo[tid].branchMispredict) {
851 branchPred.squash(fromDecode->decodeInfo[tid].doneSeqNum,
852 fromDecode->decodeInfo[tid].nextPC,
853 fromDecode->decodeInfo[tid].branchTaken,
854 tid);
855 } else {
856 branchPred.squash(fromDecode->decodeInfo[tid].doneSeqNum,
857 tid);
858 }
859
860 if (fetchStatus[tid] != Squashing) {
861 // Squash unless we're already squashing
862 squashFromDecode(fromDecode->decodeInfo[tid].nextPC,
863 fromDecode->decodeInfo[tid].doneSeqNum,
864 tid);
865
866 return true;
867 }
868 }
869
870 if (checkStall(tid) && fetchStatus[tid] != IcacheWaitResponse) {
871 DPRINTF(Fetch, "[tid:%i]: Setting to blocked\n",tid);
872
873 fetchStatus[tid] = Blocked;
874
875 return true;
876 }
877
878 if (fetchStatus[tid] == Blocked ||
879 fetchStatus[tid] == Squashing) {
880 // Switch status to running if fetch isn't being told to block or
881 // squash this cycle.
882 DPRINTF(Fetch, "[tid:%i]: Done squashing, switching to running.\n",
883 tid);
884
885 fetchStatus[tid] = Running;
886
887 return true;
888 }
889
890 // If we've reached this point, we have not gotten any signals that
891 // cause fetch to change its status. Fetch remains the same as before.
892 return false;
893}
894
template<class Impl>
void
DefaultFetch<Impl>::fetch(bool &status_change)
{
    // Fetch a cache line's worth of instructions for one thread (chosen
    // by the SMT fetch policy) and enqueue them toward decode.  Sets
    // status_change when this thread's fetch status transitions.
    //////////////////////////////////////////
    // Start actual fetch
    //////////////////////////////////////////
    int tid = getFetchingThread(fetchPolicy);

    if (tid == -1) {
        DPRINTF(Fetch,"There are no more threads available to fetch from.\n");

        // Breaks looping condition in tick()
        threadFetched = numFetchingThreads;
        return;
    }

    // The current PC (a reference: assignments below redirect the thread).
    Addr &fetch_PC = PC[tid];

    // Fault code for memory access.
    Fault fault = NoFault;

    // If returning from the delay of a cache miss, then update the status
    // to running, otherwise do the cache access.  Possibly move this up
    // to tick() function.
    if (fetchStatus[tid] == IcacheAccessComplete) {
        DPRINTF(Fetch, "[tid:%i]: Icache miss is complete.\n",
                tid);

        fetchStatus[tid] = Running;
        status_change = true;
    } else if (fetchStatus[tid] == Running) {
        DPRINTF(Fetch, "[tid:%i]: Attempting to translate and read "
                "instruction, starting at PC %08p.\n",
                tid, fetch_PC);

        // Initiate the timing icache access; on failure (blocked cache,
        // pending interrupt, switched out) count the stall and bail out.
        bool fetch_success = fetchCacheLine(fetch_PC, fault, tid);
        if (!fetch_success) {
            ++fetchMiscStallCycles;
            return;
        }
    } else {
        // Thread cannot fetch this cycle; attribute the idle cycle to
        // the appropriate stall statistic.
        if (fetchStatus[tid] == Idle) {
            ++fetchIdleCycles;
        } else if (fetchStatus[tid] == Blocked) {
            ++fetchBlockedCycles;
        } else if (fetchStatus[tid] == Squashing) {
            ++fetchSquashCycles;
        } else if (fetchStatus[tid] == IcacheWaitResponse) {
            ++icacheStallCycles;
        }

        // Status is Idle, Squashing, Blocked, or IcacheWaitResponse, so
        // fetch should do nothing.
        return;
    }

    ++fetchCycles;

    // If we had a stall due to an icache miss, then return.
    if (fetchStatus[tid] == IcacheWaitResponse) {
        ++icacheStallCycles;
        status_change = true;
        return;
    }

    Addr next_PC = fetch_PC;
    InstSeqNum inst_seq;
    MachInst inst;
    ExtMachInst ext_inst;
    // Offset into the fetched cache block, aligned down to a 4-byte
    // instruction boundary.  @todo: Fix this hack.
    unsigned offset = (fetch_PC & cacheBlkMask) & ~3;

    if (fault == NoFault) {
        // If the read of the first instruction was successful, then grab the
        // instructions from the rest of the cache line and put them into the
        // queue heading to decode.

        DPRINTF(Fetch, "[tid:%i]: Adding instructions to queue to "
                "decode.\n",tid);

        // Need to keep track of whether or not a predicted branch
        // ended this fetch block.
        bool predicted_branch = false;

        // Decode up to a full fetch width of instructions, stopping at
        // the end of the cache block or at a predicted-taken branch.
        for (;
             offset < cacheBlkSize &&
                 numInst < fetchWidth &&
                 !predicted_branch;
             ++numInst) {

            // Get a sequence number.
            inst_seq = cpu->getAndIncrementInstSeq();

            // Make sure this is a valid index.
            assert(offset <= cacheBlkSize - instSize);

            // Get the instruction from the array of the cache line,
            // converting from guest to host byte order.
            inst = gtoh(*reinterpret_cast<MachInst *>
                        (&cacheData[tid][offset]));

            ext_inst = TheISA::makeExtMI(inst, fetch_PC);

            // Create a new DynInst from the instruction fetched.
            DynInstPtr instruction = new DynInst(ext_inst, fetch_PC,
                                                 next_PC,
                                                 inst_seq, cpu);
            instruction->setThread(tid);

            instruction->setASID(tid);

            instruction->setState(cpu->thread[tid]);

            DPRINTF(Fetch, "[tid:%i]: Instruction PC %#x created "
                    "[sn:%lli]\n",
                    tid, instruction->readPC(), inst_seq);

            DPRINTF(Fetch, "[tid:%i]: Instruction is: %s\n",
                    tid, instruction->staticInst->disassemble(fetch_PC));

            instruction->traceData =
                Trace::getInstRecord(curTick, cpu->tcBase(tid), cpu,
                                     instruction->staticInst,
                                     instruction->readPC(),tid);

            // Predict the next PC; returns true for a predicted-taken
            // branch, which terminates this fetch block.
            predicted_branch = lookupAndUpdateNextPC(instruction, next_PC);

            // Add instruction to the CPU's list of instructions.
            instruction->setInstListIt(cpu->addInst(instruction));

            // Write the instruction to the first slot in the queue
            // that heads to decode.
            toDecode->insts[numInst] = instruction;

            toDecode->size++;

            // Increment stat of fetched instructions.
            ++fetchedInsts;

            // Move to the next instruction, unless we have a branch.
            fetch_PC = next_PC;

            if (instruction->isQuiesce()) {
                warn("%lli: Quiesce instruction encountered, halting fetch!",
                     curTick);
                fetchStatus[tid] = QuiescePending;
                ++numInst;
                status_change = true;
                break;
            }

            offset+= instSize;
        }
    }

    if (numInst > 0) {
        wroteToTimeBuffer = true;
    }

    // Now that fetching is completed, update the PC to signify what the next
    // cycle will be.
    if (fault == NoFault) {
        DPRINTF(Fetch, "[tid:%i]: Setting PC to %08p.\n",tid, next_PC);

        PC[tid] = next_PC;
        nextPC[tid] = next_PC + instSize;
    } else {
        // We shouldn't be in an icache miss and also have a fault (an ITB
        // miss)
        if (fetchStatus[tid] == IcacheWaitResponse) {
            panic("Fetch should have exited prior to this!");
        }

        // Send the fault to commit.  This thread will not do anything
        // until commit handles the fault.  The only other way it can
        // wake up is if a squash comes along and changes the PC.
#if FULL_SYSTEM
        assert(numInst != fetchWidth);
        // Get a sequence number.
        inst_seq = cpu->getAndIncrementInstSeq();
        // We will use a nop in order to carry the fault.
        ext_inst = TheISA::NoopMachInst;

        // Create a new DynInst from the dummy nop.
        DynInstPtr instruction = new DynInst(ext_inst, fetch_PC,
                                             next_PC,
                                             inst_seq, cpu);
        instruction->setPredTarg(next_PC + instSize);
        instruction->setThread(tid);

        instruction->setASID(tid);

        instruction->setState(cpu->thread[tid]);

        instruction->traceData = NULL;

        instruction->setInstListIt(cpu->addInst(instruction));

        instruction->fault = fault;

        toDecode->insts[numInst] = instruction;
        toDecode->size++;

        DPRINTF(Fetch, "[tid:%i]: Blocked, need to handle the trap.\n",tid);

        fetchStatus[tid] = TrapPending;
        status_change = true;

        warn("%lli fault (%d) detected @ PC %08p", curTick, fault, PC[tid]);
#else // !FULL_SYSTEM
        // In syscall-emulation mode a fetch fault is unexpected; warn only.
        warn("%lli fault (%d) detected @ PC %08p", curTick, fault, PC[tid]);
#endif // FULL_SYSTEM
    }
}
1110
1111template<class Impl>
1112void
1113DefaultFetch<Impl>::recvRetry()
1114{
1115 assert(cacheBlocked);
1116 if (retryPkt != NULL) {
1117 assert(retryTid != -1);
1118 assert(fetchStatus[retryTid] == IcacheWaitRetry);
1119
1120 if (icachePort->sendTiming(retryPkt)) {
1121 fetchStatus[retryTid] = IcacheWaitResponse;
1122 retryPkt = NULL;
1123 retryTid = -1;
1124 cacheBlocked = false;
1125 }
1126 } else {
1127 assert(retryTid == -1);
1128 // Access has been squashed since it was sent out. Just clear
1129 // the cache being blocked.
1130 cacheBlocked = false;
1131 }
1132}
1133
1134///////////////////////////////////////
1135// //
1136// SMT FETCH POLICY MAINTAINED HERE //
1137// //
1138///////////////////////////////////////
1139template<class Impl>
1140int
1141DefaultFetch<Impl>::getFetchingThread(FetchPriority &fetch_priority)
1142{
1143 if (numThreads > 1) {
1144 switch (fetch_priority) {
1145
1146 case SingleThread:
1147 return 0;
1148
1149 case RoundRobin:
1150 return roundRobin();
1151
1152 case IQ:
1153 return iqCount();
1154
1155 case LSQ:
1156 return lsqCount();
1157
1158 case Branch:
1159 return branchCount();
1160
1161 default:
1162 return -1;
1163 }
1164 } else {
1165 int tid = *((*activeThreads).begin());
1166
1167 if (fetchStatus[tid] == Running ||
1168 fetchStatus[tid] == IcacheAccessComplete ||
1169 fetchStatus[tid] == Idle) {
1170 return tid;
1171 } else {
1172 return -1;
1173 }
1174 }
1175
1176}
1177
1178
1179template<class Impl>
1180int
1181DefaultFetch<Impl>::roundRobin()
1182{
1183 list<unsigned>::iterator pri_iter = priorityList.begin();
1184 list<unsigned>::iterator end = priorityList.end();
1185
1186 int high_pri;
1187
1188 while (pri_iter != end) {
1189 high_pri = *pri_iter;
1190
1191 assert(high_pri <= numThreads);
1192
1193 if (fetchStatus[high_pri] == Running ||
1194 fetchStatus[high_pri] == IcacheAccessComplete ||
1195 fetchStatus[high_pri] == Idle) {
1196
1197 priorityList.erase(pri_iter);
1198 priorityList.push_back(high_pri);
1199
1200 return high_pri;
1201 }
1202
1203 pri_iter++;
1204 }
1205
1206 return -1;
1207}
1208
1209template<class Impl>
1210int
1211DefaultFetch<Impl>::iqCount()
1212{
1213 priority_queue<unsigned> PQ;
1214
1215 list<unsigned>::iterator threads = (*activeThreads).begin();
1216
1217 while (threads != (*activeThreads).end()) {
1218 unsigned tid = *threads++;
1219
1220 PQ.push(fromIEW->iewInfo[tid].iqCount);
1221 }
1222
1223 while (!PQ.empty()) {
1224
1225 unsigned high_pri = PQ.top();
1226
1227 if (fetchStatus[high_pri] == Running ||
1228 fetchStatus[high_pri] == IcacheAccessComplete ||
1229 fetchStatus[high_pri] == Idle)
1230 return high_pri;
1231 else
1232 PQ.pop();
1233
1234 }
1235
1236 return -1;
1237}
1238
1239template<class Impl>
1240int
1241DefaultFetch<Impl>::lsqCount()
1242{
1243 priority_queue<unsigned> PQ;
1244
1245
1246 list<unsigned>::iterator threads = (*activeThreads).begin();
1247
1248 while (threads != (*activeThreads).end()) {
1249 unsigned tid = *threads++;
1250
1251 PQ.push(fromIEW->iewInfo[tid].ldstqCount);
1252 }
1253
1254 while (!PQ.empty()) {
1255
1256 unsigned high_pri = PQ.top();
1257
1258 if (fetchStatus[high_pri] == Running ||
1259 fetchStatus[high_pri] == IcacheAccessComplete ||
1260 fetchStatus[high_pri] == Idle)
1261 return high_pri;
1262 else
1263 PQ.pop();
1264
1265 }
1266
1267 return -1;
1268}
1269
1270template<class Impl>
1271int
1272DefaultFetch<Impl>::branchCount()
1273{
1274 list<unsigned>::iterator threads = (*activeThreads).begin();
1275
1276 return *threads;
1277}
583 memReq[tid] = NULL;
584 }
585
586 // Get rid of the retrying packet if it was from this thread.
587 if (retryTid == tid) {
588 assert(cacheBlocked);
589 cacheBlocked = false;
590 retryTid = -1;
591 retryPkt = NULL;
592 delete retryPkt->req;
593 delete retryPkt;
594 }
595
596 fetchStatus[tid] = Squashing;
597
598 ++fetchSquashCycles;
599}
600
601template<class Impl>
602void
603DefaultFetch<Impl>::squashFromDecode(const Addr &new_PC,
604 const InstSeqNum &seq_num,
605 unsigned tid)
606{
607 DPRINTF(Fetch, "[tid:%i]: Squashing from decode.\n",tid);
608
609 doSquash(new_PC, tid);
610
611 // Tell the CPU to remove any instructions that are in flight between
612 // fetch and decode.
613 cpu->removeInstsUntil(seq_num, tid);
614}
615
616template<class Impl>
617bool
618DefaultFetch<Impl>::checkStall(unsigned tid) const
619{
620 bool ret_val = false;
621
622 if (cpu->contextSwitch) {
623 DPRINTF(Fetch,"[tid:%i]: Stalling for a context switch.\n",tid);
624 ret_val = true;
625 } else if (stalls[tid].decode) {
626 DPRINTF(Fetch,"[tid:%i]: Stall from Decode stage detected.\n",tid);
627 ret_val = true;
628 } else if (stalls[tid].rename) {
629 DPRINTF(Fetch,"[tid:%i]: Stall from Rename stage detected.\n",tid);
630 ret_val = true;
631 } else if (stalls[tid].iew) {
632 DPRINTF(Fetch,"[tid:%i]: Stall from IEW stage detected.\n",tid);
633 ret_val = true;
634 } else if (stalls[tid].commit) {
635 DPRINTF(Fetch,"[tid:%i]: Stall from Commit stage detected.\n",tid);
636 ret_val = true;
637 }
638
639 return ret_val;
640}
641
642template<class Impl>
643typename DefaultFetch<Impl>::FetchStatus
644DefaultFetch<Impl>::updateFetchStatus()
645{
646 //Check Running
647 list<unsigned>::iterator threads = (*activeThreads).begin();
648
649 while (threads != (*activeThreads).end()) {
650
651 unsigned tid = *threads++;
652
653 if (fetchStatus[tid] == Running ||
654 fetchStatus[tid] == Squashing ||
655 fetchStatus[tid] == IcacheAccessComplete) {
656
657 if (_status == Inactive) {
658 DPRINTF(Activity, "[tid:%i]: Activating stage.\n",tid);
659
660 if (fetchStatus[tid] == IcacheAccessComplete) {
661 DPRINTF(Activity, "[tid:%i]: Activating fetch due to cache"
662 "completion\n",tid);
663 }
664
665 cpu->activateStage(FullCPU::FetchIdx);
666 }
667
668 return Active;
669 }
670 }
671
672 // Stage is switching from active to inactive, notify CPU of it.
673 if (_status == Active) {
674 DPRINTF(Activity, "Deactivating stage.\n");
675
676 cpu->deactivateStage(FullCPU::FetchIdx);
677 }
678
679 return Inactive;
680}
681
682template <class Impl>
683void
684DefaultFetch<Impl>::squash(const Addr &new_PC, unsigned tid)
685{
686 DPRINTF(Fetch, "[tid:%u]: Squash from commit.\n",tid);
687
688 doSquash(new_PC, tid);
689
690 // Tell the CPU to remove any instructions that are not in the ROB.
691 cpu->removeInstsNotInROB(tid);
692}
693
694template <class Impl>
695void
696DefaultFetch<Impl>::tick()
697{
698 list<unsigned>::iterator threads = (*activeThreads).begin();
699 bool status_change = false;
700
701 wroteToTimeBuffer = false;
702
703 while (threads != (*activeThreads).end()) {
704 unsigned tid = *threads++;
705
706 // Check the signals for each thread to determine the proper status
707 // for each thread.
708 bool updated_status = checkSignalsAndUpdate(tid);
709 status_change = status_change || updated_status;
710 }
711
712 DPRINTF(Fetch, "Running stage.\n");
713
714 // Reset the number of the instruction we're fetching.
715 numInst = 0;
716
717 if (fromCommit->commitInfo[0].interruptPending) {
718 interruptPending = true;
719 }
720 if (fromCommit->commitInfo[0].clearInterrupt) {
721 interruptPending = false;
722 }
723
724 for (threadFetched = 0; threadFetched < numFetchingThreads;
725 threadFetched++) {
726 // Fetch each of the actively fetching threads.
727 fetch(status_change);
728 }
729
730 // Record number of instructions fetched this cycle for distribution.
731 fetchNisnDist.sample(numInst);
732
733 if (status_change) {
734 // Change the fetch stage status if there was a status change.
735 _status = updateFetchStatus();
736 }
737
738 // If there was activity this cycle, inform the CPU of it.
739 if (wroteToTimeBuffer || cpu->contextSwitch) {
740 DPRINTF(Activity, "Activity this cycle.\n");
741
742 cpu->activityThisCycle();
743 }
744}
745
746template <class Impl>
747bool
748DefaultFetch<Impl>::checkSignalsAndUpdate(unsigned tid)
749{
750 // Update the per thread stall statuses.
751 if (fromDecode->decodeBlock[tid]) {
752 stalls[tid].decode = true;
753 }
754
755 if (fromDecode->decodeUnblock[tid]) {
756 assert(stalls[tid].decode);
757 assert(!fromDecode->decodeBlock[tid]);
758 stalls[tid].decode = false;
759 }
760
761 if (fromRename->renameBlock[tid]) {
762 stalls[tid].rename = true;
763 }
764
765 if (fromRename->renameUnblock[tid]) {
766 assert(stalls[tid].rename);
767 assert(!fromRename->renameBlock[tid]);
768 stalls[tid].rename = false;
769 }
770
771 if (fromIEW->iewBlock[tid]) {
772 stalls[tid].iew = true;
773 }
774
775 if (fromIEW->iewUnblock[tid]) {
776 assert(stalls[tid].iew);
777 assert(!fromIEW->iewBlock[tid]);
778 stalls[tid].iew = false;
779 }
780
781 if (fromCommit->commitBlock[tid]) {
782 stalls[tid].commit = true;
783 }
784
785 if (fromCommit->commitUnblock[tid]) {
786 assert(stalls[tid].commit);
787 assert(!fromCommit->commitBlock[tid]);
788 stalls[tid].commit = false;
789 }
790
791 // Check squash signals from commit.
792 if (fromCommit->commitInfo[tid].squash) {
793
794 DPRINTF(Fetch, "[tid:%u]: Squashing instructions due to squash "
795 "from commit.\n",tid);
796
797 // In any case, squash.
798 squash(fromCommit->commitInfo[tid].nextPC,tid);
799
800 // Also check if there's a mispredict that happened.
801 if (fromCommit->commitInfo[tid].branchMispredict) {
802 branchPred.squash(fromCommit->commitInfo[tid].doneSeqNum,
803 fromCommit->commitInfo[tid].nextPC,
804 fromCommit->commitInfo[tid].branchTaken,
805 tid);
806 } else {
807 branchPred.squash(fromCommit->commitInfo[tid].doneSeqNum,
808 tid);
809 }
810
811 return true;
812 } else if (fromCommit->commitInfo[tid].doneSeqNum) {
813 // Update the branch predictor if it wasn't a squashed instruction
814 // that was broadcasted.
815 branchPred.update(fromCommit->commitInfo[tid].doneSeqNum, tid);
816 }
817
818 // Check ROB squash signals from commit.
819 if (fromCommit->commitInfo[tid].robSquashing) {
820 DPRINTF(Fetch, "[tid:%u]: ROB is still squashing Thread %u.\n", tid);
821
822 // Continue to squash.
823 fetchStatus[tid] = Squashing;
824
825 return true;
826 }
827
828 // Check squash signals from decode.
829 if (fromDecode->decodeInfo[tid].squash) {
830 DPRINTF(Fetch, "[tid:%u]: Squashing instructions due to squash "
831 "from decode.\n",tid);
832
833 // Update the branch predictor.
834 if (fromDecode->decodeInfo[tid].branchMispredict) {
835 branchPred.squash(fromDecode->decodeInfo[tid].doneSeqNum,
836 fromDecode->decodeInfo[tid].nextPC,
837 fromDecode->decodeInfo[tid].branchTaken,
838 tid);
839 } else {
840 branchPred.squash(fromDecode->decodeInfo[tid].doneSeqNum,
841 tid);
842 }
843
844 if (fetchStatus[tid] != Squashing) {
845 // Squash unless we're already squashing
846 squashFromDecode(fromDecode->decodeInfo[tid].nextPC,
847 fromDecode->decodeInfo[tid].doneSeqNum,
848 tid);
849
850 return true;
851 }
852 }
853
854 if (checkStall(tid) && fetchStatus[tid] != IcacheWaitResponse) {
855 DPRINTF(Fetch, "[tid:%i]: Setting to blocked\n",tid);
856
857 fetchStatus[tid] = Blocked;
858
859 return true;
860 }
861
862 if (fetchStatus[tid] == Blocked ||
863 fetchStatus[tid] == Squashing) {
864 // Switch status to running if fetch isn't being told to block or
865 // squash this cycle.
866 DPRINTF(Fetch, "[tid:%i]: Done squashing, switching to running.\n",
867 tid);
868
869 fetchStatus[tid] = Running;
870
871 return true;
872 }
873
874 // If we've reached this point, we have not gotten any signals that
875 // cause fetch to change its status. Fetch remains the same as before.
876 return false;
877}
878
template<class Impl>
void
DefaultFetch<Impl>::fetch(bool &status_change)
{
    // Fetch a cache line's worth of instructions for one thread (chosen
    // by the SMT fetch policy) and enqueue them toward decode.  Sets
    // status_change when this thread's fetch status transitions.
    //////////////////////////////////////////
    // Start actual fetch
    //////////////////////////////////////////
    int tid = getFetchingThread(fetchPolicy);

    if (tid == -1) {
        DPRINTF(Fetch,"There are no more threads available to fetch from.\n");

        // Breaks looping condition in tick()
        threadFetched = numFetchingThreads;
        return;
    }

    // The current PC (a reference: assignments below redirect the thread).
    Addr &fetch_PC = PC[tid];

    // Fault code for memory access.
    Fault fault = NoFault;

    // If returning from the delay of a cache miss, then update the status
    // to running, otherwise do the cache access.  Possibly move this up
    // to tick() function.
    if (fetchStatus[tid] == IcacheAccessComplete) {
        DPRINTF(Fetch, "[tid:%i]: Icache miss is complete.\n",
                tid);

        fetchStatus[tid] = Running;
        status_change = true;
    } else if (fetchStatus[tid] == Running) {
        DPRINTF(Fetch, "[tid:%i]: Attempting to translate and read "
                "instruction, starting at PC %08p.\n",
                tid, fetch_PC);

        // Initiate the timing icache access; on failure (blocked cache,
        // pending interrupt, switched out) count the stall and bail out.
        bool fetch_success = fetchCacheLine(fetch_PC, fault, tid);
        if (!fetch_success) {
            ++fetchMiscStallCycles;
            return;
        }
    } else {
        // Thread cannot fetch this cycle; attribute the idle cycle to
        // the appropriate stall statistic.
        if (fetchStatus[tid] == Idle) {
            ++fetchIdleCycles;
        } else if (fetchStatus[tid] == Blocked) {
            ++fetchBlockedCycles;
        } else if (fetchStatus[tid] == Squashing) {
            ++fetchSquashCycles;
        } else if (fetchStatus[tid] == IcacheWaitResponse) {
            ++icacheStallCycles;
        }

        // Status is Idle, Squashing, Blocked, or IcacheWaitResponse, so
        // fetch should do nothing.
        return;
    }

    ++fetchCycles;

    // If we had a stall due to an icache miss, then return.
    if (fetchStatus[tid] == IcacheWaitResponse) {
        ++icacheStallCycles;
        status_change = true;
        return;
    }

    Addr next_PC = fetch_PC;
    InstSeqNum inst_seq;
    MachInst inst;
    ExtMachInst ext_inst;
    // Offset into the fetched cache block, aligned down to a 4-byte
    // instruction boundary.  @todo: Fix this hack.
    unsigned offset = (fetch_PC & cacheBlkMask) & ~3;

    if (fault == NoFault) {
        // If the read of the first instruction was successful, then grab the
        // instructions from the rest of the cache line and put them into the
        // queue heading to decode.

        DPRINTF(Fetch, "[tid:%i]: Adding instructions to queue to "
                "decode.\n",tid);

        // Need to keep track of whether or not a predicted branch
        // ended this fetch block.
        bool predicted_branch = false;

        // Decode up to a full fetch width of instructions, stopping at
        // the end of the cache block or at a predicted-taken branch.
        for (;
             offset < cacheBlkSize &&
                 numInst < fetchWidth &&
                 !predicted_branch;
             ++numInst) {

            // Get a sequence number.
            inst_seq = cpu->getAndIncrementInstSeq();

            // Make sure this is a valid index.
            assert(offset <= cacheBlkSize - instSize);

            // Get the instruction from the array of the cache line,
            // converting from guest to host byte order.
            inst = gtoh(*reinterpret_cast<MachInst *>
                        (&cacheData[tid][offset]));

            ext_inst = TheISA::makeExtMI(inst, fetch_PC);

            // Create a new DynInst from the instruction fetched.
            DynInstPtr instruction = new DynInst(ext_inst, fetch_PC,
                                                 next_PC,
                                                 inst_seq, cpu);
            instruction->setThread(tid);

            instruction->setASID(tid);

            instruction->setState(cpu->thread[tid]);

            DPRINTF(Fetch, "[tid:%i]: Instruction PC %#x created "
                    "[sn:%lli]\n",
                    tid, instruction->readPC(), inst_seq);

            DPRINTF(Fetch, "[tid:%i]: Instruction is: %s\n",
                    tid, instruction->staticInst->disassemble(fetch_PC));

            instruction->traceData =
                Trace::getInstRecord(curTick, cpu->tcBase(tid), cpu,
                                     instruction->staticInst,
                                     instruction->readPC(),tid);

            // Predict the next PC; returns true for a predicted-taken
            // branch, which terminates this fetch block.
            predicted_branch = lookupAndUpdateNextPC(instruction, next_PC);

            // Add instruction to the CPU's list of instructions.
            instruction->setInstListIt(cpu->addInst(instruction));

            // Write the instruction to the first slot in the queue
            // that heads to decode.
            toDecode->insts[numInst] = instruction;

            toDecode->size++;

            // Increment stat of fetched instructions.
            ++fetchedInsts;

            // Move to the next instruction, unless we have a branch.
            fetch_PC = next_PC;

            if (instruction->isQuiesce()) {
                warn("%lli: Quiesce instruction encountered, halting fetch!",
                     curTick);
                fetchStatus[tid] = QuiescePending;
                ++numInst;
                status_change = true;
                break;
            }

            offset+= instSize;
        }
    }

    if (numInst > 0) {
        wroteToTimeBuffer = true;
    }

    // Now that fetching is completed, update the PC to signify what the next
    // cycle will be.
    if (fault == NoFault) {
        DPRINTF(Fetch, "[tid:%i]: Setting PC to %08p.\n",tid, next_PC);

        PC[tid] = next_PC;
        nextPC[tid] = next_PC + instSize;
    } else {
        // We shouldn't be in an icache miss and also have a fault (an ITB
        // miss)
        if (fetchStatus[tid] == IcacheWaitResponse) {
            panic("Fetch should have exited prior to this!");
        }

        // Send the fault to commit.  This thread will not do anything
        // until commit handles the fault.  The only other way it can
        // wake up is if a squash comes along and changes the PC.
#if FULL_SYSTEM
        assert(numInst != fetchWidth);
        // Get a sequence number.
        inst_seq = cpu->getAndIncrementInstSeq();
        // We will use a nop in order to carry the fault.
        ext_inst = TheISA::NoopMachInst;

        // Create a new DynInst from the dummy nop.
        DynInstPtr instruction = new DynInst(ext_inst, fetch_PC,
                                             next_PC,
                                             inst_seq, cpu);
        instruction->setPredTarg(next_PC + instSize);
        instruction->setThread(tid);

        instruction->setASID(tid);

        instruction->setState(cpu->thread[tid]);

        instruction->traceData = NULL;

        instruction->setInstListIt(cpu->addInst(instruction));

        instruction->fault = fault;

        toDecode->insts[numInst] = instruction;
        toDecode->size++;

        DPRINTF(Fetch, "[tid:%i]: Blocked, need to handle the trap.\n",tid);

        fetchStatus[tid] = TrapPending;
        status_change = true;

        warn("%lli fault (%d) detected @ PC %08p", curTick, fault, PC[tid]);
#else // !FULL_SYSTEM
        // In syscall-emulation mode a fetch fault is unexpected; warn only.
        warn("%lli fault (%d) detected @ PC %08p", curTick, fault, PC[tid]);
#endif // FULL_SYSTEM
    }
}
1094
1095template<class Impl>
1096void
1097DefaultFetch<Impl>::recvRetry()
1098{
1099 assert(cacheBlocked);
1100 if (retryPkt != NULL) {
1101 assert(retryTid != -1);
1102 assert(fetchStatus[retryTid] == IcacheWaitRetry);
1103
1104 if (icachePort->sendTiming(retryPkt)) {
1105 fetchStatus[retryTid] = IcacheWaitResponse;
1106 retryPkt = NULL;
1107 retryTid = -1;
1108 cacheBlocked = false;
1109 }
1110 } else {
1111 assert(retryTid == -1);
1112 // Access has been squashed since it was sent out. Just clear
1113 // the cache being blocked.
1114 cacheBlocked = false;
1115 }
1116}
1117
1118///////////////////////////////////////
1119// //
1120// SMT FETCH POLICY MAINTAINED HERE //
1121// //
1122///////////////////////////////////////
1123template<class Impl>
1124int
1125DefaultFetch<Impl>::getFetchingThread(FetchPriority &fetch_priority)
1126{
1127 if (numThreads > 1) {
1128 switch (fetch_priority) {
1129
1130 case SingleThread:
1131 return 0;
1132
1133 case RoundRobin:
1134 return roundRobin();
1135
1136 case IQ:
1137 return iqCount();
1138
1139 case LSQ:
1140 return lsqCount();
1141
1142 case Branch:
1143 return branchCount();
1144
1145 default:
1146 return -1;
1147 }
1148 } else {
1149 int tid = *((*activeThreads).begin());
1150
1151 if (fetchStatus[tid] == Running ||
1152 fetchStatus[tid] == IcacheAccessComplete ||
1153 fetchStatus[tid] == Idle) {
1154 return tid;
1155 } else {
1156 return -1;
1157 }
1158 }
1159
1160}
1161
1162
1163template<class Impl>
1164int
1165DefaultFetch<Impl>::roundRobin()
1166{
1167 list<unsigned>::iterator pri_iter = priorityList.begin();
1168 list<unsigned>::iterator end = priorityList.end();
1169
1170 int high_pri;
1171
1172 while (pri_iter != end) {
1173 high_pri = *pri_iter;
1174
1175 assert(high_pri <= numThreads);
1176
1177 if (fetchStatus[high_pri] == Running ||
1178 fetchStatus[high_pri] == IcacheAccessComplete ||
1179 fetchStatus[high_pri] == Idle) {
1180
1181 priorityList.erase(pri_iter);
1182 priorityList.push_back(high_pri);
1183
1184 return high_pri;
1185 }
1186
1187 pri_iter++;
1188 }
1189
1190 return -1;
1191}
1192
1193template<class Impl>
1194int
1195DefaultFetch<Impl>::iqCount()
1196{
1197 priority_queue<unsigned> PQ;
1198
1199 list<unsigned>::iterator threads = (*activeThreads).begin();
1200
1201 while (threads != (*activeThreads).end()) {
1202 unsigned tid = *threads++;
1203
1204 PQ.push(fromIEW->iewInfo[tid].iqCount);
1205 }
1206
1207 while (!PQ.empty()) {
1208
1209 unsigned high_pri = PQ.top();
1210
1211 if (fetchStatus[high_pri] == Running ||
1212 fetchStatus[high_pri] == IcacheAccessComplete ||
1213 fetchStatus[high_pri] == Idle)
1214 return high_pri;
1215 else
1216 PQ.pop();
1217
1218 }
1219
1220 return -1;
1221}
1222
1223template<class Impl>
1224int
1225DefaultFetch<Impl>::lsqCount()
1226{
1227 priority_queue<unsigned> PQ;
1228
1229
1230 list<unsigned>::iterator threads = (*activeThreads).begin();
1231
1232 while (threads != (*activeThreads).end()) {
1233 unsigned tid = *threads++;
1234
1235 PQ.push(fromIEW->iewInfo[tid].ldstqCount);
1236 }
1237
1238 while (!PQ.empty()) {
1239
1240 unsigned high_pri = PQ.top();
1241
1242 if (fetchStatus[high_pri] == Running ||
1243 fetchStatus[high_pri] == IcacheAccessComplete ||
1244 fetchStatus[high_pri] == Idle)
1245 return high_pri;
1246 else
1247 PQ.pop();
1248
1249 }
1250
1251 return -1;
1252}
1253
1254template<class Impl>
1255int
1256DefaultFetch<Impl>::branchCount()
1257{
1258 list<unsigned>::iterator threads = (*activeThreads).begin();
1259
1260 return *threads;
1261}