1/*
2 * Copyright (c) 2004-2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Kevin Lim
29 */
30
31#include "arch/isa_traits.hh"
32#include "arch/utility.hh"
33#include "cpu/checker/cpu.hh"
34#include "cpu/exetrace.hh"
35#include "cpu/o3/fetch.hh"
36#include "mem/packet.hh"
37#include "mem/request.hh"
38#include "sim/byteswap.hh"
39#include "sim/host.hh"
40#include "sim/root.hh"
41
42#if FULL_SYSTEM
43#include "arch/tlb.hh"
44#include "arch/vtophys.hh"
45#include "base/remote_gdb.hh"
46#include "sim/system.hh"
47#endif // FULL_SYSTEM
48
49#include <algorithm>
#include <list>
#include <queue>
#include <string>
#include <utility>
50
51using namespace std;
52using namespace TheISA;
53
54template<class Impl>
55Tick
56DefaultFetch<Impl>::IcachePort::recvAtomic(PacketPtr pkt)
57{
58 panic("DefaultFetch doesn't expect recvAtomic callback!");
59 return curTick;
60}
61
62template<class Impl>
63void
64DefaultFetch<Impl>::IcachePort::recvFunctional(PacketPtr pkt)
65{
66 panic("DefaultFetch doesn't expect recvFunctional callback!");
67}
68
69template<class Impl>
70void
71DefaultFetch<Impl>::IcachePort::recvStatusChange(Status status)
72{
73 if (status == RangeChange)
74 return;
75
76 panic("DefaultFetch doesn't expect recvStatusChange callback!");
77}
78
79template<class Impl>
80bool
81DefaultFetch<Impl>::IcachePort::recvTiming(Packet *pkt)
82{
83 fetch->processCacheCompletion(pkt);
84 return true;
85}
86
87template<class Impl>
88void
89DefaultFetch<Impl>::IcachePort::recvRetry()
90{
91 fetch->recvRetry();
92}
93
94template<class Impl>
95DefaultFetch<Impl>::DefaultFetch(Params *params)
96 : mem(params->mem),
97 branchPred(params),
98 decodeToFetchDelay(params->decodeToFetchDelay),
99 renameToFetchDelay(params->renameToFetchDelay),
100 iewToFetchDelay(params->iewToFetchDelay),
101 commitToFetchDelay(params->commitToFetchDelay),
102 fetchWidth(params->fetchWidth),
103 cacheBlocked(false),
104 retryPkt(NULL),
105 retryTid(-1),
106 numThreads(params->numberOfThreads),
107 numFetchingThreads(params->smtNumFetchingThreads),
108 interruptPending(false),
109 switchedOut(false)
110{
111 if (numThreads > Impl::MaxThreads)
112 fatal("numThreads is not a valid value\n");
113
114 DPRINTF(Fetch, "Fetch constructor called\n");
115
116 // Set fetch stage's status to inactive.
117 _status = Inactive;
118
119 string policy = params->smtFetchPolicy;
120
121 // Convert string to lowercase
122 std::transform(policy.begin(), policy.end(), policy.begin(),
123 (int(*)(int)) tolower);
124
125 // Figure out fetch policy
126 if (policy == "singlethread") {
127 fetchPolicy = SingleThread;
128 } else if (policy == "roundrobin") {
129 fetchPolicy = RoundRobin;
130 DPRINTF(Fetch, "Fetch policy set to Round Robin\n");
131 } else if (policy == "branch") {
132 fetchPolicy = Branch;
133 DPRINTF(Fetch, "Fetch policy set to Branch Count\n");
134 } else if (policy == "iqcount") {
135 fetchPolicy = IQ;
136 DPRINTF(Fetch, "Fetch policy set to IQ count\n");
137 } else if (policy == "lsqcount") {
138 fetchPolicy = LSQ;
139 DPRINTF(Fetch, "Fetch policy set to LSQ count\n");
140 } else {
141 fatal("Invalid Fetch Policy. Options Are: {SingleThread,"
142 " RoundRobin,LSQcount,IQcount}\n");
143 }
144
145 // Size of cache block.
146 cacheBlkSize = 64;
147
148 // Create mask to get rid of offset bits.
149 cacheBlkMask = (cacheBlkSize - 1);
150
151 for (int tid=0; tid < numThreads; tid++) {
152
153 fetchStatus[tid] = Running;
154
155 priorityList.push_back(tid);
156
157 memReq[tid] = NULL;
158
159 // Create space to store a cache line.
160 cacheData[tid] = new uint8_t[cacheBlkSize];
161
162 stalls[tid].decode = 0;
163 stalls[tid].rename = 0;
164 stalls[tid].iew = 0;
165 stalls[tid].commit = 0;
166 }
167
168 // Get the size of an instruction.
169 instSize = sizeof(MachInst);
170}
171
172template <class Impl>
173std::string
174DefaultFetch<Impl>::name() const
175{
176 return cpu->name() + ".fetch";
177}
178
179template <class Impl>
180void
181DefaultFetch<Impl>::regStats()
182{
183 icacheStallCycles
184 .name(name() + ".icacheStallCycles")
185 .desc("Number of cycles fetch is stalled on an Icache miss")
186 .prereq(icacheStallCycles);
187
188 fetchedInsts
189 .name(name() + ".Insts")
190 .desc("Number of instructions fetch has processed")
191 .prereq(fetchedInsts);
192
193 fetchedBranches
194 .name(name() + ".Branches")
195 .desc("Number of branches that fetch encountered")
196 .prereq(fetchedBranches);
197
198 predictedBranches
199 .name(name() + ".predictedBranches")
200 .desc("Number of branches that fetch has predicted taken")
201 .prereq(predictedBranches);
202
203 fetchCycles
204 .name(name() + ".Cycles")
205 .desc("Number of cycles fetch has run and was not squashing or"
206 " blocked")
207 .prereq(fetchCycles);
208
209 fetchSquashCycles
210 .name(name() + ".SquashCycles")
211 .desc("Number of cycles fetch has spent squashing")
212 .prereq(fetchSquashCycles);
213
214 fetchIdleCycles
215 .name(name() + ".IdleCycles")
216 .desc("Number of cycles fetch was idle")
217 .prereq(fetchIdleCycles);
218
219 fetchBlockedCycles
220 .name(name() + ".BlockedCycles")
221 .desc("Number of cycles fetch has spent blocked")
222 .prereq(fetchBlockedCycles);
223
224 fetchedCacheLines
225 .name(name() + ".CacheLines")
226 .desc("Number of cache lines fetched")
227 .prereq(fetchedCacheLines);
228
229 fetchMiscStallCycles
230 .name(name() + ".MiscStallCycles")
231 .desc("Number of cycles fetch has spent waiting on interrupts, or "
232 "bad addresses, or out of MSHRs")
233 .prereq(fetchMiscStallCycles);
234
235 fetchIcacheSquashes
236 .name(name() + ".IcacheSquashes")
237 .desc("Number of outstanding Icache misses that were squashed")
238 .prereq(fetchIcacheSquashes);
239
240 fetchNisnDist
241 .init(/* base value */ 0,
242 /* last value */ fetchWidth,
243 /* bucket size */ 1)
244 .name(name() + ".rateDist")
245 .desc("Number of instructions fetched each cycle (Total)")
246 .flags(Stats::pdf);
247
248 idleRate
249 .name(name() + ".idleRate")
250 .desc("Percent of cycles fetch was idle")
251 .prereq(idleRate);
252 idleRate = fetchIdleCycles * 100 / cpu->numCycles;
253
254 branchRate
255 .name(name() + ".branchRate")
256 .desc("Number of branch fetches per cycle")
257 .flags(Stats::total);
258 branchRate = fetchedBranches / cpu->numCycles;
259
260 fetchRate
261 .name(name() + ".rate")
262 .desc("Number of inst fetches per cycle")
263 .flags(Stats::total);
264 fetchRate = fetchedInsts / cpu->numCycles;
265
266 branchPred.regStats();
267}
268
269template<class Impl>
270void
271DefaultFetch<Impl>::setCPU(FullCPU *cpu_ptr)
272{
273 DPRINTF(Fetch, "Setting the CPU pointer.\n");
274 cpu = cpu_ptr;
275
276 // Name is finally available, so create the port.
277 icachePort = new IcachePort(this);
278
279 Port *mem_dport = mem->getPort("");
280 icachePort->setPeer(mem_dport);
281 mem_dport->setPeer(icachePort);
282
283 if (cpu->checker) {
284 cpu->checker->setIcachePort(icachePort);
285 }
286
287 // Fetch needs to start fetching instructions at the very beginning,
288 // so it must start up in active state.
289 switchToActive();
290}
291
292template<class Impl>
293void
294DefaultFetch<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *time_buffer)
295{
296 DPRINTF(Fetch, "Setting the time buffer pointer.\n");
297 timeBuffer = time_buffer;
298
299 // Create wires to get information from proper places in time buffer.
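    // getWire(-delay) returns a handle that always reads the time buffer
    // entry written 'delay' cycles ago, which is how the communication
    // latency from each downstream stage back to fetch is modeled.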
300 fromDecode = timeBuffer->getWire(-decodeToFetchDelay);
301 fromRename = timeBuffer->getWire(-renameToFetchDelay);
302 fromIEW = timeBuffer->getWire(-iewToFetchDelay);
303 fromCommit = timeBuffer->getWire(-commitToFetchDelay);
304}
305
306template<class Impl>
307void
308DefaultFetch<Impl>::setActiveThreads(list<unsigned> *at_ptr)
309{
310 DPRINTF(Fetch, "Setting active threads list pointer.\n");
311 activeThreads = at_ptr;
312}
313
314template<class Impl>
315void
316DefaultFetch<Impl>::setFetchQueue(TimeBuffer<FetchStruct> *fq_ptr)
317{
318 DPRINTF(Fetch, "Setting the fetch queue pointer.\n");
319 fetchQueue = fq_ptr;
320
321 // Create wire to write information to proper place in fetch queue.
322 toDecode = fetchQueue->getWire(0);
323}
324
325template<class Impl>
326void
327DefaultFetch<Impl>::initStage()
328{
329 // Setup PC and nextPC with initial state.
330 for (int tid = 0; tid < numThreads; tid++) {
331 PC[tid] = cpu->readPC(tid);
332 nextPC[tid] = cpu->readNextPC(tid);
333 }
334}
335
336template<class Impl>
337void
338DefaultFetch<Impl>::processCacheCompletion(PacketPtr pkt)
339{
340 unsigned tid = pkt->req->getThreadNum();
341
342 DPRINTF(Fetch, "[tid:%u] Waking up from cache miss.\n",tid);
343
344 // Only change the status if it's still waiting on the icache access
345 // to return.
346 if (fetchStatus[tid] != IcacheWaitResponse ||
347 pkt->req != memReq[tid] ||
348 isSwitchedOut()) {
349 ++fetchIcacheSquashes;
350 delete pkt->req;
351 delete pkt;
352 memReq[tid] = NULL;
353 return;
354 }
355
356 // Wake up the CPU (if it went to sleep and was waiting on this completion
357 // event).
358 cpu->wakeCPU();
359
360 DPRINTF(Activity, "[tid:%u] Activating fetch due to cache completion\n",
361 tid);
362
363 switchToActive();
364
365 // Only switch to IcacheAccessComplete if we're not stalled as well.
366 if (checkStall(tid)) {
367 fetchStatus[tid] = Blocked;
368 } else {
369 fetchStatus[tid] = IcacheAccessComplete;
370 }
371
372 // Reset the mem req to NULL.
373 delete pkt->req;
374 delete pkt;
375 memReq[tid] = NULL;
376}
377
378template <class Impl>
379void
380DefaultFetch<Impl>::switchOut()
381{
382 // Fetch is ready to switch out at any time.
383 switchedOut = true;
384 cpu->signalSwitched();
385}
386
387template <class Impl>
388void
389DefaultFetch<Impl>::doSwitchOut()
390{
391 // Branch predictor needs to have its state cleared.
392 branchPred.switchOut();
393}
394
395template <class Impl>
396void
397DefaultFetch<Impl>::takeOverFrom()
398{
399 // Reset all state
400 for (int i = 0; i < Impl::MaxThreads; ++i) {
401 stalls[i].decode = 0;
402 stalls[i].rename = 0;
403 stalls[i].iew = 0;
404 stalls[i].commit = 0;
405 PC[i] = cpu->readPC(i);
406 nextPC[i] = cpu->readNextPC(i);
407 fetchStatus[i] = Running;
408 }
409 numInst = 0;
410 wroteToTimeBuffer = false;
411 _status = Inactive;
412 switchedOut = false;
413 branchPred.takeOverFrom();
414}
415
416template <class Impl>
417void
418DefaultFetch<Impl>::wakeFromQuiesce()
419{
420 DPRINTF(Fetch, "Waking up from quiesce\n");
421 // Hopefully this is safe
422 // @todo: Allow other threads to wake from quiesce.
423 fetchStatus[0] = Running;
424}
425
426template <class Impl>
427inline void
428DefaultFetch<Impl>::switchToActive()
429{
430 if (_status == Inactive) {
431 DPRINTF(Activity, "Activating stage.\n");
432
433 cpu->activateStage(FullCPU::FetchIdx);
434
435 _status = Active;
436 }
437}
438
439template <class Impl>
440inline void
441DefaultFetch<Impl>::switchToInactive()
442{
443 if (_status == Active) {
444 DPRINTF(Activity, "Deactivating stage.\n");
445
446 cpu->deactivateStage(FullCPU::FetchIdx);
447
448 _status = Inactive;
449 }
450}
451
452template <class Impl>
453bool
454DefaultFetch<Impl>::lookupAndUpdateNextPC(DynInstPtr &inst, Addr &next_PC)
455{
456 // Do branch prediction check here.
457 // A bit of a misnomer...next_PC is actually the current PC until
458 // this function updates it.
459 bool predict_taken;
460
461 if (!inst->isControl()) {
462 next_PC = next_PC + instSize;
463 inst->setPredTarg(next_PC);
464 return false;
465 }
466
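    // predict() returns the taken/not-taken prediction and updates next_PC
    // in place: to the predicted target when taken, otherwise to the next
    // sequential PC.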
467 predict_taken = branchPred.predict(inst, next_PC, inst->threadNumber);
468
469 ++fetchedBranches;
470
471 if (predict_taken) {
472 ++predictedBranches;
473 }
474
475 return predict_taken;
476}
477
478template <class Impl>
479bool
480DefaultFetch<Impl>::fetchCacheLine(Addr fetch_PC, Fault &ret_fault, unsigned tid)
481{
482 Fault fault = NoFault;
483
484#if FULL_SYSTEM
485 // Flag to say whether or not address is physical addr.
486 unsigned flags = cpu->inPalMode(fetch_PC) ? PHYSICAL : 0;
487#else
488 unsigned flags = 0;
489#endif // FULL_SYSTEM
490
491 if (cacheBlocked || (interruptPending && flags == 0) || switchedOut) {
492 // Hold off fetch from getting new instructions when:
493 // Cache is blocked, or
494 // while an interrupt is pending and we're not in PAL mode, or
495 // fetch is switched out.
496 return false;
497 }
498
499 // Align the fetch PC so it's at the start of a cache block.
500 fetch_PC = icacheBlockAlignPC(fetch_PC);
501
502 // Setup the memReq to do a read of the first instruction's address.
503 // Set the appropriate read size and flags as well.
504 // Build request here.
505 RequestPtr mem_req = new Request(tid, fetch_PC, cacheBlkSize, flags,
506 fetch_PC, cpu->readCpuId(), tid);
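    // Here the thread id doubles as the ASID (the first constructor
    // argument), and fetch_PC is passed as both the virtual address and
    // the PC of the request.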
507
508 memReq[tid] = mem_req;
509
510 // Translate the instruction request.
511 fault = cpu->translateInstReq(mem_req, cpu->thread[tid]);
512
513 // In the case of faults, the fetch stage may need to stall and wait
514 // for the ITB miss to be handled.
515
516 // If translation was successful, attempt to read the first
517 // instruction.
518 if (fault == NoFault) {
519#if 0
520 if (cpu->system->memctrl->badaddr(memReq[tid]->paddr) ||
521 memReq[tid]->flags & UNCACHEABLE) {
522 DPRINTF(Fetch, "Fetch: Bad address %#x (hopefully on a "
523 "misspeculating path)!",
524 memReq[tid]->paddr);
525 ret_fault = TheISA::genMachineCheckFault();
526 return false;
527 }
528#endif
529
530 // Build packet here.
531 PacketPtr data_pkt = new Packet(mem_req,
532 Packet::ReadReq, Packet::Broadcast);
533 data_pkt->dataStatic(cacheData[tid]);
534
535 DPRINTF(Fetch, "Fetch: Doing instruction read.\n");
536
537 fetchedCacheLines++;
538
539 // Now do the timing access to see whether or not the instruction
540 // exists within the cache.
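    // sendTiming() returns false if the port cannot accept the packet
    // this cycle (e.g. the cache has no free MSHRs); in that case the
    // packet is stashed and re-sent later from recvRetry().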
541 if (!icachePort->sendTiming(data_pkt)) {
542 assert(retryPkt == NULL);
543 assert(retryTid == -1);
544 DPRINTF(Fetch, "[tid:%i] Out of MSHRs!\n", tid);
545 fetchStatus[tid] = IcacheWaitRetry;
546 retryPkt = data_pkt;
547 retryTid = tid;
548 cacheBlocked = true;
549 return false;
550 }
551
552 DPRINTF(Fetch, "Doing cache access.\n");
553
554 lastIcacheStall[tid] = curTick;
555
556 DPRINTF(Activity, "[tid:%i]: Activity: Waiting on I-cache "
557 "response.\n", tid);
558
559 fetchStatus[tid] = IcacheWaitResponse;
560 } else {
561 delete mem_req;
562 memReq[tid] = NULL;
563 }
564
565 ret_fault = fault;
566 return true;
567}
568
569template <class Impl>
570inline void
571DefaultFetch<Impl>::doSquash(const Addr &new_PC, unsigned tid)
572{
573 DPRINTF(Fetch, "[tid:%i]: Squashing, setting PC to: %#x.\n",
574 tid, new_PC);
575
576 PC[tid] = new_PC;
577 nextPC[tid] = new_PC + instSize;
578
579 // Clear the icache miss if it's outstanding.
580 if (fetchStatus[tid] == IcacheWaitResponse) {
581 DPRINTF(Fetch, "[tid:%i]: Squashing outstanding Icache miss.\n",
582 tid);
583 memReq[tid] = NULL;
584 }
585
586 // Get rid of the retrying packet if it was from this thread.
587 if (retryTid == tid) {
588 assert(cacheBlocked);
589 cacheBlocked = false;
590 retryTid = -1;
591 delete retryPkt->req;
592 delete retryPkt;
593 retryPkt = NULL;
594 }
595
596 fetchStatus[tid] = Squashing;
597
598 ++fetchSquashCycles;
599}
600
601template<class Impl>
602void
603DefaultFetch<Impl>::squashFromDecode(const Addr &new_PC,
604 const InstSeqNum &seq_num,
605 unsigned tid)
606{
607 DPRINTF(Fetch, "[tid:%i]: Squashing from decode.\n",tid);
608
609 doSquash(new_PC, tid);
610
611 // Tell the CPU to remove any instructions that are in flight between
612 // fetch and decode.
613 cpu->removeInstsUntil(seq_num, tid);
614}
615
616template<class Impl>
617bool
618DefaultFetch<Impl>::checkStall(unsigned tid) const
619{
620 bool ret_val = false;
621
622 if (cpu->contextSwitch) {
623 DPRINTF(Fetch,"[tid:%i]: Stalling for a context switch.\n",tid);
624 ret_val = true;
625 } else if (stalls[tid].decode) {
626 DPRINTF(Fetch,"[tid:%i]: Stall from Decode stage detected.\n",tid);
627 ret_val = true;
628 } else if (stalls[tid].rename) {
629 DPRINTF(Fetch,"[tid:%i]: Stall from Rename stage detected.\n",tid);
630 ret_val = true;
631 } else if (stalls[tid].iew) {
632 DPRINTF(Fetch,"[tid:%i]: Stall from IEW stage detected.\n",tid);
633 ret_val = true;
634 } else if (stalls[tid].commit) {
635 DPRINTF(Fetch,"[tid:%i]: Stall from Commit stage detected.\n",tid);
636 ret_val = true;
637 }
638
639 return ret_val;
640}
641
642template<class Impl>
643typename DefaultFetch<Impl>::FetchStatus
644DefaultFetch<Impl>::updateFetchStatus()
645{
646 //Check Running
647 list<unsigned>::iterator threads = (*activeThreads).begin();
648
649 while (threads != (*activeThreads).end()) {
650
651 unsigned tid = *threads++;
652
653 if (fetchStatus[tid] == Running ||
654 fetchStatus[tid] == Squashing ||
655 fetchStatus[tid] == IcacheAccessComplete) {
656
657 if (_status == Inactive) {
658 DPRINTF(Activity, "[tid:%i]: Activating stage.\n",tid);
659
660 if (fetchStatus[tid] == IcacheAccessComplete) {
661 DPRINTF(Activity, "[tid:%i]: Activating fetch due to cache"
662 "completion\n",tid);
663 }
664
665 cpu->activateStage(FullCPU::FetchIdx);
666 }
667
668 return Active;
669 }
670 }
671
672 // Stage is switching from active to inactive, notify CPU of it.
673 if (_status == Active) {
674 DPRINTF(Activity, "Deactivating stage.\n");
675
676 cpu->deactivateStage(FullCPU::FetchIdx);
677 }
678
679 return Inactive;
680}
681
682template <class Impl>
683void
684DefaultFetch<Impl>::squash(const Addr &new_PC, unsigned tid)
685{
686 DPRINTF(Fetch, "[tid:%u]: Squash from commit.\n",tid);
687
688 doSquash(new_PC, tid);
689
690 // Tell the CPU to remove any instructions that are not in the ROB.
691 cpu->removeInstsNotInROB(tid);
692}
693
694template <class Impl>
695void
696DefaultFetch<Impl>::tick()
697{
698 list<unsigned>::iterator threads = (*activeThreads).begin();
699 bool status_change = false;
700
701 wroteToTimeBuffer = false;
702
703 while (threads != (*activeThreads).end()) {
704 unsigned tid = *threads++;
705
706 // Check the signals for each thread to determine the proper status
707 // for each thread.
708 bool updated_status = checkSignalsAndUpdate(tid);
709 status_change = status_change || updated_status;
710 }
711
712 DPRINTF(Fetch, "Running stage.\n");
713
714 // Reset the number of the instruction we're fetching.
715 numInst = 0;
716
717 if (fromCommit->commitInfo[0].interruptPending) {
718 interruptPending = true;
719 }
720 if (fromCommit->commitInfo[0].clearInterrupt) {
721 interruptPending = false;
722 }
723
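    // fetch() is called once per fetching thread; each call asks the SMT
    // fetch policy (via getFetchingThread()) which thread to fetch from.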
724 for (threadFetched = 0; threadFetched < numFetchingThreads;
725 threadFetched++) {
726 // Fetch each of the actively fetching threads.
727 fetch(status_change);
728 }
729
730 // Record number of instructions fetched this cycle for distribution.
731 fetchNisnDist.sample(numInst);
732
733 if (status_change) {
734 // Change the fetch stage status if there was a status change.
735 _status = updateFetchStatus();
736 }
737
738 // If there was activity this cycle, inform the CPU of it.
739 if (wroteToTimeBuffer || cpu->contextSwitch) {
740 DPRINTF(Activity, "Activity this cycle.\n");
741
742 cpu->activityThisCycle();
743 }
744}
745
746template <class Impl>
747bool
748DefaultFetch<Impl>::checkSignalsAndUpdate(unsigned tid)
749{
750 // Update the per thread stall statuses.
751 if (fromDecode->decodeBlock[tid]) {
752 stalls[tid].decode = true;
753 }
754
755 if (fromDecode->decodeUnblock[tid]) {
756 assert(stalls[tid].decode);
757 assert(!fromDecode->decodeBlock[tid]);
758 stalls[tid].decode = false;
759 }
760
761 if (fromRename->renameBlock[tid]) {
762 stalls[tid].rename = true;
763 }
764
765 if (fromRename->renameUnblock[tid]) {
766 assert(stalls[tid].rename);
767 assert(!fromRename->renameBlock[tid]);
768 stalls[tid].rename = false;
769 }
770
771 if (fromIEW->iewBlock[tid]) {
772 stalls[tid].iew = true;
773 }
774
775 if (fromIEW->iewUnblock[tid]) {
776 assert(stalls[tid].iew);
777 assert(!fromIEW->iewBlock[tid]);
778 stalls[tid].iew = false;
779 }
780
781 if (fromCommit->commitBlock[tid]) {
782 stalls[tid].commit = true;
783 }
784
785 if (fromCommit->commitUnblock[tid]) {
786 assert(stalls[tid].commit);
787 assert(!fromCommit->commitBlock[tid]);
788 stalls[tid].commit = false;
789 }
790
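    // The checks below are ordered by priority: a squash from commit
    // overrides everything else, then decode squashes, and only after
    // those are the stall signals allowed to block fetch.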
791 // Check squash signals from commit.
792 if (fromCommit->commitInfo[tid].squash) {
793
794 DPRINTF(Fetch, "[tid:%u]: Squashing instructions due to squash "
795 "from commit.\n",tid);
796
797 // In any case, squash.
798 squash(fromCommit->commitInfo[tid].nextPC,tid);
799
800 // Also check if there's a mispredict that happened.
801 if (fromCommit->commitInfo[tid].branchMispredict) {
802 branchPred.squash(fromCommit->commitInfo[tid].doneSeqNum,
803 fromCommit->commitInfo[tid].nextPC,
804 fromCommit->commitInfo[tid].branchTaken,
805 tid);
806 } else {
807 branchPred.squash(fromCommit->commitInfo[tid].doneSeqNum,
808 tid);
809 }
810
811 return true;
812 } else if (fromCommit->commitInfo[tid].doneSeqNum) {
813 // Update the branch predictor if it wasn't a squashed instruction
814 // that was broadcasted.
815 branchPred.update(fromCommit->commitInfo[tid].doneSeqNum, tid);
816 }
817
818 // Check ROB squash signals from commit.
819 if (fromCommit->commitInfo[tid].robSquashing) {
820 DPRINTF(Fetch, "[tid:%u]: ROB is still squashing.\n", tid);
821
822 // Continue to squash.
823 fetchStatus[tid] = Squashing;
824
825 return true;
826 }
827
828 // Check squash signals from decode.
829 if (fromDecode->decodeInfo[tid].squash) {
830 DPRINTF(Fetch, "[tid:%u]: Squashing instructions due to squash "
831 "from decode.\n",tid);
832
833 // Update the branch predictor.
834 if (fromDecode->decodeInfo[tid].branchMispredict) {
835 branchPred.squash(fromDecode->decodeInfo[tid].doneSeqNum,
836 fromDecode->decodeInfo[tid].nextPC,
837 fromDecode->decodeInfo[tid].branchTaken,
838 tid);
839 } else {
840 branchPred.squash(fromDecode->decodeInfo[tid].doneSeqNum,
841 tid);
842 }
843
844 if (fetchStatus[tid] != Squashing) {
845 // Squash unless we're already squashing
846 squashFromDecode(fromDecode->decodeInfo[tid].nextPC,
847 fromDecode->decodeInfo[tid].doneSeqNum,
848 tid);
849
850 return true;
851 }
852 }
853
854 if (checkStall(tid) && fetchStatus[tid] != IcacheWaitResponse) {
855 DPRINTF(Fetch, "[tid:%i]: Setting to blocked\n",tid);
856
857 fetchStatus[tid] = Blocked;
858
859 return true;
860 }
861
862 if (fetchStatus[tid] == Blocked ||
863 fetchStatus[tid] == Squashing) {
864 // Switch status to running if fetch isn't being told to block or
865 // squash this cycle.
866 DPRINTF(Fetch, "[tid:%i]: Done squashing, switching to running.\n",
867 tid);
868
869 fetchStatus[tid] = Running;
870
871 return true;
872 }
873
874 // If we've reached this point, we have not gotten any signals that
875 // cause fetch to change its status. Fetch remains the same as before.
876 return false;
877}
878
879template<class Impl>
880void
881DefaultFetch<Impl>::fetch(bool &status_change)
882{
883 //////////////////////////////////////////
884 // Start actual fetch
885 //////////////////////////////////////////
886 int tid = getFetchingThread(fetchPolicy);
887
888 if (tid == -1) {
889 DPRINTF(Fetch,"There are no more threads available to fetch from.\n");
890
891 // Breaks looping condition in tick()
892 threadFetched = numFetchingThreads;
893 return;
894 }
895
896 // The current PC.
897 Addr &fetch_PC = PC[tid];
898
899 // Fault code for memory access.
900 Fault fault = NoFault;
901
902 // If returning from the delay of a cache miss, then update the status
903 // to running, otherwise do the cache access. Possibly move this up
904 // to tick() function.
905 if (fetchStatus[tid] == IcacheAccessComplete) {
906 DPRINTF(Fetch, "[tid:%i]: Icache miss is complete.\n",
907 tid);
908
909 fetchStatus[tid] = Running;
910 status_change = true;
911 } else if (fetchStatus[tid] == Running) {
912 DPRINTF(Fetch, "[tid:%i]: Attempting to translate and read "
913 "instruction, starting at PC %08p.\n",
914 tid, fetch_PC);
915
916 bool fetch_success = fetchCacheLine(fetch_PC, fault, tid);
917 if (!fetch_success) {
918 ++fetchMiscStallCycles;
919 return;
920 }
921 } else {
922 if (fetchStatus[tid] == Idle) {
923 ++fetchIdleCycles;
924 } else if (fetchStatus[tid] == Blocked) {
925 ++fetchBlockedCycles;
926 } else if (fetchStatus[tid] == Squashing) {
927 ++fetchSquashCycles;
928 } else if (fetchStatus[tid] == IcacheWaitResponse) {
929 ++icacheStallCycles;
930 }
931
932 // Status is Idle, Squashing, Blocked, or IcacheWaitResponse, so
933 // fetch should do nothing.
934 return;
935 }
936
937 ++fetchCycles;
938
939 // If we had a stall due to an icache miss, then return.
940 if (fetchStatus[tid] == IcacheWaitResponse) {
941 ++icacheStallCycles;
942 status_change = true;
943 return;
944 }
945
946 Addr next_PC = fetch_PC;
947 InstSeqNum inst_seq;
948 MachInst inst;
949 ExtMachInst ext_inst;
950 // @todo: Fix this hack.
951 unsigned offset = (fetch_PC & cacheBlkMask) & ~3;
952
953 if (fault == NoFault) {
954 // If the read of the first instruction was successful, then grab the
955 // instructions from the rest of the cache line and put them into the
956 // queue heading to decode.
957
958 DPRINTF(Fetch, "[tid:%i]: Adding instructions to queue to "
959 "decode.\n",tid);
960
961 // Need to keep track of whether or not a predicted branch
962 // ended this fetch block.
963 bool predicted_branch = false;
964
965 for (;
966 offset < cacheBlkSize &&
967 numInst < fetchWidth &&
968 !predicted_branch;
969 ++numInst) {
970
971 // Get a sequence number.
972 inst_seq = cpu->getAndIncrementInstSeq();
973
974 // Make sure this is a valid index.
975 assert(offset <= cacheBlkSize - instSize);
976
977 // Get the instruction from the array of the cache line.
978 inst = gtoh(*reinterpret_cast<MachInst *>
979 (&cacheData[tid][offset]));
980
981 ext_inst = TheISA::makeExtMI(inst, fetch_PC);
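            // makeExtMI() augments the raw machine instruction with any
            // context needed for decoding (e.g. whether fetch_PC is a
            // PAL-mode address on Alpha).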
982
983 // Create a new DynInst from the instruction fetched.
984 DynInstPtr instruction = new DynInst(ext_inst, fetch_PC,
985 next_PC,
986 inst_seq, cpu);
987 instruction->setTid(tid);
988
989 instruction->setASID(tid);
990
991 instruction->setThreadState(cpu->thread[tid]);
992
993 DPRINTF(Fetch, "[tid:%i]: Instruction PC %#x created "
994 "[sn:%lli]\n",
995 tid, instruction->readPC(), inst_seq);
996
997 DPRINTF(Fetch, "[tid:%i]: Instruction is: %s\n",
998 tid, instruction->staticInst->disassemble(fetch_PC));
999
1000 instruction->traceData =
1001 Trace::getInstRecord(curTick, cpu->tcBase(tid), cpu,
1002 instruction->staticInst,
1003 instruction->readPC(),tid);
1004
1005 predicted_branch = lookupAndUpdateNextPC(instruction, next_PC);
1006
1007 // Add instruction to the CPU's list of instructions.
1008 instruction->setInstListIt(cpu->addInst(instruction));
1009
1010 // Write the instruction to the first slot in the queue
1011 // that heads to decode.
1012 toDecode->insts[numInst] = instruction;
1013
1014 toDecode->size++;
1015
1016 // Increment stat of fetched instructions.
1017 ++fetchedInsts;
1018
1019 // Move to the next instruction, unless we have a branch.
1020 fetch_PC = next_PC;
1021
1022 if (instruction->isQuiesce()) {
1023 warn("%lli: Quiesce instruction encountered, halting fetch!",
1024 curTick);
1025 fetchStatus[tid] = QuiescePending;
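                // The thread stays in QuiescePending until
                // wakeFromQuiesce() puts it back into Running.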
1026 ++numInst;
1027 status_change = true;
1028 break;
1029 }
1030
1031 offset += instSize;
1032 }
1033 }
1034
1035 if (numInst > 0) {
1036 wroteToTimeBuffer = true;
1037 }
1038
1039 // Now that fetching is completed, update the PC to signify what the next
1040 // cycle will be.
1041 if (fault == NoFault) {
1042 DPRINTF(Fetch, "[tid:%i]: Setting PC to %08p.\n",tid, next_PC);
1043
1044 PC[tid] = next_PC;
1045 nextPC[tid] = next_PC + instSize;
1046 } else {
1047 // We shouldn't be in an icache miss and also have a fault (an ITB
1048 // miss)
1049 if (fetchStatus[tid] == IcacheWaitResponse) {
1050 panic("Fetch should have exited prior to this!");
1051 }
1052
1053 // Send the fault to commit. This thread will not do anything
1054 // until commit handles the fault. The only other way it can
1055 // wake up is if a squash comes along and changes the PC.
1056#if FULL_SYSTEM
1057 assert(numInst != fetchWidth);
1058 // Get a sequence number.
1059 inst_seq = cpu->getAndIncrementInstSeq();
1060 // We will use a nop in order to carry the fault.
1061 ext_inst = TheISA::NoopMachInst;
1062
1063 // Create a new DynInst from the dummy nop.
1064 DynInstPtr instruction = new DynInst(ext_inst, fetch_PC,
1065 next_PC,
1066 inst_seq, cpu);
1067 instruction->setPredTarg(next_PC + instSize);
1068 instruction->setTid(tid);
1069
1070 instruction->setASID(tid);
1071
1072 instruction->setThreadState(cpu->thread[tid]);
1073
1074 instruction->traceData = NULL;
1075
1076 instruction->setInstListIt(cpu->addInst(instruction));
1077
1078 instruction->fault = fault;
1079
1080 toDecode->insts[numInst] = instruction;
1081 toDecode->size++;
1082
1083 DPRINTF(Fetch, "[tid:%i]: Blocked, need to handle the trap.\n",tid);
1084
1085 fetchStatus[tid] = TrapPending;
1086 status_change = true;
1087
1088 warn("%lli fault (%d) detected @ PC %08p", curTick, fault, PC[tid]);
1089#else // !FULL_SYSTEM
1090 warn("%lli fault (%d) detected @ PC %08p", curTick, fault, PC[tid]);
1091#endif // FULL_SYSTEM
1092 }
1093}
1094
1095template<class Impl>
1096void
1097DefaultFetch<Impl>::recvRetry()
1098{
1099 assert(cacheBlocked);
1100 if (retryPkt != NULL) {
1101 assert(retryTid != -1);
1102 assert(fetchStatus[retryTid] == IcacheWaitRetry);
1103
1104 if (icachePort->sendTiming(retryPkt)) {
1105 fetchStatus[retryTid] = IcacheWaitResponse;
1106 retryPkt = NULL;
1107 retryTid = -1;
1108 cacheBlocked = false;
1109 }
1110 } else {
1111 assert(retryTid == -1);
1112 // Access has been squashed since it was sent out. Just clear
1113 // the cache being blocked.
1114 cacheBlocked = false;
1115 }
1116}
1117
1118///////////////////////////////////////
1119// //
1120// SMT FETCH POLICY MAINTAINED HERE //
1121// //
1122///////////////////////////////////////
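// getFetchingThread() dispatches to one of the policies below: iqCount()
// and lsqCount() rank threads by the instruction-queue and load/store-queue
// occupancy reported back from IEW, while branchCount() simply takes the
// first active thread.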
1123template<class Impl>
1124int
1125DefaultFetch<Impl>::getFetchingThread(FetchPriority &fetch_priority)
1126{
1127 if (numThreads > 1) {
1128 switch (fetch_priority) {
1129
1130 case SingleThread:
1131 return 0;
1132
1133 case RoundRobin:
1134 return roundRobin();
1135
1136 case IQ:
1137 return iqCount();
1138
1139 case LSQ:
1140 return lsqCount();
1141
1142 case Branch:
1143 return branchCount();
1144
1145 default:
1146 return -1;
1147 }
1148 } else {
1149 int tid = *((*activeThreads).begin());
1150
1151 if (fetchStatus[tid] == Running ||
1152 fetchStatus[tid] == IcacheAccessComplete ||
1153 fetchStatus[tid] == Idle) {
1154 return tid;
1155 } else {
1156 return -1;
1157 }
1158 }
1159
1160}
1161
1162
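// Round-robin policy: return the highest-priority thread that is ready to
// fetch and rotate it to the back of the priority list so the remaining
// threads are favored on later cycles.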
1163template<class Impl>
1164int
1165DefaultFetch<Impl>::roundRobin()
1166{
1167 list<unsigned>::iterator pri_iter = priorityList.begin();
1168 list<unsigned>::iterator end = priorityList.end();
1169
1170 int high_pri;
1171
1172 while (pri_iter != end) {
1173 high_pri = *pri_iter;
1174
1175        assert(high_pri < numThreads);
1176
1177 if (fetchStatus[high_pri] == Running ||
1178 fetchStatus[high_pri] == IcacheAccessComplete ||
1179 fetchStatus[high_pri] == Idle) {
1180
1181 priorityList.erase(pri_iter);
1182 priorityList.push_back(high_pri);
1183
1184 return high_pri;
1185 }
1186
1187 pri_iter++;
1188 }
1189
1190 return -1;
1191}
1192
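// IQ-count policy: walk the active threads in descending order of
// instruction-queue occupancy and return the first one that is ready to
// fetch, or -1 if none is.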
1193template<class Impl>
1194int
1195DefaultFetch<Impl>::iqCount()
1196{
1197    priority_queue<pair<unsigned, unsigned> > PQ;  // (IQ count, tid)
1198
1199 list<unsigned>::iterator threads = (*activeThreads).begin();
1200
1201 while (threads != (*activeThreads).end()) {
1202 unsigned tid = *threads++;
1203
1204        PQ.push(make_pair(fromIEW->iewInfo[tid].iqCount, tid));
1205 }
1206
1207 while (!PQ.empty()) {
1208
1209        unsigned high_pri = PQ.top().second;
1210
1211 if (fetchStatus[high_pri] == Running ||
1212 fetchStatus[high_pri] == IcacheAccessComplete ||
1213 fetchStatus[high_pri] == Idle)
1214 return high_pri;
1215 else
1216 PQ.pop();
1217
1218 }
1219
1220 return -1;
1221}
1222
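// LSQ-count policy: identical to iqCount(), but threads are ordered by
// load/store queue occupancy instead.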
1223template<class Impl>
1224int
1225DefaultFetch<Impl>::lsqCount()
1226{
1227    priority_queue<pair<unsigned, unsigned> > PQ;  // (LSQ count, tid)
1228
1230 list<unsigned>::iterator threads = (*activeThreads).begin();
1231
1232 while (threads != (*activeThreads).end()) {
1233 unsigned tid = *threads++;
1234
1235        PQ.push(make_pair(fromIEW->iewInfo[tid].ldstqCount, tid));
1236 }
1237
1238 while (!PQ.empty()) {
1239
1240        unsigned high_pri = PQ.top().second;
1241
1242 if (fetchStatus[high_pri] == Running ||
1243 fetchStatus[high_pri] == IcacheAccessComplete ||
1244 fetchStatus[high_pri] == Idle)
1245 return high_pri;
1246 else
1247 PQ.pop();
1248
1249 }
1250
1251 return -1;
1252}
1253
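// Branch-count policy: not implemented as a distinct policy yet; simply
// returns the first active thread.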
1254template<class Impl>
1255int
1256DefaultFetch<Impl>::branchCount()
1257{
1258 list<unsigned>::iterator threads = (*activeThreads).begin();
1259
1260 return *threads;
1261}
923 return;
924 }
925 } else {
926 if (fetchStatus[tid] == Idle) {
927 ++fetchIdleCycles;
928 } else if (fetchStatus[tid] == Blocked) {
929 ++fetchBlockedCycles;
930 } else if (fetchStatus[tid] == Squashing) {
931 ++fetchSquashCycles;
932 } else if (fetchStatus[tid] == IcacheWaitResponse) {
933 ++icacheStallCycles;
934 }
935
936 // Status is Idle, Squashing, Blocked, or IcacheWaitResponse, so
937 // fetch should do nothing.
938 return;
939 }
940
941 ++fetchCycles;
942
943 // If we had a stall due to an icache miss, then return.
944 if (fetchStatus[tid] == IcacheWaitResponse) {
945 ++icacheStallCycles;
946 status_change = true;
947 return;
948 }
949
950 Addr next_PC = fetch_PC;
951 InstSeqNum inst_seq;
952 MachInst inst;
953 ExtMachInst ext_inst;
954 // @todo: Fix this hack.
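    // Byte offset of fetch_PC within its cache block, aligned down to a
    // 4-byte boundary.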
955 unsigned offset = (fetch_PC & cacheBlkMask) & ~3;
956
957 if (fault == NoFault) {
958 // If the read of the first instruction was successful, then grab the
959 // instructions from the rest of the cache line and put them into the
960 // queue heading to decode.
961
962 DPRINTF(Fetch, "[tid:%i]: Adding instructions to queue to "
963 "decode.\n",tid);
964
965 // Need to keep track of whether or not a predicted branch
966 // ended this fetch block.
967 bool predicted_branch = false;
968
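        // Fetch until the end of the cache block is reached, the fetch
        // width is exhausted, or a predicted-taken branch ends the block.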
969 for (;
970 offset < cacheBlkSize &&
971 numInst < fetchWidth &&
972 !predicted_branch;
973 ++numInst) {
974
975 // Get a sequence number.
976 inst_seq = cpu->getAndIncrementInstSeq();
977
978 // Make sure this is a valid index.
979 assert(offset <= cacheBlkSize - instSize);
980
981 // Get the instruction from the array of the cache line.
982 inst = gtoh(*reinterpret_cast<MachInst *>
983 (&cacheData[tid][offset]));
984
985 ext_inst = TheISA::makeExtMI(inst, fetch_PC);
986
987 // Create a new DynInst from the instruction fetched.
988 DynInstPtr instruction = new DynInst(ext_inst, fetch_PC,
989 next_PC,
990 inst_seq, cpu);
991 instruction->setTid(tid);
992
993 instruction->setASID(tid);
994
995 instruction->setThreadState(cpu->thread[tid]);
996
997 DPRINTF(Fetch, "[tid:%i]: Instruction PC %#x created "
998 "[sn:%lli]\n",
999 tid, instruction->readPC(), inst_seq);
1000
1001 DPRINTF(Fetch, "[tid:%i]: Instruction is: %s\n",
1002 tid, instruction->staticInst->disassemble(fetch_PC));
1003
1004 instruction->traceData =
1005 Trace::getInstRecord(curTick, cpu->tcBase(tid), cpu,
1006 instruction->staticInst,
1007 instruction->readPC(),tid);
1008
1009 predicted_branch = lookupAndUpdateNextPC(instruction, next_PC);
1010
1011 // Add instruction to the CPU's list of instructions.
1012 instruction->setInstListIt(cpu->addInst(instruction));
1013
1014            // Write the instruction into the next open slot of the queue
1015            // that heads to decode.
1016 toDecode->insts[numInst] = instruction;
1017
1018 toDecode->size++;
1019
1020 // Increment stat of fetched instructions.
1021 ++fetchedInsts;
1022
1023 // Move to the next instruction, unless we have a branch.
1024 fetch_PC = next_PC;
1025
1026 if (instruction->isQuiesce()) {
1027 warn("%lli: Quiesce instruction encountered, halting fetch!",
1028 curTick);
1029 fetchStatus[tid] = QuiescePending;
1030 ++numInst;
1031 status_change = true;
1032 break;
1033 }
1034
1035            offset += instSize;
1036 }
1037 }
1038
1039 if (numInst > 0) {
1040 wroteToTimeBuffer = true;
1041 }
1042
1043    // Now that fetching is complete, update the PC to indicate where fetch
1044    // will resume next cycle.
1045 if (fault == NoFault) {
1046 DPRINTF(Fetch, "[tid:%i]: Setting PC to %08p.\n",tid, next_PC);
1047
1048 PC[tid] = next_PC;
1049 nextPC[tid] = next_PC + instSize;
1050 } else {
1051        // We shouldn't be waiting on an icache miss and also have a fault
1052        // (an ITB miss) at the same time.
1053 if (fetchStatus[tid] == IcacheWaitResponse) {
1054 panic("Fetch should have exited prior to this!");
1055 }
1056
1057 // Send the fault to commit. This thread will not do anything
1058 // until commit handles the fault. The only other way it can
1059 // wake up is if a squash comes along and changes the PC.
1060#if FULL_SYSTEM
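        // There must still be a free slot in this cycle's fetch group for the
        // fault-carrying nop created below.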
1061 assert(numInst != fetchWidth);
1062 // Get a sequence number.
1063 inst_seq = cpu->getAndIncrementInstSeq();
1064 // We will use a nop in order to carry the fault.
1065 ext_inst = TheISA::NoopMachInst;
1066
1067 // Create a new DynInst from the dummy nop.
1068 DynInstPtr instruction = new DynInst(ext_inst, fetch_PC,
1069 next_PC,
1070 inst_seq, cpu);
1071 instruction->setPredTarg(next_PC + instSize);
1072 instruction->setTid(tid);
1073
1074 instruction->setASID(tid);
1075
1076 instruction->setThreadState(cpu->thread[tid]);
1077
1078 instruction->traceData = NULL;
1079
1080 instruction->setInstListIt(cpu->addInst(instruction));
1081
1082 instruction->fault = fault;
1083
1084 toDecode->insts[numInst] = instruction;
1085 toDecode->size++;
1086
1087 DPRINTF(Fetch, "[tid:%i]: Blocked, need to handle the trap.\n",tid);
1088
1089 fetchStatus[tid] = TrapPending;
1090 status_change = true;
1091
1092 warn("%lli fault (%d) detected @ PC %08p", curTick, fault, PC[tid]);
1093#else // !FULL_SYSTEM
1094 warn("%lli fault (%d) detected @ PC %08p", curTick, fault, PC[tid]);
1095#endif // FULL_SYSTEM
1096 }
1097}
1098
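// Callback from the icache port after a failed sendTiming(): the cache can
// now accept another request.  Resend the blocked packet if it is still
// outstanding; if it was squashed in the meantime, just unblock the cache.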
1099template<class Impl>
1100void
1101DefaultFetch<Impl>::recvRetry()
1102{
1103 assert(cacheBlocked);
1104 if (retryPkt != NULL) {
1105 assert(retryTid != -1);
1106 assert(fetchStatus[retryTid] == IcacheWaitRetry);
1107
1108 if (icachePort->sendTiming(retryPkt)) {
1109 fetchStatus[retryTid] = IcacheWaitResponse;
1110 retryPkt = NULL;
1111 retryTid = -1;
1112 cacheBlocked = false;
1113 }
1114 } else {
1115 assert(retryTid == -1);
1116        // The access has been squashed since it was sent out; just clear
1117        // the flag marking the cache as blocked.
1118 cacheBlocked = false;
1119 }
1120}
1121
1122///////////////////////////////////////
1123// //
1124// SMT FETCH POLICY MAINTAINED HERE //
1125// //
1126///////////////////////////////////////
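// Selects the thread that fetch should service this cycle according to the
// configured SMT fetch policy.  Returns -1 if no thread is in a fetchable
// state.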
1127template<class Impl>
1128int
1129DefaultFetch<Impl>::getFetchingThread(FetchPriority &fetch_priority)
1130{
1131 if (numThreads > 1) {
1132 switch (fetch_priority) {
1133
1134 case SingleThread:
1135 return 0;
1136
1137 case RoundRobin:
1138 return roundRobin();
1139
1140 case IQ:
1141 return iqCount();
1142
1143 case LSQ:
1144 return lsqCount();
1145
1146 case Branch:
1147 return branchCount();
1148
1149 default:
1150 return -1;
1151 }
1152 } else {
1153 int tid = *((*activeThreads).begin());
1154
1155 if (fetchStatus[tid] == Running ||
1156 fetchStatus[tid] == IcacheAccessComplete ||
1157 fetchStatus[tid] == Idle) {
1158 return tid;
1159 } else {
1160 return -1;
1161 }
1162 }
1163
1164}
1165
1166
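// Round-robin policy: return the highest-priority thread that is ready to
// fetch and rotate it to the back of the priority list so the remaining
// threads are favored on later cycles.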
1167template<class Impl>
1168int
1169DefaultFetch<Impl>::roundRobin()
1170{
1171 list<unsigned>::iterator pri_iter = priorityList.begin();
1172 list<unsigned>::iterator end = priorityList.end();
1173
1174 int high_pri;
1175
1176 while (pri_iter != end) {
1177 high_pri = *pri_iter;
1178
1179        assert(high_pri < numThreads);
1180
1181 if (fetchStatus[high_pri] == Running ||
1182 fetchStatus[high_pri] == IcacheAccessComplete ||
1183 fetchStatus[high_pri] == Idle) {
1184
1185 priorityList.erase(pri_iter);
1186 priorityList.push_back(high_pri);
1187
1188 return high_pri;
1189 }
1190
1191 pri_iter++;
1192 }
1193
1194 return -1;
1195}
1196
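// IQ-count policy: walk the active threads in descending order of
// instruction-queue occupancy and return the first one that is ready to
// fetch, or -1 if none is.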
1197template<class Impl>
1198int
1199DefaultFetch<Impl>::iqCount()
1200{
1201    priority_queue<pair<unsigned, unsigned> > PQ;  // (IQ count, tid)
1202
1203 list<unsigned>::iterator threads = (*activeThreads).begin();
1204
1205 while (threads != (*activeThreads).end()) {
1206 unsigned tid = *threads++;
1207
1208        PQ.push(make_pair(fromIEW->iewInfo[tid].iqCount, tid));
1209 }
1210
1211 while (!PQ.empty()) {
1212
1213        unsigned high_pri = PQ.top().second;
1214
1215 if (fetchStatus[high_pri] == Running ||
1216 fetchStatus[high_pri] == IcacheAccessComplete ||
1217 fetchStatus[high_pri] == Idle)
1218 return high_pri;
1219 else
1220 PQ.pop();
1221
1222 }
1223
1224 return -1;
1225}
1226
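// LSQ-count policy: identical to iqCount(), but threads are ordered by
// load/store queue occupancy instead.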
1227template<class Impl>
1228int
1229DefaultFetch<Impl>::lsqCount()
1230{
1231    priority_queue<pair<unsigned, unsigned> > PQ;  // (LSQ count, tid)
1232
1234 list<unsigned>::iterator threads = (*activeThreads).begin();
1235
1236 while (threads != (*activeThreads).end()) {
1237 unsigned tid = *threads++;
1238
1239        PQ.push(make_pair(fromIEW->iewInfo[tid].ldstqCount, tid));
1240 }
1241
1242 while (!PQ.empty()) {
1243
1244        unsigned high_pri = PQ.top().second;
1245
1246 if (fetchStatus[high_pri] == Running ||
1247 fetchStatus[high_pri] == IcacheAccessComplete ||
1248 fetchStatus[high_pri] == Idle)
1249 return high_pri;
1250 else
1251 PQ.pop();
1252
1253 }
1254
1255 return -1;
1256}
1257
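// Branch-count policy: not implemented as a distinct policy yet; simply
// returns the first active thread.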
1258template<class Impl>
1259int
1260DefaultFetch<Impl>::branchCount()
1261{
1262 list<unsigned>::iterator threads = (*activeThreads).begin();
1263
1264 return *threads;
1265}