fetch_impl.hh revision 2696:30b38e36ff54
1/*
2 * Copyright (c) 2004-2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Kevin Lim
29 */
30
31#include "arch/isa_traits.hh"
32#include "arch/utility.hh"
33#include "cpu/checker/cpu.hh"
34#include "cpu/exetrace.hh"
35#include "cpu/o3/fetch.hh"
36#include "mem/packet.hh"
37#include "mem/request.hh"
38#include "sim/byteswap.hh"
39#include "sim/host.hh"
40#include "sim/root.hh"
41
42#if FULL_SYSTEM
43#include "arch/tlb.hh"
44#include "arch/vtophys.hh"
45#include "base/remote_gdb.hh"
46#include "sim/system.hh"
47#endif // FULL_SYSTEM
48
49#include <algorithm>
50
51using namespace std;
52using namespace TheISA;
53
54template<class Impl>
55Tick
56DefaultFetch<Impl>::IcachePort::recvAtomic(PacketPtr pkt)
57{
58    panic("DefaultFetch doesn't expect recvAtomic callback!");
59    return curTick;
60}
61
62template<class Impl>
63void
64DefaultFetch<Impl>::IcachePort::recvFunctional(PacketPtr pkt)
65{
66    panic("DefaultFetch doesn't expect recvFunctional callback!");
67}
68
69template<class Impl>
70void
71DefaultFetch<Impl>::IcachePort::recvStatusChange(Status status)
72{
73    if (status == RangeChange)
74        return;
75
76    panic("DefaultFetch doesn't expect recvStatusChange callback!");
77}
78
79template<class Impl>
80bool
81DefaultFetch<Impl>::IcachePort::recvTiming(Packet *pkt)
82{
83    fetch->processCacheCompletion(pkt);
84    return true;
85}
86
87template<class Impl>
88void
89DefaultFetch<Impl>::IcachePort::recvRetry()
90{
91    fetch->recvRetry();
92}
93
94template<class Impl>
95DefaultFetch<Impl>::DefaultFetch(Params *params)
96    : mem(params->mem),
97      branchPred(params),
98      decodeToFetchDelay(params->decodeToFetchDelay),
99      renameToFetchDelay(params->renameToFetchDelay),
100      iewToFetchDelay(params->iewToFetchDelay),
101      commitToFetchDelay(params->commitToFetchDelay),
102      fetchWidth(params->fetchWidth),
103      cacheBlocked(false),
104      retryPkt(NULL),
105      retryTid(-1),
106      numThreads(params->numberOfThreads),
107      numFetchingThreads(params->smtNumFetchingThreads),
108      interruptPending(false),
109      switchedOut(false)
110{
111    if (numThreads > Impl::MaxThreads)
112        fatal("numThreads is not a valid value\n");
113
114    DPRINTF(Fetch, "Fetch constructor called\n");
115
116    // Set fetch stage's status to inactive.
117    _status = Inactive;
118
119    string policy = params->smtFetchPolicy;
120
121    // Convert string to lowercase
122    std::transform(policy.begin(), policy.end(), policy.begin(),
123                   (int(*)(int)) tolower);
124
125    // Figure out fetch policy
126    if (policy == "singlethread") {
127        fetchPolicy = SingleThread;
128    } else if (policy == "roundrobin") {
129        fetchPolicy = RoundRobin;
130        DPRINTF(Fetch, "Fetch policy set to Round Robin\n");
131    } else if (policy == "branch") {
132        fetchPolicy = Branch;
133        DPRINTF(Fetch, "Fetch policy set to Branch Count\n");
134    } else if (policy == "iqcount") {
135        fetchPolicy = IQ;
136        DPRINTF(Fetch, "Fetch policy set to IQ count\n");
137    } else if (policy == "lsqcount") {
138        fetchPolicy = LSQ;
139        DPRINTF(Fetch, "Fetch policy set to LSQ count\n");
140    } else {
141        fatal("Invalid Fetch Policy. Options are: {SingleThread,"
142              " RoundRobin, LSQcount, IQcount, Branch}\n");
143    }
144
145    // Size of cache block.
146    cacheBlkSize = 64;
147
148    // Create mask to get rid of offset bits.
149    cacheBlkMask = (cacheBlkSize - 1);
150
151    for (int tid=0; tid < numThreads; tid++) {
152
153        fetchStatus[tid] = Running;
154
155        priorityList.push_back(tid);
156
157        memReq[tid] = NULL;
158
159        // Create space to store a cache line.
160        cacheData[tid] = new uint8_t[cacheBlkSize];
161
162        stalls[tid].decode = 0;
163        stalls[tid].rename = 0;
164        stalls[tid].iew = 0;
165        stalls[tid].commit = 0;
166    }
167
168    // Get the size of an instruction.
169    instSize = sizeof(MachInst);
170}
171
172template <class Impl>
173std::string
174DefaultFetch<Impl>::name() const
175{
176    return cpu->name() + ".fetch";
177}
178
179template <class Impl>
180void
181DefaultFetch<Impl>::regStats()
182{
183    icacheStallCycles
184        .name(name() + ".icacheStallCycles")
185        .desc("Number of cycles fetch is stalled on an Icache miss")
186        .prereq(icacheStallCycles);
187
188    fetchedInsts
189        .name(name() + ".Insts")
190        .desc("Number of instructions fetch has processed")
191        .prereq(fetchedInsts);
192
193    fetchedBranches
194        .name(name() + ".Branches")
195        .desc("Number of branches that fetch encountered")
196        .prereq(fetchedBranches);
197
198    predictedBranches
199        .name(name() + ".predictedBranches")
200        .desc("Number of branches that fetch has predicted taken")
201        .prereq(predictedBranches);
202
203    fetchCycles
204        .name(name() + ".Cycles")
205        .desc("Number of cycles fetch has run and was not squashing or"
206              " blocked")
207        .prereq(fetchCycles);
208
209    fetchSquashCycles
210        .name(name() + ".SquashCycles")
211        .desc("Number of cycles fetch has spent squashing")
212        .prereq(fetchSquashCycles);
213
214    fetchIdleCycles
215        .name(name() + ".IdleCycles")
216        .desc("Number of cycles fetch was idle")
217        .prereq(fetchIdleCycles);
218
219    fetchBlockedCycles
220        .name(name() + ".BlockedCycles")
221        .desc("Number of cycles fetch has spent blocked")
222        .prereq(fetchBlockedCycles);
223
224    fetchedCacheLines
225        .name(name() + ".CacheLines")
226        .desc("Number of cache lines fetched")
227        .prereq(fetchedCacheLines);
228
229    fetchMiscStallCycles
230        .name(name() + ".MiscStallCycles")
231        .desc("Number of cycles fetch has spent waiting on interrupts, or "
232              "bad addresses, or out of MSHRs")
233        .prereq(fetchMiscStallCycles);
234
235    fetchIcacheSquashes
236        .name(name() + ".IcacheSquashes")
237        .desc("Number of outstanding Icache misses that were squashed")
238        .prereq(fetchIcacheSquashes);
239
240    fetchNisnDist
241        .init(/* base value */ 0,
242              /* last value */ fetchWidth,
243              /* bucket size */ 1)
244        .name(name() + ".rateDist")
245        .desc("Number of instructions fetched each cycle (Total)")
246        .flags(Stats::pdf);
247
248    idleRate
249        .name(name() + ".idleRate")
250        .desc("Percent of cycles fetch was idle")
251        .prereq(idleRate);
252    idleRate = fetchIdleCycles * 100 / cpu->numCycles;
253
254    branchRate
255        .name(name() + ".branchRate")
256        .desc("Number of branch fetches per cycle")
257        .flags(Stats::total);
258    branchRate = fetchedBranches / cpu->numCycles;
259
260    fetchRate
261        .name(name() + ".rate")
262        .desc("Number of inst fetches per cycle")
263        .flags(Stats::total);
264    fetchRate = fetchedInsts / cpu->numCycles;
265
266    branchPred.regStats();
267}
268
269template<class Impl>
270void
271DefaultFetch<Impl>::setCPU(FullCPU *cpu_ptr)
272{
273    DPRINTF(Fetch, "Setting the CPU pointer.\n");
274    cpu = cpu_ptr;
275
276    // Name is finally available, so create the port.
277    icachePort = new IcachePort(this);
278
279    Port *mem_dport = mem->getPort("");
280    icachePort->setPeer(mem_dport);
281    mem_dport->setPeer(icachePort);
282
283    if (cpu->checker) {
284        cpu->checker->setIcachePort(icachePort);
285    }
286
287    // Fetch needs to start fetching instructions at the very beginning,
288    // so it must start up in active state.
289    switchToActive();
290}
291
292template<class Impl>
293void
294DefaultFetch<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *time_buffer)
295{
296    DPRINTF(Fetch, "Setting the time buffer pointer.\n");
297    timeBuffer = time_buffer;
298
299    // Create wires to get information from proper places in time buffer.
300    fromDecode = timeBuffer->getWire(-decodeToFetchDelay);
301    fromRename = timeBuffer->getWire(-renameToFetchDelay);
302    fromIEW = timeBuffer->getWire(-iewToFetchDelay);
303    fromCommit = timeBuffer->getWire(-commitToFetchDelay);
304}
305
306template<class Impl>
307void
308DefaultFetch<Impl>::setActiveThreads(list<unsigned> *at_ptr)
309{
310    DPRINTF(Fetch, "Setting active threads list pointer.\n");
311    activeThreads = at_ptr;
312}
313
314template<class Impl>
315void
316DefaultFetch<Impl>::setFetchQueue(TimeBuffer<FetchStruct> *fq_ptr)
317{
318    DPRINTF(Fetch, "Setting the fetch queue pointer.\n");
319    fetchQueue = fq_ptr;
320
321    // Create wire to write information to proper place in fetch queue.
322    toDecode = fetchQueue->getWire(0);
323}
324
325#if 0
326template<class Impl>
327void
328DefaultFetch<Impl>::setPageTable(PageTable *pt_ptr)
329{
330    DPRINTF(Fetch, "Setting the page table pointer.\n");
331#if !FULL_SYSTEM
332    pTable = pt_ptr;
333#endif
334}
335#endif
336
337template<class Impl>
338void
339DefaultFetch<Impl>::initStage()
340{
341    // Setup PC and nextPC with initial state.
342    for (int tid = 0; tid < numThreads; tid++) {
343        PC[tid] = cpu->readPC(tid);
344        nextPC[tid] = cpu->readNextPC(tid);
345    }
346}
347
348template<class Impl>
349void
350DefaultFetch<Impl>::processCacheCompletion(PacketPtr pkt)
351{
352    unsigned tid = pkt->req->getThreadNum();
353
354    DPRINTF(Fetch, "[tid:%u] Waking up from cache miss.\n",tid);
355
356    // Only change the status if it's still waiting on the icache access
357    // to return.
358    if (fetchStatus[tid] != IcacheWaitResponse ||
359        pkt->req != memReq[tid] ||
360        isSwitchedOut()) {
361        ++fetchIcacheSquashes;
362        delete pkt->req;
363        delete pkt;
364        memReq[tid] = NULL;
365        return;
366    }
367
368    // Wake up the CPU (if it went to sleep and was waiting on this completion
369    // event).
370    cpu->wakeCPU();
371
372    DPRINTF(Activity, "[tid:%u] Activating fetch due to cache completion\n",
373            tid);
374
375    switchToActive();
376
377    // Only switch to IcacheAccessComplete if we're not stalled as well.
378    if (checkStall(tid)) {
379        fetchStatus[tid] = Blocked;
380    } else {
381        fetchStatus[tid] = IcacheAccessComplete;
382    }
383
384//    memcpy(cacheData[tid], memReq[tid]->data, memReq[tid]->size);
385
386    // Reset the mem req to NULL.
387    delete pkt->req;
388    delete pkt;
389    memReq[tid] = NULL;
390}
391
392template <class Impl>
393void
394DefaultFetch<Impl>::switchOut()
395{
396    // Fetch is ready to switch out at any time.
397    switchedOut = true;
398    cpu->signalSwitched();
399}
400
401template <class Impl>
402void
403DefaultFetch<Impl>::doSwitchOut()
404{
405    // Branch predictor needs to have its state cleared.
406    branchPred.switchOut();
407}
408
409template <class Impl>
410void
411DefaultFetch<Impl>::takeOverFrom()
412{
413    // Reset all state
414    for (int i = 0; i < Impl::MaxThreads; ++i) {
415        stalls[i].decode = 0;
416        stalls[i].rename = 0;
417        stalls[i].iew = 0;
418        stalls[i].commit = 0;
419        PC[i] = cpu->readPC(i);
420        nextPC[i] = cpu->readNextPC(i);
421        fetchStatus[i] = Running;
422    }
423    numInst = 0;
424    wroteToTimeBuffer = false;
425    _status = Inactive;
426    switchedOut = false;
427    branchPred.takeOverFrom();
428}
429
430template <class Impl>
431void
432DefaultFetch<Impl>::wakeFromQuiesce()
433{
434    DPRINTF(Fetch, "Waking up from quiesce\n");
435    // Hopefully this is safe
436    // @todo: Allow other threads to wake from quiesce.
437    fetchStatus[0] = Running;
438}
439
440template <class Impl>
441inline void
442DefaultFetch<Impl>::switchToActive()
443{
444    if (_status == Inactive) {
445        DPRINTF(Activity, "Activating stage.\n");
446
447        cpu->activateStage(FullCPU::FetchIdx);
448
449        _status = Active;
450    }
451}
452
453template <class Impl>
454inline void
455DefaultFetch<Impl>::switchToInactive()
456{
457    if (_status == Active) {
458        DPRINTF(Activity, "Deactivating stage.\n");
459
460        cpu->deactivateStage(FullCPU::FetchIdx);
461
462        _status = Inactive;
463    }
464}
465
466template <class Impl>
467bool
468DefaultFetch<Impl>::lookupAndUpdateNextPC(DynInstPtr &inst, Addr &next_PC)
469{
470    // Do branch prediction check here.
471    // A bit of a misnomer...next_PC is actually the current PC until
472    // this function updates it.
473    bool predict_taken;
474
475    if (!inst->isControl()) {
476        next_PC = next_PC + instSize;
477        inst->setPredTarg(next_PC);
478        return false;
479    }
480
481    predict_taken = branchPred.predict(inst, next_PC, inst->threadNumber);
482
483    ++fetchedBranches;
484
485    if (predict_taken) {
486        ++predictedBranches;
487    }
488
489    return predict_taken;
490}
491
492template <class Impl>
493bool
494DefaultFetch<Impl>::fetchCacheLine(Addr fetch_PC, Fault &ret_fault, unsigned tid)
495{
496    Fault fault = NoFault;
497
498#if FULL_SYSTEM
499    // Flag to say whether or not address is physical addr.
500    unsigned flags = cpu->inPalMode(fetch_PC) ? PHYSICAL : 0;
501#else
502    unsigned flags = 0;
503#endif // FULL_SYSTEM
504
505    if (cacheBlocked || (interruptPending && flags == 0) || switchedOut) {
506        // Hold off fetch from getting new instructions when:
507        // Cache is blocked, or
508        // while an interrupt is pending and we're not in PAL mode, or
509        // fetch is switched out.
510        return false;
511    }
512
513    // Align the fetch PC so it's at the start of a cache block.
514    fetch_PC = icacheBlockAlignPC(fetch_PC);
515
516    // Setup the memReq to do a read of the first instruction's address.
517    // Set the appropriate read size and flags as well.
518    // Build request here.
519    RequestPtr mem_req = new Request(tid, fetch_PC, cacheBlkSize, flags,
520                                     fetch_PC, cpu->readCpuId(), tid);
521
522    memReq[tid] = mem_req;
523
524    // Translate the instruction request.
525    fault = cpu->translateInstReq(mem_req, cpu->thread[tid]);
526
527    // In the case of faults, the fetch stage may need to stall and wait
528    // for the ITB miss to be handled.
529
530    // If translation was successful, attempt to read the first
531    // instruction.
532    if (fault == NoFault) {
533#if 0
534        if (cpu->system->memctrl->badaddr(memReq[tid]->paddr) ||
535            memReq[tid]->flags & UNCACHEABLE) {
536            DPRINTF(Fetch, "Fetch: Bad address %#x (hopefully on a "
537                    "misspeculating path)!",
538                    memReq[tid]->paddr);
539            ret_fault = TheISA::genMachineCheckFault();
540            return false;
541        }
542#endif
543
544        // Build packet here.
545        PacketPtr data_pkt = new Packet(mem_req,
546                                        Packet::ReadReq, Packet::Broadcast);
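        // Have the packet use this thread's cache line buffer directly, so
        // the response data is written straight into cacheData[tid].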
547        data_pkt->dataStatic(cacheData[tid]);
548
549        DPRINTF(Fetch, "Fetch: Doing instruction read.\n");
550
551        fetchedCacheLines++;
552
553        // Now do the timing access to see whether or not the instruction
554        // exists within the cache.
555        if (!icachePort->sendTiming(data_pkt)) {
556            assert(retryPkt == NULL);
557            assert(retryTid == -1);
558            DPRINTF(Fetch, "[tid:%i] Out of MSHRs!\n", tid);
559            fetchStatus[tid] = IcacheWaitRetry;
560            retryPkt = data_pkt;
561            retryTid = tid;
562            cacheBlocked = true;
563            return false;
564        }
565
566        DPRINTF(Fetch, "Doing cache access.\n");
567
568        lastIcacheStall[tid] = curTick;
569
570        DPRINTF(Activity, "[tid:%i]: Activity: Waiting on I-cache "
571                "response.\n", tid);
572
573        fetchStatus[tid] = IcacheWaitResponse;
574    } else {
575        delete mem_req;
576        memReq[tid] = NULL;
577    }
578
579    ret_fault = fault;
580    return true;
581}
582
583template <class Impl>
584inline void
585DefaultFetch<Impl>::doSquash(const Addr &new_PC, unsigned tid)
586{
587    DPRINTF(Fetch, "[tid:%i]: Squashing, setting PC to: %#x.\n",
588            tid, new_PC);
589
590    PC[tid] = new_PC;
591    nextPC[tid] = new_PC + instSize;
592
593    // Clear the icache miss if it's outstanding.
594    if (fetchStatus[tid] == IcacheWaitResponse) {
595        DPRINTF(Fetch, "[tid:%i]: Squashing outstanding Icache miss.\n",
596                tid);
597        // Should I delete this here or when it comes back from the cache?
598//        delete memReq[tid];
599        memReq[tid] = NULL;
600    }
601
602    // Get rid of the retrying packet if it was from this thread.
603    if (retryTid == tid) {
604        assert(cacheBlocked);
605        cacheBlocked = false;
606        retryTid = -1;
607        retryPkt = NULL;
608        delete retryPkt->req;
609        delete retryPkt;
610        retryPkt = NULL;
611
612    fetchStatus[tid] = Squashing;
613
614    ++fetchSquashCycles;
615}
616
617template<class Impl>
618void
619DefaultFetch<Impl>::squashFromDecode(const Addr &new_PC,
620                                     const InstSeqNum &seq_num,
621                                     unsigned tid)
622{
623    DPRINTF(Fetch, "[tid:%i]: Squashing from decode.\n",tid);
624
625    doSquash(new_PC, tid);
626
627    // Tell the CPU to remove any instructions that are in flight between
628    // fetch and decode.
629    cpu->removeInstsUntil(seq_num, tid);
630}
631
632template<class Impl>
633bool
634DefaultFetch<Impl>::checkStall(unsigned tid) const
635{
636    bool ret_val = false;
637
638    if (cpu->contextSwitch) {
639        DPRINTF(Fetch,"[tid:%i]: Stalling for a context switch.\n",tid);
640        ret_val = true;
641    } else if (stalls[tid].decode) {
642        DPRINTF(Fetch,"[tid:%i]: Stall from Decode stage detected.\n",tid);
643        ret_val = true;
644    } else if (stalls[tid].rename) {
645        DPRINTF(Fetch,"[tid:%i]: Stall from Rename stage detected.\n",tid);
646        ret_val = true;
647    } else if (stalls[tid].iew) {
648        DPRINTF(Fetch,"[tid:%i]: Stall from IEW stage detected.\n",tid);
649        ret_val = true;
650    } else if (stalls[tid].commit) {
651        DPRINTF(Fetch,"[tid:%i]: Stall from Commit stage detected.\n",tid);
652        ret_val = true;
653    }
654
655    return ret_val;
656}
657
658template<class Impl>
659typename DefaultFetch<Impl>::FetchStatus
660DefaultFetch<Impl>::updateFetchStatus()
661{
662    //Check Running
663    list<unsigned>::iterator threads = (*activeThreads).begin();
664
665    while (threads != (*activeThreads).end()) {
666
667        unsigned tid = *threads++;
668
669        if (fetchStatus[tid] == Running ||
670            fetchStatus[tid] == Squashing ||
671            fetchStatus[tid] == IcacheAccessComplete) {
672
673            if (_status == Inactive) {
674                DPRINTF(Activity, "[tid:%i]: Activating stage.\n",tid);
675
676                if (fetchStatus[tid] == IcacheAccessComplete) {
677                    DPRINTF(Activity, "[tid:%i]: Activating fetch due to cache"
678                            " completion\n", tid);
679                }
680
681                cpu->activateStage(FullCPU::FetchIdx);
682            }
683
684            return Active;
685        }
686    }
687
688    // Stage is switching from active to inactive, notify CPU of it.
689    if (_status == Active) {
690        DPRINTF(Activity, "Deactivating stage.\n");
691
692        cpu->deactivateStage(FullCPU::FetchIdx);
693    }
694
695    return Inactive;
696}
697
698template <class Impl>
699void
700DefaultFetch<Impl>::squash(const Addr &new_PC, unsigned tid)
701{
702    DPRINTF(Fetch, "[tid:%u]: Squash from commit.\n",tid);
703
704    doSquash(new_PC, tid);
705
706    // Tell the CPU to remove any instructions that are not in the ROB.
707    cpu->removeInstsNotInROB(tid);
708}
709
710template <class Impl>
711void
712DefaultFetch<Impl>::tick()
713{
714    list<unsigned>::iterator threads = (*activeThreads).begin();
715    bool status_change = false;
716
717    wroteToTimeBuffer = false;
718
719    while (threads != (*activeThreads).end()) {
720        unsigned tid = *threads++;
721
722        // Check the signals for each thread to determine the proper status
723        // for each thread.
724        bool updated_status = checkSignalsAndUpdate(tid);
725        status_change =  status_change || updated_status;
726    }
727
728    DPRINTF(Fetch, "Running stage.\n");
729
730    // Reset the number of the instruction we're fetching.
731    numInst = 0;
732
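    // Record commit's interrupt status.  While an interrupt is pending,
    // fetchCacheLine() will hold off fetching unless the PC is in PAL mode.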
733    if (fromCommit->commitInfo[0].interruptPending) {
734        interruptPending = true;
735    }
736    if (fromCommit->commitInfo[0].clearInterrupt) {
737        interruptPending = false;
738    }
739
740    for (threadFetched = 0; threadFetched < numFetchingThreads;
741         threadFetched++) {
742        // Fetch each of the actively fetching threads.
743        fetch(status_change);
744    }
745
746    // Record number of instructions fetched this cycle for distribution.
747    fetchNisnDist.sample(numInst);
748
749    if (status_change) {
750        // Change the fetch stage status if there was a status change.
751        _status = updateFetchStatus();
752    }
753
754    // If there was activity this cycle, inform the CPU of it.
755    if (wroteToTimeBuffer || cpu->contextSwitch) {
756        DPRINTF(Activity, "Activity this cycle.\n");
757
758        cpu->activityThisCycle();
759    }
760}
761
762template <class Impl>
763bool
764DefaultFetch<Impl>::checkSignalsAndUpdate(unsigned tid)
765{
766    // Update the per thread stall statuses.
767    if (fromDecode->decodeBlock[tid]) {
768        stalls[tid].decode = true;
769    }
770
771    if (fromDecode->decodeUnblock[tid]) {
772        assert(stalls[tid].decode);
773        assert(!fromDecode->decodeBlock[tid]);
774        stalls[tid].decode = false;
775    }
776
777    if (fromRename->renameBlock[tid]) {
778        stalls[tid].rename = true;
779    }
780
781    if (fromRename->renameUnblock[tid]) {
782        assert(stalls[tid].rename);
783        assert(!fromRename->renameBlock[tid]);
784        stalls[tid].rename = false;
785    }
786
787    if (fromIEW->iewBlock[tid]) {
788        stalls[tid].iew = true;
789    }
790
791    if (fromIEW->iewUnblock[tid]) {
792        assert(stalls[tid].iew);
793        assert(!fromIEW->iewBlock[tid]);
794        stalls[tid].iew = false;
795    }
796
797    if (fromCommit->commitBlock[tid]) {
798        stalls[tid].commit = true;
799    }
800
801    if (fromCommit->commitUnblock[tid]) {
802        assert(stalls[tid].commit);
803        assert(!fromCommit->commitBlock[tid]);
804        stalls[tid].commit = false;
805    }
806
807    // Check squash signals from commit.
808    if (fromCommit->commitInfo[tid].squash) {
809
810        DPRINTF(Fetch, "[tid:%u]: Squashing instructions due to squash "
811                "from commit.\n",tid);
812
813        // In any case, squash.
814        squash(fromCommit->commitInfo[tid].nextPC,tid);
815
816        // Also check if there's a mispredict that happened.
817        if (fromCommit->commitInfo[tid].branchMispredict) {
818            branchPred.squash(fromCommit->commitInfo[tid].doneSeqNum,
819                              fromCommit->commitInfo[tid].nextPC,
820                              fromCommit->commitInfo[tid].branchTaken,
821                              tid);
822        } else {
823            branchPred.squash(fromCommit->commitInfo[tid].doneSeqNum,
824                              tid);
825        }
826
827        return true;
828    } else if (fromCommit->commitInfo[tid].doneSeqNum) {
829        // Update the branch predictor if it wasn't a squashed instruction
830        // that was broadcasted.
831        branchPred.update(fromCommit->commitInfo[tid].doneSeqNum, tid);
832    }
833
834    // Check ROB squash signals from commit.
835    if (fromCommit->commitInfo[tid].robSquashing) {
836        DPRINTF(Fetch, "[tid:%u]: ROB is still squashing Thread %u.\n", tid, tid);
837
838        // Continue to squash.
839        fetchStatus[tid] = Squashing;
840
841        return true;
842    }
843
844    // Check squash signals from decode.
845    if (fromDecode->decodeInfo[tid].squash) {
846        DPRINTF(Fetch, "[tid:%u]: Squashing instructions due to squash "
847                "from decode.\n",tid);
848
849        // Update the branch predictor.
850        if (fromDecode->decodeInfo[tid].branchMispredict) {
851            branchPred.squash(fromDecode->decodeInfo[tid].doneSeqNum,
852                              fromDecode->decodeInfo[tid].nextPC,
853                              fromDecode->decodeInfo[tid].branchTaken,
854                              tid);
855        } else {
856            branchPred.squash(fromDecode->decodeInfo[tid].doneSeqNum,
857                              tid);
858        }
859
860        if (fetchStatus[tid] != Squashing) {
861            // Squash unless we're already squashing
862            squashFromDecode(fromDecode->decodeInfo[tid].nextPC,
863                             fromDecode->decodeInfo[tid].doneSeqNum,
864                             tid);
865
866            return true;
867        }
868    }
869
870    if (checkStall(tid) && fetchStatus[tid] != IcacheWaitResponse) {
871        DPRINTF(Fetch, "[tid:%i]: Setting to blocked\n",tid);
872
873        fetchStatus[tid] = Blocked;
874
875        return true;
876    }
877
878    if (fetchStatus[tid] == Blocked ||
879        fetchStatus[tid] == Squashing) {
880        // Switch status to running if fetch isn't being told to block or
881        // squash this cycle.
882        DPRINTF(Fetch, "[tid:%i]: Done squashing, switching to running.\n",
883                tid);
884
885        fetchStatus[tid] = Running;
886
887        return true;
888    }
889
890    // If we've reached this point, we have not gotten any signals that
891    // cause fetch to change its status.  Fetch remains the same as before.
892    return false;
893}
894
895template<class Impl>
896void
897DefaultFetch<Impl>::fetch(bool &status_change)
898{
899    //////////////////////////////////////////
900    // Start actual fetch
901    //////////////////////////////////////////
902    int tid = getFetchingThread(fetchPolicy);
903
904    if (tid == -1) {
905        DPRINTF(Fetch,"There are no more threads available to fetch from.\n");
906
907        // Breaks looping condition in tick()
908        threadFetched = numFetchingThreads;
909        return;
910    }
911
912    // The current PC.
913    Addr &fetch_PC = PC[tid];
914
915    // Fault code for memory access.
916    Fault fault = NoFault;
917
918    // If returning from the delay of a cache miss, then update the status
919    // to running, otherwise do the cache access.  Possibly move this up
920    // to tick() function.
921    if (fetchStatus[tid] == IcacheAccessComplete) {
922        DPRINTF(Fetch, "[tid:%i]: Icache miss is complete.\n",
923                tid);
924
925        fetchStatus[tid] = Running;
926        status_change = true;
927    } else if (fetchStatus[tid] == Running) {
928        DPRINTF(Fetch, "[tid:%i]: Attempting to translate and read "
929                "instruction, starting at PC %08p.\n",
930                tid, fetch_PC);
931
932        bool fetch_success = fetchCacheLine(fetch_PC, fault, tid);
933        if (!fetch_success) {
934            ++fetchMiscStallCycles;
935            return;
936        }
937    } else {
938        if (fetchStatus[tid] == Idle) {
939            ++fetchIdleCycles;
940        } else if (fetchStatus[tid] == Blocked) {
941            ++fetchBlockedCycles;
942        } else if (fetchStatus[tid] == Squashing) {
943            ++fetchSquashCycles;
944        } else if (fetchStatus[tid] == IcacheWaitResponse) {
945            ++icacheStallCycles;
946        }
947
948        // Status is Idle, Squashing, Blocked, IcacheWaitResponse, or some
949        // other non-fetching state, so fetch should do nothing.
950        return;
951    }
952
953    ++fetchCycles;
954
955    // If we had a stall due to an icache miss, then return.
956    if (fetchStatus[tid] == IcacheWaitResponse) {
957        ++icacheStallCycles;
958        status_change = true;
959        return;
960    }
961
962    Addr next_PC = fetch_PC;
963    InstSeqNum inst_seq;
964    MachInst inst;
965    ExtMachInst ext_inst;
966    // @todo: Fix this hack.
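    // Offset of fetch_PC within its cache block, forced down to a 4-byte
    // boundary (the hack: a fixed 4-byte instruction size is assumed).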
967    unsigned offset = (fetch_PC & cacheBlkMask) & ~3;
968
969    if (fault == NoFault) {
970        // If the read of the first instruction was successful, then grab the
971        // instructions from the rest of the cache line and put them into the
972        // queue heading to decode.
973
974        DPRINTF(Fetch, "[tid:%i]: Adding instructions to queue to "
975                "decode.\n",tid);
976
977        // Need to keep track of whether or not a predicted branch
978        // ended this fetch block.
979        bool predicted_branch = false;
980
981        for (;
982             offset < cacheBlkSize &&
983                 numInst < fetchWidth &&
984                 !predicted_branch;
985             ++numInst) {
986
987            // Get a sequence number.
988            inst_seq = cpu->getAndIncrementInstSeq();
989
990            // Make sure this is a valid index.
991            assert(offset <= cacheBlkSize - instSize);
992
993            // Get the instruction from the array of the cache line.
994            inst = gtoh(*reinterpret_cast<MachInst *>
995                        (&cacheData[tid][offset]));
996
997            ext_inst = TheISA::makeExtMI(inst, fetch_PC);
998
999            // Create a new DynInst from the instruction fetched.
1000            DynInstPtr instruction = new DynInst(ext_inst, fetch_PC,
1001                                                 next_PC,
1002                                                 inst_seq, cpu);
1003            instruction->setThread(tid);
1004
1005            instruction->setASID(tid);
1006
1007            instruction->setState(cpu->thread[tid]);
1008
1009            DPRINTF(Fetch, "[tid:%i]: Instruction PC %#x created "
1010                    "[sn:%lli]\n",
1011                    tid, instruction->readPC(), inst_seq);
1012
1013            DPRINTF(Fetch, "[tid:%i]: Instruction is: %s\n",
1014                    tid, instruction->staticInst->disassemble(fetch_PC));
1015
1016            instruction->traceData =
1017                Trace::getInstRecord(curTick, cpu->tcBase(tid), cpu,
1018                                     instruction->staticInst,
1019                                     instruction->readPC(),tid);
1020
1021            predicted_branch = lookupAndUpdateNextPC(instruction, next_PC);
1022
1023            // Add instruction to the CPU's list of instructions.
1024            instruction->setInstListIt(cpu->addInst(instruction));
1025
1026            // Write the instruction to the first slot in the queue
1027            // that heads to decode.
1028            toDecode->insts[numInst] = instruction;
1029
1030            toDecode->size++;
1031
1032            // Increment stat of fetched instructions.
1033            ++fetchedInsts;
1034
1035            // Move to the next instruction, unless we have a branch.
1036            fetch_PC = next_PC;
1037
1038            if (instruction->isQuiesce()) {
1039                warn("%lli: Quiesce instruction encountered, halting fetch!",
1040                     curTick);
1041                fetchStatus[tid] = QuiescePending;
1042                ++numInst;
1043                status_change = true;
1044                break;
1045            }
1046
1047            offset+= instSize;
1048        }
1049    }
1050
1051    if (numInst > 0) {
1052        wroteToTimeBuffer = true;
1053    }
1054
1055    // Now that fetching is completed, update the PC to signify what the next
1056    // cycle will be.
1057    if (fault == NoFault) {
1058        DPRINTF(Fetch, "[tid:%i]: Setting PC to %08p.\n",tid, next_PC);
1059
1060        PC[tid] = next_PC;
1061        nextPC[tid] = next_PC + instSize;
1062    } else {
1063        // We shouldn't be in an icache miss and also have a fault (an ITB
1064        // miss)
1065        if (fetchStatus[tid] == IcacheWaitResponse) {
1066            panic("Fetch should have exited prior to this!");
1067        }
1068
1069        // Send the fault to commit.  This thread will not do anything
1070        // until commit handles the fault.  The only other way it can
1071        // wake up is if a squash comes along and changes the PC.
1072#if FULL_SYSTEM
1073        assert(numInst != fetchWidth);
1074        // Get a sequence number.
1075        inst_seq = cpu->getAndIncrementInstSeq();
1076        // We will use a nop in order to carry the fault.
1077        ext_inst = TheISA::NoopMachInst;
1078
1079        // Create a new DynInst from the dummy nop.
1080        DynInstPtr instruction = new DynInst(ext_inst, fetch_PC,
1081                                             next_PC,
1082                                             inst_seq, cpu);
1083        instruction->setPredTarg(next_PC + instSize);
1084        instruction->setThread(tid);
1085
1086        instruction->setASID(tid);
1087
1088        instruction->setState(cpu->thread[tid]);
1089
1090        instruction->traceData = NULL;
1091
1092        instruction->setInstListIt(cpu->addInst(instruction));
1093
1094        instruction->fault = fault;
1095
1096        toDecode->insts[numInst] = instruction;
1097        toDecode->size++;
1098
1099        DPRINTF(Fetch, "[tid:%i]: Blocked, need to handle the trap.\n",tid);
1100
1101        fetchStatus[tid] = TrapPending;
1102        status_change = true;
1103
1104        warn("%lli fault (%d) detected @ PC %08p", curTick, fault, PC[tid]);
1105#else // !FULL_SYSTEM
1106        warn("%lli fault (%d) detected @ PC %08p", curTick, fault, PC[tid]);
1107#endif // FULL_SYSTEM
1108    }
1109}
1110
1111template<class Impl>
1112void
1113DefaultFetch<Impl>::recvRetry()
1114{
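    // The cache can accept requests again; resend the packet that was
    // previously refused, unless it has since been squashed.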
1115    assert(cacheBlocked);
1116    if (retryPkt != NULL) {
1117        assert(retryTid != -1);
1118        assert(fetchStatus[retryTid] == IcacheWaitRetry);
1119
1120        if (icachePort->sendTiming(retryPkt)) {
1121            fetchStatus[retryTid] = IcacheWaitResponse;
1122            retryPkt = NULL;
1123            retryTid = -1;
1124            cacheBlocked = false;
1125        }
1126    } else {
1127        assert(retryTid == -1);
1128        // Access has been squashed since it was sent out.  Just clear
1129        // the cache being blocked.
1130        cacheBlocked = false;
1131    }
1132}
1133
1134///////////////////////////////////////
1135//                                   //
1136//  SMT FETCH POLICY MAINTAINED HERE //
1137//                                   //
1138///////////////////////////////////////
1139template<class Impl>
1140int
1141DefaultFetch<Impl>::getFetchingThread(FetchPriority &fetch_priority)
1142{
1143    if (numThreads > 1) {
1144        switch (fetch_priority) {
1145
1146          case SingleThread:
1147            return 0;
1148
1149          case RoundRobin:
1150            return roundRobin();
1151
1152          case IQ:
1153            return iqCount();
1154
1155          case LSQ:
1156            return lsqCount();
1157
1158          case Branch:
1159            return branchCount();
1160
1161          default:
1162            return -1;
1163        }
1164    } else {
1165        int tid = *((*activeThreads).begin());
1166
1167        if (fetchStatus[tid] == Running ||
1168            fetchStatus[tid] == IcacheAccessComplete ||
1169            fetchStatus[tid] == Idle) {
1170            return tid;
1171        } else {
1172            return -1;
1173        }
1174    }
1175
1176}
1177
1178
1179template<class Impl>
1180int
1181DefaultFetch<Impl>::roundRobin()
1182{
1183    list<unsigned>::iterator pri_iter = priorityList.begin();
1184    list<unsigned>::iterator end      = priorityList.end();
1185
1186    int high_pri;
1187
1188    while (pri_iter != end) {
1189        high_pri = *pri_iter;
1190
1191        assert(high_pri <= numThreads);
1192
1193        if (fetchStatus[high_pri] == Running ||
1194            fetchStatus[high_pri] == IcacheAccessComplete ||
1195            fetchStatus[high_pri] == Idle) {
1196
1197            priorityList.erase(pri_iter);
1198            priorityList.push_back(high_pri);
1199
1200            return high_pri;
1201        }
1202
1203        pri_iter++;
1204    }
1205
1206    return -1;
1207}
1208
1209template<class Impl>
1210int
1211DefaultFetch<Impl>::iqCount()
1212{
1213    priority_queue<pair<unsigned, unsigned> > PQ;  // <IQ count, tid> pairs
1214
1215    list<unsigned>::iterator threads = (*activeThreads).begin();
1216
1217    while (threads != (*activeThreads).end()) {
1218        unsigned tid = *threads++;
1219
1220        PQ.push(make_pair(fromIEW->iewInfo[tid].iqCount, tid));
1221    }
1222
1223    while (!PQ.empty()) {
1224
1225        unsigned high_pri = PQ.top().second;
1226
1227        if (fetchStatus[high_pri] == Running ||
1228            fetchStatus[high_pri] == IcacheAccessComplete ||
1229            fetchStatus[high_pri] == Idle)
1230            return high_pri;
1231        else
1232            PQ.pop();
1233
1234    }
1235
1236    return -1;
1237}
1238
1239template<class Impl>
1240int
1241DefaultFetch<Impl>::lsqCount()
1242{
1243    priority_queue<pair<unsigned, unsigned> > PQ;  // <LSQ count, tid> pairs
1244
1245
1246    list<unsigned>::iterator threads = (*activeThreads).begin();
1247
1248    while (threads != (*activeThreads).end()) {
1249        unsigned tid = *threads++;
1250
1251        PQ.push(make_pair(fromIEW->iewInfo[tid].ldstqCount, tid));
1252    }
1253
1254    while (!PQ.empty()) {
1255
1256        unsigned high_pri = PQ.top().second;
1257
1258        if (fetchStatus[high_pri] == Running ||
1259            fetchStatus[high_pri] == IcacheAccessComplete ||
1260            fetchStatus[high_pri] == Idle)
1261            return high_pri;
1262        else
1263            PQ.pop();
1264
1265    }
1266
1267    return -1;
1268}
1269
1270template<class Impl>
1271int
1272DefaultFetch<Impl>::branchCount()
1273{
1274    list<unsigned>::iterator threads = (*activeThreads).begin();
1275
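    // Branch counts are not actually consulted here; this simply returns the
    // first active thread.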
1276    return *threads;
1277}
1278