51a52
> #include "cpu/base.hh"
61a63
> #include "sim/eventq.hh"
137a140
> numInst(0),
150c153,154
< switchedOut(false)
---
> switchedOut(false),
> finishTranslationEvent(this)
270a275,299
> fetchPendingDrainCycles
> .name(name() + ".PendingDrainCycles")
> .desc("Number of cycles fetch has spent waiting on pipes to drain")
> .prereq(fetchPendingDrainCycles);
>
> fetchNoActiveThreadStallCycles
> .name(name() + ".NoActiveThreadStallCycles")
> .desc("Number of stall cycles due to no active thread to fetch from")
> .prereq(fetchNoActiveThreadStallCycles);
>
> fetchPendingTrapStallCycles
> .name(name() + ".PendingTrapStallCycles")
> .desc("Number of stall cycles due to pending traps")
> .prereq(fetchPendingTrapStallCycles);
>
> fetchPendingQuiesceStallCycles
> .name(name() + ".PendingQuiesceStallCycles")
> .desc("Number of stall cycles due to pending quiesce instructions")
> .prereq(fetchPendingQuiesceStallCycles);
>
> fetchIcacheWaitRetryStallCycles
> .name(name() + ".IcacheWaitRetryStallCycles")
> .desc("Number of stall cycles due to full MSHR")
> .prereq(fetchIcacheWaitRetryStallCycles);
>
677a707,713
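> // If fetch has already used its full bandwidth this cycle, defer
> // finishing this translation until the next cycle.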
> if (!(numInst < fetchWidth)) {
> assert(!finishTranslationEvent.scheduled());
> finishTranslationEvent.setFault(fault);
> finishTranslationEvent.setReq(mem_req);
> cpu->schedule(finishTranslationEvent,
>               cpu->nextCycle(curTick() + cpu->ticks(1)));
> return;
> }
679c715
< mem_req->getVaddr(), memReq[tid]->getVaddr());
---
> tid, mem_req->getVaddr(), memReq[tid]->getVaddr());
853a890,893
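> // Clear the pipelined I-cache request flags; fetch() sets them again
> // this cycle if a cross-block access can be pipelined.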
> for (ThreadID i = 0; i < Impl::MaxThreads; ++i) {
> issuePipelinedIfetch[i] = false;
> }
>
865,868c905
< // Reset the number of the instruction we're fetching.
< numInst = 0;
<
< #if FULL_SYSTEM
---
> #if FULL_SYSTEM
897a935,944
>
> // Issue the next I-cache request if possible.
> for (ThreadID i = 0; i < Impl::MaxThreads; ++i) {
> if (issuePipelinedIfetch[i]) {
> pipelineIcacheAccesses(i);
> }
> }
>
> // Reset the number of instructions we've fetched.
> numInst = 0;
1102,1103d1148
< DPRINTF(Fetch,"There are no more threads available to fetch from.\n");
<
1105a1151,1155
>
> if (numThreads == 1) { // @todo Per-thread stats
> profileStall(0);
> }
>
1160,1179d1209
< } else if (fetchStatus[tid] == Blocked) {
< ++fetchBlockedCycles;
< DPRINTF(Fetch, "[tid:%i]: Fetch is blocked!\n", tid);
< } else if (fetchStatus[tid] == Squashing) {
< ++fetchSquashCycles;
< DPRINTF(Fetch, "[tid:%i]: Fetch is squashing!\n", tid);
< } else if (fetchStatus[tid] == IcacheWaitResponse) {
< ++icacheStallCycles;
< DPRINTF(Fetch, "[tid:%i]: Fetch is waiting cache response!\n",
< tid);
< } else if (fetchStatus[tid] == ItlbWait) {
< DPRINTF(Fetch, "[tid:%i]: Fetch is waiting ITLB walk to "
< "finish! \n", tid);
< ++fetchTlbCycles;
< } else if (fetchStatus[tid] == TrapPending) {
< DPRINTF(Fetch, "[tid:%i]: Fetch is waiting for a pending trap\n",
< tid);
< } else if (fetchStatus[tid] == NoGoodAddr) {
< DPRINTF(Fetch, "[tid:%i]: Fetch predicted non-executable address\n",
< tid);
1182,1185c1212
<
<
< // Status is Idle, Squashing, Blocked, ItlbWait or IcacheWaitResponse
< // so fetch should do nothing.
---
> // Status is Idle, so fetch should do nothing.
1331a1359,1369
>
> // Pipeline a fetch if we're crossing a cache boundary and not in
> // a state that would preclude fetching.
> fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;
> Addr block_PC = icacheBlockAlignPC(fetchAddr);
> issuePipelinedIfetch[tid] = block_PC != cacheDataPC[tid] &&
> fetchStatus[tid] != IcacheWaitResponse &&
> fetchStatus[tid] != ItlbWait &&
> fetchStatus[tid] != IcacheWaitRetry &&
> fetchStatus[tid] != QuiescePending &&
> !curMacroop;
1513a1552,1629
>
> template<class Impl>
> void
> DefaultFetch<Impl>::pipelineIcacheAccesses(ThreadID tid)
> {
> if (!issuePipelinedIfetch[tid]) {
> return;
> }
>
> // The next PC to access.
> TheISA::PCState thisPC = pc[tid];
>
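> // Fetches out of the microcode ROM do not access the I-cache, so
> // there is nothing to pipeline.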
> if (isRomMicroPC(thisPC.microPC())) {
> return;
> }
>
> Addr pcOffset = fetchOffset[tid];
> Addr fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;
>
> // Align the fetch PC so it's at the start of a cache block.
> Addr block_PC = icacheBlockAlignPC(fetchAddr);
>
> // Unless the buffer already has the block, fetch it from the icache.
> if (!(cacheDataValid[tid] && block_PC == cacheDataPC[tid])) {
> DPRINTF(Fetch, "[tid:%i]: Issuing a pipelined I-cache access, "
> "starting at PC %s.\n", tid, thisPC);
>
> fetchCacheLine(fetchAddr, tid, thisPC.instAddr());
> }
> }
>
> template<class Impl>
> void
> DefaultFetch<Impl>::profileStall(ThreadID tid)
> {
> DPRINTF(Fetch,"There are no more threads available to fetch from.\n");
>
> // @todo Per-thread stats
>
> if (drainPending) {
> ++fetchPendingDrainCycles;
> DPRINTF(Fetch, "Fetch is waiting for a drain!\n");
> } else if (activeThreads->empty()) {
> ++fetchNoActiveThreadStallCycles;
> DPRINTF(Fetch, "Fetch has no active thread!\n");
> } else if (fetchStatus[tid] == Blocked) {
> ++fetchBlockedCycles;
> DPRINTF(Fetch, "[tid:%i]: Fetch is blocked!\n", tid);
> } else if (fetchStatus[tid] == Squashing) {
> ++fetchSquashCycles;
> DPRINTF(Fetch, "[tid:%i]: Fetch is squashing!\n", tid);
> } else if (fetchStatus[tid] == IcacheWaitResponse) {
> ++icacheStallCycles;
> DPRINTF(Fetch, "[tid:%i]: Fetch is waiting cache response!\n",
> tid);
> } else if (fetchStatus[tid] == ItlbWait) {
> ++fetchTlbCycles;
> DPRINTF(Fetch, "[tid:%i]: Fetch is waiting ITLB walk to "
> "finish!\n", tid);
> } else if (fetchStatus[tid] == TrapPending) {
> ++fetchPendingTrapStallCycles;
> DPRINTF(Fetch, "[tid:%i]: Fetch is waiting for a pending trap!\n",
> tid);
> } else if (fetchStatus[tid] == QuiescePending) {
> ++fetchPendingQuiesceStallCycles;
> DPRINTF(Fetch, "[tid:%i]: Fetch is waiting for a pending quiesce "
> "instruction!\n", tid);
> } else if (fetchStatus[tid] == IcacheWaitRetry) {
> ++fetchIcacheWaitRetryStallCycles;
> DPRINTF(Fetch, "[tid:%i]: Fetch is waiting for an I-cache retry!\n",
> tid);
> } else if (fetchStatus[tid] == NoGoodAddr) {
> DPRINTF(Fetch, "[tid:%i]: Fetch predicted non-executable address\n",
> tid);
> } else {
> DPRINTF(Fetch, "[tid:%i]: Unexpected fetch stall reason (Status: %i).\n",
> tid, fetchStatus[tid]);
> }
> }