fetch_impl.hh revision 10020:2f33cb012383
/*
 * Copyright (c) 2010-2013 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#ifndef __CPU_O3_FETCH_IMPL_HH__
#define __CPU_O3_FETCH_IMPL_HH__

#include <algorithm>
#include <cstring>
#include <list>
#include <map>
#include <queue>

#include "arch/isa_traits.hh"
#include "arch/tlb.hh"
#include "arch/utility.hh"
#include "arch/vtophys.hh"
#include "base/types.hh"
#include "config/the_isa.hh"
#include "cpu/base.hh"
//#include "cpu/checker/cpu.hh"
#include "cpu/o3/fetch.hh"
#include "cpu/exetrace.hh"
#include "debug/Activity.hh"
#include "debug/Drain.hh"
#include "debug/Fetch.hh"
#include "debug/O3PipeView.hh"
#include "mem/packet.hh"
#include "params/DerivO3CPU.hh"
#include "sim/byteswap.hh"
#include "sim/core.hh"
#include "sim/eventq.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

using namespace std;

template<class Impl>
DefaultFetch<Impl>::DefaultFetch(O3CPU *_cpu, DerivO3CPUParams *params)
    : cpu(_cpu),
      decodeToFetchDelay(params->decodeToFetchDelay),
      renameToFetchDelay(params->renameToFetchDelay),
      iewToFetchDelay(params->iewToFetchDelay),
      commitToFetchDelay(params->commitToFetchDelay),
      fetchWidth(params->fetchWidth),
      retryPkt(NULL),
      retryTid(InvalidThreadID),
      cacheBlkSize(cpu->cacheLineSize()),
      fetchBufferSize(params->fetchBufferSize),
      fetchBufferMask(fetchBufferSize - 1),
      numThreads(params->numThreads),
      numFetchingThreads(params->smtNumFetchingThreads),
      finishTranslationEvent(this)
{
    if (numThreads > Impl::MaxThreads)
        fatal("numThreads (%d) is larger than compiled limit (%d),\n"
              "\tincrease MaxThreads in src/cpu/o3/impl.hh\n",
              numThreads, static_cast<int>(Impl::MaxThreads));
    if (fetchWidth > Impl::MaxWidth)
        fatal("fetchWidth (%d) is larger than compiled limit (%d),\n"
              "\tincrease MaxWidth in src/cpu/o3/impl.hh\n",
              fetchWidth, static_cast<int>(Impl::MaxWidth));
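    // Note: fetchBufferMask (fetchBufferSize - 1) is presumably what the
    // fetch-buffer alignment helper uses to round addresses down to a
    // buffer boundary, which implicitly assumes fetchBufferSize is a power
    // of two; the checks below only verify that the buffer is no larger
    // than, and evenly divides, a cache block.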
    if (fetchBufferSize > cacheBlkSize)
        fatal("fetch buffer size (%u bytes) is greater than the cache "
              "block size (%u bytes)\n", fetchBufferSize, cacheBlkSize);
    if (cacheBlkSize % fetchBufferSize)
        fatal("cache block (%u bytes) is not a multiple of the "
              "fetch buffer (%u bytes)\n", cacheBlkSize, fetchBufferSize);

    std::string policy = params->smtFetchPolicy;

    // Convert string to lowercase
    std::transform(policy.begin(), policy.end(), policy.begin(),
                   (int(*)(int)) tolower);

    // Figure out fetch policy
    if (policy == "singlethread") {
        fetchPolicy = SingleThread;
        if (numThreads > 1)
            panic("Invalid Fetch Policy for a SMT workload.");
    } else if (policy == "roundrobin") {
        fetchPolicy = RoundRobin;
        DPRINTF(Fetch, "Fetch policy set to Round Robin\n");
    } else if (policy == "branch") {
        fetchPolicy = Branch;
        DPRINTF(Fetch, "Fetch policy set to Branch Count\n");
    } else if (policy == "iqcount") {
        fetchPolicy = IQ;
        DPRINTF(Fetch, "Fetch policy set to IQ count\n");
    } else if (policy == "lsqcount") {
        fetchPolicy = LSQ;
        DPRINTF(Fetch, "Fetch policy set to LSQ count\n");
    } else {
        fatal("Invalid Fetch Policy. Options Are: {SingleThread,"
              " RoundRobin,LSQcount,IQcount}\n");
    }

    // Get the size of an instruction.
    instSize = sizeof(TheISA::MachInst);

    for (int i = 0; i < Impl::MaxThreads; i++) {
        decoder[i] = NULL;
        fetchBuffer[i] = NULL;
        fetchBufferPC[i] = 0;
        fetchBufferValid[i] = false;
    }

    branchPred = params->branchPred;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        decoder[tid] = new TheISA::Decoder;
        // Create space to buffer the cache line data,
        // which may not hold the entire cache line.
        fetchBuffer[tid] = new uint8_t[fetchBufferSize];
    }
}

template <class Impl>
std::string
DefaultFetch<Impl>::name() const
{
    return cpu->name() + ".fetch";
}

template <class Impl>
void
DefaultFetch<Impl>::regStats()
{
    icacheStallCycles
        .name(name() + ".icacheStallCycles")
        .desc("Number of cycles fetch is stalled on an Icache miss")
        .prereq(icacheStallCycles);

    fetchedInsts
        .name(name() + ".Insts")
        .desc("Number of instructions fetch has processed")
        .prereq(fetchedInsts);

    fetchedBranches
        .name(name() + ".Branches")
        .desc("Number of branches that fetch encountered")
        .prereq(fetchedBranches);

    predictedBranches
        .name(name() + ".predictedBranches")
        .desc("Number of branches that fetch has predicted taken")
        .prereq(predictedBranches);

    fetchCycles
        .name(name() + ".Cycles")
        .desc("Number of cycles fetch has run and was not squashing or"
              " blocked")
        .prereq(fetchCycles);

    fetchSquashCycles
        .name(name() + ".SquashCycles")
        .desc("Number of cycles fetch has spent squashing")
        .prereq(fetchSquashCycles);

    fetchTlbCycles
        .name(name() + ".TlbCycles")
        .desc("Number of cycles fetch has spent waiting for tlb")
        .prereq(fetchTlbCycles);

    fetchIdleCycles
        .name(name() + ".IdleCycles")
        .desc("Number of cycles fetch was idle")
        .prereq(fetchIdleCycles);

    fetchBlockedCycles
        .name(name() + ".BlockedCycles")
        .desc("Number of cycles fetch has spent blocked")
        .prereq(fetchBlockedCycles);

    fetchedCacheLines
        .name(name() + ".CacheLines")
        .desc("Number of cache lines fetched")
        .prereq(fetchedCacheLines);

    fetchMiscStallCycles
        .name(name() + ".MiscStallCycles")
        .desc("Number of cycles fetch has spent waiting on interrupts, or "
              "bad addresses, or out of MSHRs")
        .prereq(fetchMiscStallCycles);

    fetchPendingDrainCycles
        .name(name() + ".PendingDrainCycles")
        .desc("Number of cycles fetch has spent waiting on pipes to drain")
        .prereq(fetchPendingDrainCycles);

    fetchNoActiveThreadStallCycles
        .name(name() + ".NoActiveThreadStallCycles")
        .desc("Number of stall cycles due to no active thread to fetch from")
        .prereq(fetchNoActiveThreadStallCycles);

    fetchPendingTrapStallCycles
        .name(name() + ".PendingTrapStallCycles")
        .desc("Number of stall cycles due to pending traps")
        .prereq(fetchPendingTrapStallCycles);

    fetchPendingQuiesceStallCycles
        .name(name() + ".PendingQuiesceStallCycles")
        .desc("Number of stall cycles due to pending quiesce instructions")
        .prereq(fetchPendingQuiesceStallCycles);

    fetchIcacheWaitRetryStallCycles
        .name(name() + ".IcacheWaitRetryStallCycles")
        .desc("Number of stall cycles due to full MSHR")
        .prereq(fetchIcacheWaitRetryStallCycles);

    fetchIcacheSquashes
        .name(name() + ".IcacheSquashes")
        .desc("Number of outstanding Icache misses that were squashed")
        .prereq(fetchIcacheSquashes);

    fetchTlbSquashes
        .name(name() + ".ItlbSquashes")
        .desc("Number of outstanding ITLB misses that were squashed")
        .prereq(fetchTlbSquashes);

    fetchNisnDist
        .init(/* base value */ 0,
              /* last value */ fetchWidth,
              /* bucket size */ 1)
        .name(name() + ".rateDist")
        .desc("Number of instructions fetched each cycle (Total)")
        .flags(Stats::pdf);

    idleRate
        .name(name() + ".idleRate")
        .desc("Percent of cycles fetch was idle")
        .prereq(idleRate);
    idleRate = fetchIdleCycles * 100 / cpu->numCycles;

    branchRate
        .name(name() + ".branchRate")
        .desc("Number of branch fetches per cycle")
        .flags(Stats::total);
    branchRate = fetchedBranches / cpu->numCycles;

    fetchRate
        .name(name() + ".rate")
        .desc("Number of inst fetches per cycle")
        .flags(Stats::total);
    fetchRate = fetchedInsts / cpu->numCycles;
}

template<class Impl>
void
DefaultFetch<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *time_buffer)
{
    timeBuffer = time_buffer;

    // Create wires to get information from proper places in time buffer.
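    // Each wire uses a negative index, i.e. it reads the time buffer entry
    // written that many cycles ago, matching the corresponding
    // stage-to-fetch communication delay.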
    fromDecode = timeBuffer->getWire(-decodeToFetchDelay);
    fromRename = timeBuffer->getWire(-renameToFetchDelay);
    fromIEW = timeBuffer->getWire(-iewToFetchDelay);
    fromCommit = timeBuffer->getWire(-commitToFetchDelay);
}

template<class Impl>
void
DefaultFetch<Impl>::setActiveThreads(std::list<ThreadID> *at_ptr)
{
    activeThreads = at_ptr;
}

template<class Impl>
void
DefaultFetch<Impl>::setFetchQueue(TimeBuffer<FetchStruct> *fq_ptr)
{
    fetchQueue = fq_ptr;

    // Create wire to write information to proper place in fetch queue.
    toDecode = fetchQueue->getWire(0);
}

template<class Impl>
void
DefaultFetch<Impl>::startupStage()
{
    assert(priorityList.empty());
    resetStage();

    // Fetch needs to start fetching instructions at the very beginning,
    // so it must start up in active state.
    switchToActive();
}

template<class Impl>
void
DefaultFetch<Impl>::resetStage()
{
    numInst = 0;
    interruptPending = false;
    cacheBlocked = false;

    priorityList.clear();

    // Setup PC and nextPC with initial state.
    for (ThreadID tid = 0; tid < numThreads; ++tid) {
        fetchStatus[tid] = Running;
        pc[tid] = cpu->pcState(tid);
        fetchOffset[tid] = 0;
        macroop[tid] = NULL;

        delayedCommit[tid] = false;
        memReq[tid] = NULL;

        stalls[tid].decode = false;
        stalls[tid].rename = false;
        stalls[tid].iew = false;
        stalls[tid].commit = false;
        stalls[tid].drain = false;

        fetchBufferPC[tid] = 0;
        fetchBufferValid[tid] = false;

        priorityList.push_back(tid);
    }

    wroteToTimeBuffer = false;
    _status = Inactive;
}

template<class Impl>
void
DefaultFetch<Impl>::processCacheCompletion(PacketPtr pkt)
{
    ThreadID tid = pkt->req->threadId();

    DPRINTF(Fetch, "[tid:%u] Waking up from cache miss.\n", tid);
    assert(!cpu->switchedOut());

    // Only change the status if it's still waiting on the icache access
    // to return.
    if (fetchStatus[tid] != IcacheWaitResponse ||
        pkt->req != memReq[tid]) {
        ++fetchIcacheSquashes;
        delete pkt->req;
        delete pkt;
        return;
    }

    memcpy(fetchBuffer[tid], pkt->getPtr<uint8_t>(), fetchBufferSize);
    fetchBufferValid[tid] = true;

    // Wake up the CPU (if it went to sleep and was waiting on
    // this completion event).
    cpu->wakeCPU();

    DPRINTF(Activity, "[tid:%u] Activating fetch due to cache completion\n",
            tid);

    switchToActive();

    // Only switch to IcacheAccessComplete if we're not stalled as well.
    if (checkStall(tid)) {
        fetchStatus[tid] = Blocked;
    } else {
        fetchStatus[tid] = IcacheAccessComplete;
    }

    pkt->req->setAccessLatency();
    // Reset the mem req to NULL.
    delete pkt->req;
    delete pkt;
    memReq[tid] = NULL;
}

template <class Impl>
void
DefaultFetch<Impl>::drainResume()
{
    for (ThreadID i = 0; i < Impl::MaxThreads; ++i)
        stalls[i].drain = false;
}

template <class Impl>
void
DefaultFetch<Impl>::drainSanityCheck() const
{
    assert(isDrained());
    assert(retryPkt == NULL);
    assert(retryTid == InvalidThreadID);
    assert(cacheBlocked == false);
    assert(interruptPending == false);

    for (ThreadID i = 0; i < numThreads; ++i) {
        assert(!memReq[i]);
        assert(!stalls[i].decode);
        assert(!stalls[i].rename);
        assert(!stalls[i].iew);
        assert(!stalls[i].commit);
        assert(fetchStatus[i] == Idle || stalls[i].drain);
    }

    branchPred->drainSanityCheck();
}

template <class Impl>
bool
DefaultFetch<Impl>::isDrained() const
{
    /* Make sure that threads are either idle or that the commit stage
     * has signaled that draining has completed by setting the drain
     * stall flag. This effectively forces the pipeline to be disabled
     * until the whole system is drained (simulation may continue to
     * drain other components).
     */
    for (ThreadID i = 0; i < numThreads; ++i) {
        if (!(fetchStatus[i] == Idle ||
              (fetchStatus[i] == Blocked && stalls[i].drain)))
            return false;
    }

    /* The pipeline might start up again in the middle of the drain
     * cycle if the finish translation event is scheduled, so make
     * sure that's not the case.
     */
    return !finishTranslationEvent.scheduled();
}

template <class Impl>
void
DefaultFetch<Impl>::takeOverFrom()
{
    assert(cpu->getInstPort().isConnected());
    resetStage();
}

template <class Impl>
void
DefaultFetch<Impl>::drainStall(ThreadID tid)
{
    assert(cpu->isDraining());
    assert(!stalls[tid].drain);
    DPRINTF(Drain, "%i: Thread drained.\n", tid);
    stalls[tid].drain = true;
}

template <class Impl>
void
DefaultFetch<Impl>::wakeFromQuiesce()
{
    DPRINTF(Fetch, "Waking up from quiesce\n");
    // Hopefully this is safe
    // @todo: Allow other threads to wake from quiesce.
    fetchStatus[0] = Running;
}

template <class Impl>
inline void
DefaultFetch<Impl>::switchToActive()
{
    if (_status == Inactive) {
        DPRINTF(Activity, "Activating stage.\n");

        cpu->activateStage(O3CPU::FetchIdx);

        _status = Active;
    }
}

template <class Impl>
inline void
DefaultFetch<Impl>::switchToInactive()
{
    if (_status == Active) {
        DPRINTF(Activity, "Deactivating stage.\n");

        cpu->deactivateStage(O3CPU::FetchIdx);

        _status = Inactive;
    }
}

template <class Impl>
bool
DefaultFetch<Impl>::lookupAndUpdateNextPC(
        DynInstPtr &inst, TheISA::PCState &nextPC)
{
    // Do branch prediction check here.
    // A bit of a misnomer...next_PC is actually the current PC until
    // this function updates it.
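    // Non-control instructions simply advance the PC past the current
    // (micro-)op; control instructions consult the branch predictor below,
    // which may redirect nextPC to a predicted target.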
    bool predict_taken;

    if (!inst->isControl()) {
        TheISA::advancePC(nextPC, inst->staticInst);
        inst->setPredTarg(nextPC);
        inst->setPredTaken(false);
        return false;
    }

    ThreadID tid = inst->threadNumber;
    predict_taken = branchPred->predict(inst->staticInst, inst->seqNum,
                                        nextPC, tid);

    if (predict_taken) {
        DPRINTF(Fetch, "[tid:%i]: [sn:%i]: Branch predicted to be taken to %s.\n",
                tid, inst->seqNum, nextPC);
    } else {
        DPRINTF(Fetch, "[tid:%i]: [sn:%i]: Branch predicted to be not taken.\n",
                tid, inst->seqNum);
    }

    DPRINTF(Fetch, "[tid:%i]: [sn:%i] Branch predicted to go to %s.\n",
            tid, inst->seqNum, nextPC);
    inst->setPredTarg(nextPC);
    inst->setPredTaken(predict_taken);

    ++fetchedBranches;

    if (predict_taken) {
        ++predictedBranches;
    }

    return predict_taken;
}

template <class Impl>
bool
DefaultFetch<Impl>::fetchCacheLine(Addr vaddr, ThreadID tid, Addr pc)
{
    Fault fault = NoFault;

    assert(!cpu->switchedOut());

    // @todo: not sure if these should block translation.
    //AlphaDep
    if (cacheBlocked) {
        DPRINTF(Fetch, "[tid:%i] Can't fetch cache line, cache blocked\n",
                tid);
        return false;
    } else if (checkInterrupt(pc) && !delayedCommit[tid]) {
        // Hold off fetch from getting new instructions when:
        // Cache is blocked, or
        // while an interrupt is pending and we're not in PAL mode, or
        // fetch is switched out.
        DPRINTF(Fetch, "[tid:%i] Can't fetch cache line, interrupt pending\n",
                tid);
        return false;
    }

    // Align the fetch address to the start of a fetch buffer segment.
    Addr fetchBufferBlockPC = fetchBufferAlignPC(vaddr);

    DPRINTF(Fetch, "[tid:%i] Fetching cache line %#x for addr %#x\n",
            tid, fetchBufferBlockPC, vaddr);

    // Setup the memReq to do a read of the first instruction's address.
    // Set the appropriate read size and flags as well.
    // Build request here.
    RequestPtr mem_req =
        new Request(tid, fetchBufferBlockPC, fetchBufferSize,
                    Request::INST_FETCH, cpu->instMasterId(), pc,
                    cpu->thread[tid]->contextId(), tid);

    memReq[tid] = mem_req;

    // Initiate translation of the icache block
    fetchStatus[tid] = ItlbWait;
    FetchTranslation *trans = new FetchTranslation(this);
    cpu->itb->translateTiming(mem_req, cpu->thread[tid]->getTC(),
                              trans, BaseTLB::Execute);
    return true;
}

template <class Impl>
void
DefaultFetch<Impl>::finishTranslation(Fault fault, RequestPtr mem_req)
{
    ThreadID tid = mem_req->threadId();
    Addr fetchBufferBlockPC = mem_req->getVaddr();

    assert(!cpu->switchedOut());

    // Wake up CPU if it was idle
    cpu->wakeCPU();

    if (fetchStatus[tid] != ItlbWait || mem_req != memReq[tid] ||
        mem_req->getVaddr() != memReq[tid]->getVaddr()) {
        DPRINTF(Fetch, "[tid:%i] Ignoring itlb completed after squash\n",
                tid);
        ++fetchTlbSquashes;
        delete mem_req;
        return;
    }

    // If translation was successful, attempt to read the icache block.
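    // On a successful translation, a ReadReq packet is built and sent out
    // through the CPU's instruction port (waiting in IcacheWaitRetry if the
    // port refuses it). On a fault, a nop carrying the fault is handed to
    // decode so that commit can eventually take the trap.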
    if (fault == NoFault) {
        // Check that we're not going off into random memory
        // If we have, just wait around for commit to squash something and put
        // us on the right track
        if (!cpu->system->isMemAddr(mem_req->getPaddr())) {
            warn("Address %#x is outside of physical memory, stopping fetch\n",
                 mem_req->getPaddr());
            fetchStatus[tid] = NoGoodAddr;
            delete mem_req;
            memReq[tid] = NULL;
            return;
        }

        // Build packet here.
        PacketPtr data_pkt = new Packet(mem_req, MemCmd::ReadReq);
        data_pkt->dataDynamicArray(new uint8_t[fetchBufferSize]);

        fetchBufferPC[tid] = fetchBufferBlockPC;
        fetchBufferValid[tid] = false;
        DPRINTF(Fetch, "Fetch: Doing instruction read.\n");

        fetchedCacheLines++;

        // Access the cache.
        if (!cpu->getInstPort().sendTimingReq(data_pkt)) {
            assert(retryPkt == NULL);
            assert(retryTid == InvalidThreadID);
            DPRINTF(Fetch, "[tid:%i] Out of MSHRs!\n", tid);

            fetchStatus[tid] = IcacheWaitRetry;
            retryPkt = data_pkt;
            retryTid = tid;
            cacheBlocked = true;
        } else {
            DPRINTF(Fetch, "[tid:%i]: Doing Icache access.\n", tid);
            DPRINTF(Activity, "[tid:%i]: Activity: Waiting on I-cache "
                    "response.\n", tid);

            lastIcacheStall[tid] = curTick();
            fetchStatus[tid] = IcacheWaitResponse;
        }
    } else {
        if (!(numInst < fetchWidth)) {
            assert(!finishTranslationEvent.scheduled());
            finishTranslationEvent.setFault(fault);
            finishTranslationEvent.setReq(mem_req);
            cpu->schedule(finishTranslationEvent,
                          cpu->clockEdge(Cycles(1)));
            return;
        }
        DPRINTF(Fetch, "[tid:%i] Got back req with addr %#x but expected %#x\n",
                tid, mem_req->getVaddr(), memReq[tid]->getVaddr());
        // Translation faulted, icache request won't be sent.
        delete mem_req;
        memReq[tid] = NULL;

        // Send the fault to commit. This thread will not do anything
        // until commit handles the fault. The only other way it can
        // wake up is if a squash comes along and changes the PC.
        TheISA::PCState fetchPC = pc[tid];

        DPRINTF(Fetch, "[tid:%i]: Translation faulted, building noop.\n", tid);
        // We will use a nop in order to carry the fault.
        DynInstPtr instruction = buildInst(tid,
            decoder[tid]->decode(TheISA::NoopMachInst, fetchPC.instAddr()),
            NULL, fetchPC, fetchPC, false);

        instruction->setPredTarg(fetchPC);
        instruction->fault = fault;
        wroteToTimeBuffer = true;

        DPRINTF(Activity, "Activity this cycle.\n");
        cpu->activityThisCycle();

        fetchStatus[tid] = TrapPending;

        DPRINTF(Fetch, "[tid:%i]: Blocked, need to handle the trap.\n", tid);
        DPRINTF(Fetch, "[tid:%i]: fault (%s) detected @ PC %s.\n",
                tid, fault->name(), pc[tid]);
    }
    _status = updateFetchStatus();
}

template <class Impl>
inline void
DefaultFetch<Impl>::doSquash(const TheISA::PCState &newPC,
                             const DynInstPtr squashInst, ThreadID tid)
{
    DPRINTF(Fetch, "[tid:%i]: Squashing, setting PC to: %s.\n",
            tid, newPC);

    pc[tid] = newPC;
    fetchOffset[tid] = 0;
    if (squashInst && squashInst->pcState().instAddr() == newPC.instAddr())
        macroop[tid] = squashInst->macroop;
    else
        macroop[tid] = NULL;
    decoder[tid]->reset();

    // Clear the icache miss if it's outstanding.
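    // An in-flight icache packet is not recalled here; when it returns,
    // processCacheCompletion() notices that it no longer matches
    // memReq[tid], counts it as a squashed access, and deletes it.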
    if (fetchStatus[tid] == IcacheWaitResponse) {
        DPRINTF(Fetch, "[tid:%i]: Squashing outstanding Icache miss.\n",
                tid);
        memReq[tid] = NULL;
    } else if (fetchStatus[tid] == ItlbWait) {
        DPRINTF(Fetch, "[tid:%i]: Squashing outstanding ITLB miss.\n",
                tid);
        memReq[tid] = NULL;
    }

    // Get rid of the retrying packet if it was from this thread.
    if (retryTid == tid) {
        assert(cacheBlocked);
        if (retryPkt) {
            delete retryPkt->req;
            delete retryPkt;
        }
        retryPkt = NULL;
        retryTid = InvalidThreadID;
    }

    fetchStatus[tid] = Squashing;

    // Since microops are being squashed, it is not known whether the
    // youngest non-squashed microop was marked delayed commit
    // or not. Setting the flag to true ensures that the
    // interrupts are not handled when they cannot be, though
    // some opportunities to handle interrupts may be missed.
    delayedCommit[tid] = true;

    ++fetchSquashCycles;
}

template<class Impl>
void
DefaultFetch<Impl>::squashFromDecode(const TheISA::PCState &newPC,
                                     const DynInstPtr squashInst,
                                     const InstSeqNum seq_num, ThreadID tid)
{
    DPRINTF(Fetch, "[tid:%i]: Squashing from decode.\n", tid);

    doSquash(newPC, squashInst, tid);

    // Tell the CPU to remove any instructions that are in flight between
    // fetch and decode.
    cpu->removeInstsUntil(seq_num, tid);
}

template<class Impl>
bool
DefaultFetch<Impl>::checkStall(ThreadID tid) const
{
    bool ret_val = false;

    if (cpu->contextSwitch) {
        DPRINTF(Fetch,"[tid:%i]: Stalling for a context switch.\n",tid);
        ret_val = true;
    } else if (stalls[tid].drain) {
        assert(cpu->isDraining());
        DPRINTF(Fetch,"[tid:%i]: Drain stall detected.\n",tid);
        ret_val = true;
    } else if (stalls[tid].decode) {
        DPRINTF(Fetch,"[tid:%i]: Stall from Decode stage detected.\n",tid);
        ret_val = true;
    } else if (stalls[tid].rename) {
        DPRINTF(Fetch,"[tid:%i]: Stall from Rename stage detected.\n",tid);
        ret_val = true;
    } else if (stalls[tid].iew) {
        DPRINTF(Fetch,"[tid:%i]: Stall from IEW stage detected.\n",tid);
        ret_val = true;
    } else if (stalls[tid].commit) {
        DPRINTF(Fetch,"[tid:%i]: Stall from Commit stage detected.\n",tid);
        ret_val = true;
    }

    return ret_val;
}

template<class Impl>
typename DefaultFetch<Impl>::FetchStatus
DefaultFetch<Impl>::updateFetchStatus()
{
    //Check Running
    list<ThreadID>::iterator threads = activeThreads->begin();
    list<ThreadID>::iterator end = activeThreads->end();

    while (threads != end) {
        ThreadID tid = *threads++;

        if (fetchStatus[tid] == Running ||
            fetchStatus[tid] == Squashing ||
            fetchStatus[tid] == IcacheAccessComplete) {

            if (_status == Inactive) {
                DPRINTF(Activity, "[tid:%i]: Activating stage.\n",tid);

                if (fetchStatus[tid] == IcacheAccessComplete) {
                    DPRINTF(Activity, "[tid:%i]: Activating fetch due to cache"
                            "completion\n",tid);
                }

                cpu->activateStage(O3CPU::FetchIdx);
            }

            return Active;
        }
    }

    // Stage is switching from active to inactive, notify CPU of it.
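    // None of the active threads is in a runnable state, so the stage can
    // be deactivated if it was previously active.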
    if (_status == Active) {
        DPRINTF(Activity, "Deactivating stage.\n");

        cpu->deactivateStage(O3CPU::FetchIdx);
    }

    return Inactive;
}

template <class Impl>
void
DefaultFetch<Impl>::squash(const TheISA::PCState &newPC,
                           const InstSeqNum seq_num, DynInstPtr squashInst,
                           ThreadID tid)
{
    DPRINTF(Fetch, "[tid:%u]: Squash from commit.\n", tid);

    doSquash(newPC, squashInst, tid);

    // Tell the CPU to remove any instructions that are not in the ROB.
    cpu->removeInstsNotInROB(tid);
}

template <class Impl>
void
DefaultFetch<Impl>::tick()
{
    list<ThreadID>::iterator threads = activeThreads->begin();
    list<ThreadID>::iterator end = activeThreads->end();
    bool status_change = false;

    wroteToTimeBuffer = false;

    for (ThreadID i = 0; i < Impl::MaxThreads; ++i) {
        issuePipelinedIfetch[i] = false;
    }

    while (threads != end) {
        ThreadID tid = *threads++;

        // Check the signals for each thread to determine the proper status
        // for each thread.
        bool updated_status = checkSignalsAndUpdate(tid);
        status_change = status_change || updated_status;
    }

    DPRINTF(Fetch, "Running stage.\n");

    if (FullSystem) {
        if (fromCommit->commitInfo[0].interruptPending) {
            interruptPending = true;
        }

        if (fromCommit->commitInfo[0].clearInterrupt) {
            interruptPending = false;
        }
    }

    for (threadFetched = 0; threadFetched < numFetchingThreads;
         threadFetched++) {
        // Fetch each of the actively fetching threads.
        fetch(status_change);
    }

    // Record number of instructions fetched this cycle for distribution.
    fetchNisnDist.sample(numInst);

    if (status_change) {
        // Change the fetch stage status if there was a status change.
        _status = updateFetchStatus();
    }

    // If there was activity this cycle, inform the CPU of it.
    if (wroteToTimeBuffer || cpu->contextSwitch) {
        DPRINTF(Activity, "Activity this cycle.\n");

        cpu->activityThisCycle();
    }

    // Issue the next I-cache request if possible.
    for (ThreadID i = 0; i < Impl::MaxThreads; ++i) {
        if (issuePipelinedIfetch[i]) {
            pipelineIcacheAccesses(i);
        }
    }

    // Reset the number of instructions we've fetched this cycle.
    numInst = 0;
}

template <class Impl>
bool
DefaultFetch<Impl>::checkSignalsAndUpdate(ThreadID tid)
{
    // Update the per thread stall statuses.
    if (fromDecode->decodeBlock[tid]) {
        stalls[tid].decode = true;
    }

    if (fromDecode->decodeUnblock[tid]) {
        assert(stalls[tid].decode);
        assert(!fromDecode->decodeBlock[tid]);
        stalls[tid].decode = false;
    }

    if (fromRename->renameBlock[tid]) {
        stalls[tid].rename = true;
    }

    if (fromRename->renameUnblock[tid]) {
        assert(stalls[tid].rename);
        assert(!fromRename->renameBlock[tid]);
        stalls[tid].rename = false;
    }

    if (fromIEW->iewBlock[tid]) {
        stalls[tid].iew = true;
    }

    if (fromIEW->iewUnblock[tid]) {
        assert(stalls[tid].iew);
        assert(!fromIEW->iewBlock[tid]);
        stalls[tid].iew = false;
    }

    if (fromCommit->commitBlock[tid]) {
        stalls[tid].commit = true;
    }

    if (fromCommit->commitUnblock[tid]) {
        assert(stalls[tid].commit);
        assert(!fromCommit->commitBlock[tid]);
        stalls[tid].commit = false;
    }

    // Check squash signals from commit.
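    // A squash from commit takes priority: it is handled first and causes
    // an immediate return, so the decode squash and stall checks further
    // below are only reached when commit did not squash this cycle.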
    if (fromCommit->commitInfo[tid].squash) {

        DPRINTF(Fetch, "[tid:%u]: Squashing instructions due to squash "
                "from commit.\n",tid);
        // In any case, squash.
        squash(fromCommit->commitInfo[tid].pc,
               fromCommit->commitInfo[tid].doneSeqNum,
               fromCommit->commitInfo[tid].squashInst, tid);

        // If it was a branch mispredict on a control instruction, update the
        // branch predictor with that instruction; otherwise just squash the
        // invalid state we generated after that sequence number.
        if (fromCommit->commitInfo[tid].mispredictInst &&
            fromCommit->commitInfo[tid].mispredictInst->isControl()) {
            branchPred->squash(fromCommit->commitInfo[tid].doneSeqNum,
                               fromCommit->commitInfo[tid].pc,
                               fromCommit->commitInfo[tid].branchTaken,
                               tid);
        } else {
            branchPred->squash(fromCommit->commitInfo[tid].doneSeqNum,
                               tid);
        }

        return true;
    } else if (fromCommit->commitInfo[tid].doneSeqNum) {
        // Update the branch predictor if it wasn't a squashed instruction
        // that was broadcasted.
        branchPred->update(fromCommit->commitInfo[tid].doneSeqNum, tid);
    }

    // Check ROB squash signals from commit.
    if (fromCommit->commitInfo[tid].robSquashing) {
        DPRINTF(Fetch, "[tid:%u]: ROB is still squashing.\n", tid);

        // Continue to squash.
        fetchStatus[tid] = Squashing;

        return true;
    }

    // Check squash signals from decode.
    if (fromDecode->decodeInfo[tid].squash) {
        DPRINTF(Fetch, "[tid:%u]: Squashing instructions due to squash "
                "from decode.\n",tid);

        // Update the branch predictor.
        if (fromDecode->decodeInfo[tid].branchMispredict) {
            branchPred->squash(fromDecode->decodeInfo[tid].doneSeqNum,
                               fromDecode->decodeInfo[tid].nextPC,
                               fromDecode->decodeInfo[tid].branchTaken,
                               tid);
        } else {
            branchPred->squash(fromDecode->decodeInfo[tid].doneSeqNum,
                               tid);
        }

        if (fetchStatus[tid] != Squashing) {

            DPRINTF(Fetch, "Squashing from decode with PC = %s\n",
                    fromDecode->decodeInfo[tid].nextPC);
            // Squash unless we're already squashing
            squashFromDecode(fromDecode->decodeInfo[tid].nextPC,
                             fromDecode->decodeInfo[tid].squashInst,
                             fromDecode->decodeInfo[tid].doneSeqNum,
                             tid);

            return true;
        }
    }

    if (checkStall(tid) &&
        fetchStatus[tid] != IcacheWaitResponse &&
        fetchStatus[tid] != IcacheWaitRetry &&
        fetchStatus[tid] != ItlbWait &&
        fetchStatus[tid] != QuiescePending) {
        DPRINTF(Fetch, "[tid:%i]: Setting to blocked\n",tid);

        fetchStatus[tid] = Blocked;

        return true;
    }

    if (fetchStatus[tid] == Blocked ||
        fetchStatus[tid] == Squashing) {
        // Switch status to running if fetch isn't being told to block or
        // squash this cycle.
        DPRINTF(Fetch, "[tid:%i]: Done squashing, switching to running.\n",
                tid);

        fetchStatus[tid] = Running;

        return true;
    }

    // If we've reached this point, we have not gotten any signals that
    // cause fetch to change its status. Fetch remains the same as before.
    return false;
}

template<class Impl>
typename Impl::DynInstPtr
DefaultFetch<Impl>::buildInst(ThreadID tid, StaticInstPtr staticInst,
                              StaticInstPtr curMacroop, TheISA::PCState thisPC,
                              TheISA::PCState nextPC, bool trace)
{
    // Get a sequence number.
    InstSeqNum seq = cpu->getAndIncrementInstSeq();

    // Create a new DynInst from the instruction fetched.
    DynInstPtr instruction =
        new DynInst(staticInst, curMacroop, thisPC, nextPC, seq, cpu);
    instruction->setTid(tid);

    instruction->setASID(tid);

    instruction->setThreadState(cpu->thread[tid]);

    DPRINTF(Fetch, "[tid:%i]: Instruction PC %#x (%d) created "
            "[sn:%lli].\n", tid, thisPC.instAddr(),
            thisPC.microPC(), seq);

    DPRINTF(Fetch, "[tid:%i]: Instruction is: %s\n", tid,
            instruction->staticInst->
            disassemble(thisPC.instAddr()));

#if TRACING_ON
    if (trace) {
        instruction->traceData =
            cpu->getTracer()->getInstRecord(curTick(), cpu->tcBase(tid),
                    instruction->staticInst, thisPC, curMacroop);
    }
#else
    instruction->traceData = NULL;
#endif

    // Add instruction to the CPU's list of instructions.
    instruction->setInstListIt(cpu->addInst(instruction));

    // Write the instruction to the first slot in the queue
    // that heads to decode.
    assert(numInst < fetchWidth);
    toDecode->insts[toDecode->size++] = instruction;

    // Keep track of whether we can take an interrupt at this boundary.
    delayedCommit[tid] = instruction->isDelayedCommit();

    return instruction;
}

template<class Impl>
void
DefaultFetch<Impl>::fetch(bool &status_change)
{
    //////////////////////////////////////////
    // Start actual fetch
    //////////////////////////////////////////
    ThreadID tid = getFetchingThread(fetchPolicy);

    assert(!cpu->switchedOut());

    if (tid == InvalidThreadID) {
        // Breaks looping condition in tick()
        threadFetched = numFetchingThreads;

        if (numThreads == 1) {  // @todo Per-thread stats
            profileStall(0);
        }

        return;
    }

    DPRINTF(Fetch, "Attempting to fetch from [tid:%i]\n", tid);

    // The current PC.
    TheISA::PCState thisPC = pc[tid];

    Addr pcOffset = fetchOffset[tid];
    Addr fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;

    bool inRom = isRomMicroPC(thisPC.microPC());

    // If returning from the delay of a cache miss, then update the status
    // to running, otherwise do the cache access. Possibly move this up
    // to tick() function.
    if (fetchStatus[tid] == IcacheAccessComplete) {
        DPRINTF(Fetch, "[tid:%i]: Icache miss is complete.\n", tid);

        fetchStatus[tid] = Running;
        status_change = true;
    } else if (fetchStatus[tid] == Running) {
        // Align the fetch PC so it's at the start of a fetch buffer segment.
        Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);

        // If the buffer is no longer valid or fetchAddr has moved to point
        // to the next cache block, AND we have no remaining ucode
        // from a macro-op, then start fetch from icache.
        if (!(fetchBufferValid[tid] && fetchBufferBlockPC == fetchBufferPC[tid])
            && !inRom && !macroop[tid]) {
            DPRINTF(Fetch, "[tid:%i]: Attempting to translate and read "
                    "instruction, starting at PC %s.\n", tid, thisPC);

            fetchCacheLine(fetchAddr, tid, thisPC.instAddr());

            if (fetchStatus[tid] == IcacheWaitResponse)
                ++icacheStallCycles;
            else if (fetchStatus[tid] == ItlbWait)
                ++fetchTlbCycles;
            else
                ++fetchMiscStallCycles;
            return;
        } else if ((checkInterrupt(thisPC.instAddr()) && !delayedCommit[tid])) {
            // Stall the CPU if an interrupt is posted and we're not issuing
            // a delayed commit micro-op currently (delayed commit instructions
            // are not interruptible by interrupts, only faults)
            ++fetchMiscStallCycles;
            DPRINTF(Fetch, "[tid:%i]: Fetch is stalled!\n", tid);
            return;
        }
    } else {
        if (fetchStatus[tid] == Idle) {
            ++fetchIdleCycles;
            DPRINTF(Fetch, "[tid:%i]: Fetch is idle!\n", tid);
        }

        // Status is Idle, so fetch should do nothing.
        return;
    }

    ++fetchCycles;

    TheISA::PCState nextPC = thisPC;

    StaticInstPtr staticInst = NULL;
    StaticInstPtr curMacroop = macroop[tid];

    // If the read of the first instruction was successful, then grab the
    // instructions from the rest of the cache line and put them into the
    // queue heading to decode.

    DPRINTF(Fetch, "[tid:%i]: Adding instructions to queue to "
            "decode.\n", tid);

    // Need to keep track of whether or not a predicted branch
    // ended this fetch block.
    bool predictedBranch = false;

    TheISA::MachInst *cacheInsts =
        reinterpret_cast<TheISA::MachInst *>(fetchBuffer[tid]);

    const unsigned numInsts = fetchBufferSize / instSize;
    unsigned blkOffset = (fetchAddr - fetchBufferPC[tid]) / instSize;

    // Loop through instruction memory from the cache.
    // Keep issuing while fetchWidth is available and branch is not
    // predicted taken.
    while (numInst < fetchWidth && !predictedBranch) {

        // We need to process more memory if we aren't going to get a
        // StaticInst from the rom, the current macroop, or what's already
        // in the decoder.
        bool needMem = !inRom && !curMacroop &&
            !decoder[tid]->instReady();
        fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;
        Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);

        if (needMem) {
            // If buffer is no longer valid or fetchAddr has moved to point
            // to the next cache block then start fetch from icache.
            if (!fetchBufferValid[tid] ||
                fetchBufferBlockPC != fetchBufferPC[tid])
                break;

            if (blkOffset >= numInsts) {
                // We need to process more memory, but we've run out of the
                // current block.
                break;
            }

            if (ISA_HAS_DELAY_SLOT && pcOffset == 0) {
                // Walk past any annulled delay slot instructions.
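                // This only applies to ISAs with architectural delay slots
                // (e.g. MIPS or SPARC); elsewhere ISA_HAS_DELAY_SLOT is 0
                // and this block is never entered.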
                Addr pcAddr = thisPC.instAddr() & BaseCPU::PCMask;
                while (fetchAddr != pcAddr && blkOffset < numInsts) {
                    blkOffset++;
                    fetchAddr += instSize;
                }
                if (blkOffset >= numInsts)
                    break;
            }

            MachInst inst = TheISA::gtoh(cacheInsts[blkOffset]);
            decoder[tid]->moreBytes(thisPC, fetchAddr, inst);

            if (decoder[tid]->needMoreBytes()) {
                blkOffset++;
                fetchAddr += instSize;
                pcOffset += instSize;
            }
        }

        // Extract as many instructions and/or microops as we can from
        // the memory we've processed so far.
        do {
            if (!(curMacroop || inRom)) {
                if (decoder[tid]->instReady()) {
                    staticInst = decoder[tid]->decode(thisPC);

                    // Increment stat of fetched instructions.
                    ++fetchedInsts;

                    if (staticInst->isMacroop()) {
                        curMacroop = staticInst;
                    } else {
                        pcOffset = 0;
                    }
                } else {
                    // We need more bytes for this instruction so blkOffset and
                    // pcOffset will be updated.
                    break;
                }
            }
            // Whether we're moving to a new macroop because we're at the
            // end of the current one, or the branch predictor incorrectly
            // thinks we are...
            bool newMacro = false;
            if (curMacroop || inRom) {
                if (inRom) {
                    staticInst = cpu->microcodeRom.fetchMicroop(
                            thisPC.microPC(), curMacroop);
                } else {
                    staticInst = curMacroop->fetchMicroop(thisPC.microPC());
                }
                newMacro |= staticInst->isLastMicroop();
            }

            DynInstPtr instruction =
                buildInst(tid, staticInst, curMacroop,
                          thisPC, nextPC, true);

            numInst++;

#if TRACING_ON
            if (DTRACE(O3PipeView)) {
                instruction->fetchTick = curTick();
            }
#endif

            nextPC = thisPC;

            // If we're branching after this instruction, quit fetching
            // from the same block.
            predictedBranch |= thisPC.branching();
            predictedBranch |=
                lookupAndUpdateNextPC(instruction, nextPC);
            if (predictedBranch) {
                DPRINTF(Fetch, "Branch detected with PC = %s\n", thisPC);
            }

            newMacro |= thisPC.instAddr() != nextPC.instAddr();

            // Move to the next instruction, unless we have a branch.
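            // (If a taken branch was predicted above, nextPC already holds
            // the predicted target, so this assignment also performs the
            // redirect.)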
            thisPC = nextPC;
            inRom = isRomMicroPC(thisPC.microPC());

            if (newMacro) {
                fetchAddr = thisPC.instAddr() & BaseCPU::PCMask;
                blkOffset = (fetchAddr - fetchBufferPC[tid]) / instSize;
                pcOffset = 0;
                curMacroop = NULL;
            }

            if (instruction->isQuiesce()) {
                DPRINTF(Fetch,
                        "Quiesce instruction encountered, halting fetch!");
                fetchStatus[tid] = QuiescePending;
                status_change = true;
                break;
            }
        } while ((curMacroop || decoder[tid]->instReady()) &&
                 numInst < fetchWidth);
    }

    if (predictedBranch) {
        DPRINTF(Fetch, "[tid:%i]: Done fetching, predicted branch "
                "instruction encountered.\n", tid);
    } else if (numInst >= fetchWidth) {
        DPRINTF(Fetch, "[tid:%i]: Done fetching, reached fetch bandwidth "
                "for this cycle.\n", tid);
    } else if (blkOffset >= fetchBufferSize) {
        DPRINTF(Fetch, "[tid:%i]: Done fetching, reached the end of the "
                "fetch buffer.\n", tid);
    }

    macroop[tid] = curMacroop;
    fetchOffset[tid] = pcOffset;

    if (numInst > 0) {
        wroteToTimeBuffer = true;
    }

    pc[tid] = thisPC;

    // Pipeline a fetch if we're crossing a fetch buffer boundary and not in
    // a state that would preclude fetching.
    fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;
    Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);
    issuePipelinedIfetch[tid] = fetchBufferBlockPC != fetchBufferPC[tid] &&
        fetchStatus[tid] != IcacheWaitResponse &&
        fetchStatus[tid] != ItlbWait &&
        fetchStatus[tid] != IcacheWaitRetry &&
        fetchStatus[tid] != QuiescePending &&
        !curMacroop;
}

template<class Impl>
void
DefaultFetch<Impl>::recvRetry()
{
    if (retryPkt != NULL) {
        assert(cacheBlocked);
        assert(retryTid != InvalidThreadID);
        assert(fetchStatus[retryTid] == IcacheWaitRetry);

        if (cpu->getInstPort().sendTimingReq(retryPkt)) {
            fetchStatus[retryTid] = IcacheWaitResponse;
            retryPkt = NULL;
            retryTid = InvalidThreadID;
            cacheBlocked = false;
        }
    } else {
        assert(retryTid == InvalidThreadID);
        // Access has been squashed since it was sent out. Just clear
        // the cache being blocked.
        cacheBlocked = false;
    }
}

///////////////////////////////////////
//                                   //
//  SMT FETCH POLICY MAINTAINED HERE //
//                                   //
///////////////////////////////////////
template<class Impl>
ThreadID
DefaultFetch<Impl>::getFetchingThread(FetchPriority &fetch_priority)
{
    if (numThreads > 1) {
        switch (fetch_priority) {

          case SingleThread:
            return 0;

          case RoundRobin:
            return roundRobin();

          case IQ:
            return iqCount();

          case LSQ:
            return lsqCount();

          case Branch:
            return branchCount();

          default:
            return InvalidThreadID;
        }
    } else {
        list<ThreadID>::iterator thread = activeThreads->begin();
        if (thread == activeThreads->end()) {
            return InvalidThreadID;
        }

        ThreadID tid = *thread;

        if (fetchStatus[tid] == Running ||
            fetchStatus[tid] == IcacheAccessComplete ||
            fetchStatus[tid] == Idle) {
            return tid;
        } else {
            return InvalidThreadID;
        }
    }
}

template<class Impl>
ThreadID
DefaultFetch<Impl>::roundRobin()
{
    list<ThreadID>::iterator pri_iter = priorityList.begin();
    list<ThreadID>::iterator end = priorityList.end();

    ThreadID high_pri;

    while (pri_iter != end) {
        high_pri = *pri_iter;

        assert(high_pri <= numThreads);

        if (fetchStatus[high_pri] == Running ||
            fetchStatus[high_pri] == IcacheAccessComplete ||
            fetchStatus[high_pri] == Idle) {

            priorityList.erase(pri_iter);
            priorityList.push_back(high_pri);

            return high_pri;
        }

        pri_iter++;
    }

    return InvalidThreadID;
}

template<class Impl>
ThreadID
DefaultFetch<Impl>::iqCount()
{
    std::priority_queue<unsigned> PQ;
    std::map<unsigned, ThreadID> threadMap;

    list<ThreadID>::iterator threads = activeThreads->begin();
    list<ThreadID>::iterator end = activeThreads->end();

    while (threads != end) {
        ThreadID tid = *threads++;
        unsigned iqCount = fromIEW->iewInfo[tid].iqCount;

        PQ.push(iqCount);
        threadMap[iqCount] = tid;
    }

    while (!PQ.empty()) {
        ThreadID high_pri = threadMap[PQ.top()];

        if (fetchStatus[high_pri] == Running ||
            fetchStatus[high_pri] == IcacheAccessComplete ||
            fetchStatus[high_pri] == Idle)
            return high_pri;
        else
            PQ.pop();
    }

    return InvalidThreadID;
}

template<class Impl>
ThreadID
DefaultFetch<Impl>::lsqCount()
{
    std::priority_queue<unsigned> PQ;
    std::map<unsigned, ThreadID> threadMap;

    list<ThreadID>::iterator threads = activeThreads->begin();
    list<ThreadID>::iterator end = activeThreads->end();

    while (threads != end) {
        ThreadID tid = *threads++;
        unsigned ldstqCount = fromIEW->iewInfo[tid].ldstqCount;

        PQ.push(ldstqCount);
        threadMap[ldstqCount] = tid;
    }

    while (!PQ.empty()) {
        ThreadID high_pri = threadMap[PQ.top()];

        if (fetchStatus[high_pri] == Running ||
            fetchStatus[high_pri] == IcacheAccessComplete ||
            fetchStatus[high_pri] == Idle)
            return high_pri;
        else
            PQ.pop();
    }

    return InvalidThreadID;
}

template<class Impl>
ThreadID
DefaultFetch<Impl>::branchCount()
{
#if 0
    list<ThreadID>::iterator thread = activeThreads->begin();
    assert(thread != activeThreads->end());
    ThreadID tid = *thread;
#endif

    panic("Branch Count Fetch policy unimplemented\n");
    return InvalidThreadID;
}

template<class Impl>
void
DefaultFetch<Impl>::pipelineIcacheAccesses(ThreadID tid)
{
    if (!issuePipelinedIfetch[tid]) {
        return;
    }

    // The next PC to access.
    TheISA::PCState thisPC = pc[tid];

    if (isRomMicroPC(thisPC.microPC())) {
        return;
    }

    Addr pcOffset = fetchOffset[tid];
    Addr fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;

    // Align the fetch PC so it's at the start of a fetch buffer segment.
    Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);

    // Unless the buffer already has the block, fetch it from the icache.
    if (!(fetchBufferValid[tid] && fetchBufferBlockPC == fetchBufferPC[tid])) {
        DPRINTF(Fetch, "[tid:%i]: Issuing a pipelined I-cache access, "
                "starting at PC %s.\n", tid, thisPC);

        fetchCacheLine(fetchAddr, tid, thisPC.instAddr());
    }
}

template<class Impl>
void
DefaultFetch<Impl>::profileStall(ThreadID tid) {
    DPRINTF(Fetch,"There are no more threads available to fetch from.\n");

    // @todo Per-thread stats

    if (stalls[tid].drain) {
        ++fetchPendingDrainCycles;
        DPRINTF(Fetch, "Fetch is waiting for a drain!\n");
    } else if (activeThreads->empty()) {
        ++fetchNoActiveThreadStallCycles;
        DPRINTF(Fetch, "Fetch has no active thread!\n");
    } else if (fetchStatus[tid] == Blocked) {
        ++fetchBlockedCycles;
        DPRINTF(Fetch, "[tid:%i]: Fetch is blocked!\n", tid);
    } else if (fetchStatus[tid] == Squashing) {
        ++fetchSquashCycles;
        DPRINTF(Fetch, "[tid:%i]: Fetch is squashing!\n", tid);
    } else if (fetchStatus[tid] == IcacheWaitResponse) {
        ++icacheStallCycles;
        DPRINTF(Fetch, "[tid:%i]: Fetch is waiting on a cache response!\n",
                tid);
    } else if (fetchStatus[tid] == ItlbWait) {
        ++fetchTlbCycles;
        DPRINTF(Fetch, "[tid:%i]: Fetch is waiting for an ITLB walk to "
                "finish!\n", tid);
    } else if (fetchStatus[tid] == TrapPending) {
        ++fetchPendingTrapStallCycles;
        DPRINTF(Fetch, "[tid:%i]: Fetch is waiting for a pending trap!\n",
                tid);
    } else if (fetchStatus[tid] == QuiescePending) {
        ++fetchPendingQuiesceStallCycles;
        DPRINTF(Fetch, "[tid:%i]: Fetch is waiting for a pending quiesce "
                "instruction!\n", tid);
    } else if (fetchStatus[tid] == IcacheWaitRetry) {
        ++fetchIcacheWaitRetryStallCycles;
        DPRINTF(Fetch, "[tid:%i]: Fetch is waiting for an I-cache retry!\n",
                tid);
    } else if (fetchStatus[tid] == NoGoodAddr) {
        DPRINTF(Fetch, "[tid:%i]: Fetch predicted non-executable address\n",
                tid);
    } else {
        DPRINTF(Fetch, "[tid:%i]: Unexpected fetch stall reason (Status: %i).\n",
                tid, fetchStatus[tid]);
    }
}

#endif //__CPU_O3_FETCH_IMPL_HH__