// timing.cc revision 11303
/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2010-2013,2015 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "cpu/simple/timing.hh"

#include <algorithm>
#include <cstring>

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/exetrace.hh"
#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/Mwait.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;
662810Srdreslin@umich.edu 672810Srdreslin@umich.eduvoid 683860Sstever@eecs.umich.eduTimingSimpleCPU::init() 693860Sstever@eecs.umich.edu{ 702810Srdreslin@umich.edu BaseSimpleCPU::init(); 712810Srdreslin@umich.edu} 723738Sstever@eecs.umich.edu 732810Srdreslin@umich.eduvoid 742810Srdreslin@umich.eduTimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t) 753738Sstever@eecs.umich.edu{ 763738Sstever@eecs.umich.edu pkt = _pkt; 773738Sstever@eecs.umich.edu cpu->schedule(this, t); 783738Sstever@eecs.umich.edu} 793738Sstever@eecs.umich.edu 803738Sstever@eecs.umich.eduTimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p) 813738Sstever@eecs.umich.edu : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this), 823738Sstever@eecs.umich.edu dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0), 833738Sstever@eecs.umich.edu fetchEvent(this) 843738Sstever@eecs.umich.edu{ 853738Sstever@eecs.umich.edu _status = Idle; 863738Sstever@eecs.umich.edu} 873738Sstever@eecs.umich.edu 884478Sstever@eecs.umich.edu 894478Sstever@eecs.umich.edu 904478Sstever@eecs.umich.eduTimingSimpleCPU::~TimingSimpleCPU() 913738Sstever@eecs.umich.edu{ 923738Sstever@eecs.umich.edu} 933738Sstever@eecs.umich.edu 943738Sstever@eecs.umich.eduDrainState 953738Sstever@eecs.umich.eduTimingSimpleCPU::drain() 963738Sstever@eecs.umich.edu{ 973738Sstever@eecs.umich.edu if (switchedOut()) 983738Sstever@eecs.umich.edu return DrainState::Drained; 993738Sstever@eecs.umich.edu 1003738Sstever@eecs.umich.edu if (_status == Idle || 1013738Sstever@eecs.umich.edu (_status == BaseSimpleCPU::Running && isDrained())) { 1023738Sstever@eecs.umich.edu DPRINTF(Drain, "No need to drain.\n"); 1033738Sstever@eecs.umich.edu activeThreads.clear(); 1043738Sstever@eecs.umich.edu return DrainState::Drained; 1053738Sstever@eecs.umich.edu } else { 1063738Sstever@eecs.umich.edu DPRINTF(Drain, "Requesting drain.\n"); 1073738Sstever@eecs.umich.edu 1083738Sstever@eecs.umich.edu // The fetch event can become 
descheduled if a drain didn't 1093738Sstever@eecs.umich.edu // succeed on the first attempt. We need to reschedule it if 1103738Sstever@eecs.umich.edu // the CPU is waiting for a microcode routine to complete. 1114626Sstever@eecs.umich.edu if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled()) 1124626Sstever@eecs.umich.edu schedule(fetchEvent, clockEdge()); 1134626Sstever@eecs.umich.edu 1144458Sstever@eecs.umich.edu return DrainState::Draining; 1154478Sstever@eecs.umich.edu } 1164478Sstever@eecs.umich.edu} 1174478Sstever@eecs.umich.edu 1183738Sstever@eecs.umich.eduvoid 1193738Sstever@eecs.umich.eduTimingSimpleCPU::drainResume() 1204458Sstever@eecs.umich.edu{ 1214458Sstever@eecs.umich.edu assert(!fetchEvent.scheduled()); 1223738Sstever@eecs.umich.edu if (switchedOut()) 1233738Sstever@eecs.umich.edu return; 1243738Sstever@eecs.umich.edu 1254458Sstever@eecs.umich.edu DPRINTF(SimpleCPU, "Resume\n"); 1264626Sstever@eecs.umich.edu verifyMemoryMode(); 1274626Sstever@eecs.umich.edu 1283738Sstever@eecs.umich.edu assert(!threadContexts.empty()); 1293738Sstever@eecs.umich.edu 1302810Srdreslin@umich.edu _status = BaseSimpleCPU::Idle; 1312810Srdreslin@umich.edu 1324626Sstever@eecs.umich.edu for (ThreadID tid = 0; tid < numThreads; tid++) { 1332810Srdreslin@umich.edu if (threadInfo[tid]->thread->status() == ThreadContext::Active) { 1342810Srdreslin@umich.edu threadInfo[tid]->notIdleFraction = 1; 1352810Srdreslin@umich.edu 1362810Srdreslin@umich.edu activeThreads.push_back(tid); 1373861Sstever@eecs.umich.edu 1382810Srdreslin@umich.edu _status = BaseSimpleCPU::Running; 1392810Srdreslin@umich.edu 1403860Sstever@eecs.umich.edu // Fetch if any threads active 1413860Sstever@eecs.umich.edu if (!fetchEvent.scheduled()) { 1423860Sstever@eecs.umich.edu schedule(fetchEvent, nextCycle()); 1433860Sstever@eecs.umich.edu } 1443860Sstever@eecs.umich.edu } else { 1453860Sstever@eecs.umich.edu threadInfo[tid]->notIdleFraction = 0; 1463860Sstever@eecs.umich.edu } 
1473860Sstever@eecs.umich.edu } 1483860Sstever@eecs.umich.edu 1493860Sstever@eecs.umich.edu system->totalNumInsts = 0; 1503860Sstever@eecs.umich.edu} 1513860Sstever@eecs.umich.edu 1523860Sstever@eecs.umich.edubool 1534626Sstever@eecs.umich.eduTimingSimpleCPU::tryCompleteDrain() 1543860Sstever@eecs.umich.edu{ 1553860Sstever@eecs.umich.edu if (drainState() != DrainState::Draining) 1563860Sstever@eecs.umich.edu return false; 1573860Sstever@eecs.umich.edu 1583860Sstever@eecs.umich.edu DPRINTF(Drain, "tryCompleteDrain.\n"); 1593860Sstever@eecs.umich.edu if (!isDrained()) 1603860Sstever@eecs.umich.edu return false; 1613860Sstever@eecs.umich.edu 1623860Sstever@eecs.umich.edu DPRINTF(Drain, "CPU done draining, processing drain event\n"); 1633860Sstever@eecs.umich.edu signalDrainDone(); 1643860Sstever@eecs.umich.edu 1654628Sstever@eecs.umich.edu return true; 1664219Srdreslin@umich.edu} 1674219Srdreslin@umich.edu 1684219Srdreslin@umich.eduvoid 1694219Srdreslin@umich.eduTimingSimpleCPU::switchOut() 1704626Sstever@eecs.umich.edu{ 1713860Sstever@eecs.umich.edu SimpleExecContext& t_info = *threadInfo[curThread]; 1723860Sstever@eecs.umich.edu M5_VAR_USED SimpleThread* thread = t_info.thread; 1733860Sstever@eecs.umich.edu 1743860Sstever@eecs.umich.edu BaseSimpleCPU::switchOut(); 1753860Sstever@eecs.umich.edu 1763860Sstever@eecs.umich.edu assert(!fetchEvent.scheduled()); 1774626Sstever@eecs.umich.edu assert(_status == BaseSimpleCPU::Running || _status == Idle); 1783860Sstever@eecs.umich.edu assert(!t_info.stayAtPC); 1793860Sstever@eecs.umich.edu assert(thread->microPC() == 0); 1803860Sstever@eecs.umich.edu 1813860Sstever@eecs.umich.edu updateCycleCounts(); 1824626Sstever@eecs.umich.edu} 1834626Sstever@eecs.umich.edu 1843860Sstever@eecs.umich.edu 1854665Sstever@eecs.umich.eduvoid 1864628Sstever@eecs.umich.eduTimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU) 1874626Sstever@eecs.umich.edu{ 1884670Sstever@eecs.umich.edu BaseSimpleCPU::takeOverFrom(oldCPU); 1894670Sstever@eecs.umich.edu 
1903860Sstever@eecs.umich.edu previousCycle = curCycle(); 1913860Sstever@eecs.umich.edu} 1923860Sstever@eecs.umich.edu 1933860Sstever@eecs.umich.eduvoid 1943860Sstever@eecs.umich.eduTimingSimpleCPU::verifyMemoryMode() const 1953860Sstever@eecs.umich.edu{ 1964670Sstever@eecs.umich.edu if (!system->isTimingMode()) { 1974670Sstever@eecs.umich.edu fatal("The timing CPU requires the memory system to be in " 1983860Sstever@eecs.umich.edu "'timing' mode.\n"); 1993860Sstever@eecs.umich.edu } 2003860Sstever@eecs.umich.edu} 2013860Sstever@eecs.umich.edu 2023860Sstever@eecs.umich.eduvoid 2033860Sstever@eecs.umich.eduTimingSimpleCPU::activateContext(ThreadID thread_num) 2043860Sstever@eecs.umich.edu{ 2053860Sstever@eecs.umich.edu DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num); 2062810Srdreslin@umich.edu 2072810Srdreslin@umich.edu assert(thread_num < numThreads); 2082810Srdreslin@umich.edu 2092810Srdreslin@umich.edu threadInfo[thread_num]->notIdleFraction = 1; 2102810Srdreslin@umich.edu if (_status == BaseSimpleCPU::Idle) 2112810Srdreslin@umich.edu _status = BaseSimpleCPU::Running; 2122810Srdreslin@umich.edu 2132810Srdreslin@umich.edu // kick things off by initiating the fetch of the next instruction 2143861Sstever@eecs.umich.edu if (!fetchEvent.scheduled()) 2152810Srdreslin@umich.edu schedule(fetchEvent, clockEdge(Cycles(0))); 2163860Sstever@eecs.umich.edu 2173860Sstever@eecs.umich.edu if (std::find(activeThreads.begin(), activeThreads.end(), thread_num) 2182810Srdreslin@umich.edu == activeThreads.end()) { 2194626Sstever@eecs.umich.edu activeThreads.push_back(thread_num); 2203315Sstever@eecs.umich.edu } 2213861Sstever@eecs.umich.edu} 2223860Sstever@eecs.umich.edu 2233860Sstever@eecs.umich.edu 2243860Sstever@eecs.umich.eduvoid 2254626Sstever@eecs.umich.eduTimingSimpleCPU::suspendContext(ThreadID thread_num) 2263315Sstever@eecs.umich.edu{ 2272813Srdreslin@umich.edu DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num); 2283860Sstever@eecs.umich.edu 
2294626Sstever@eecs.umich.edu assert(thread_num < numThreads); 2302810Srdreslin@umich.edu activeThreads.remove(thread_num); 2312810Srdreslin@umich.edu 2322810Srdreslin@umich.edu if (_status == Idle) 2332810Srdreslin@umich.edu return; 2342810Srdreslin@umich.edu 2352812Srdreslin@umich.edu assert(_status == BaseSimpleCPU::Running); 2362810Srdreslin@umich.edu 2373738Sstever@eecs.umich.edu threadInfo[thread_num]->notIdleFraction = 0; 2384190Ssaidi@eecs.umich.edu 2392813Srdreslin@umich.edu if (activeThreads.empty()) { 2402810Srdreslin@umich.edu _status = Idle; 2412810Srdreslin@umich.edu 2422810Srdreslin@umich.edu if (fetchEvent.scheduled()) { 2432810Srdreslin@umich.edu deschedule(fetchEvent); 2442982Sstever@eecs.umich.edu } 2452810Srdreslin@umich.edu } 2462810Srdreslin@umich.edu} 2474626Sstever@eecs.umich.edu 2482810Srdreslin@umich.edubool 2492810Srdreslin@umich.eduTimingSimpleCPU::handleReadPacket(PacketPtr pkt) 2504626Sstever@eecs.umich.edu{ 2514626Sstever@eecs.umich.edu SimpleExecContext &t_info = *threadInfo[curThread]; 2524626Sstever@eecs.umich.edu SimpleThread* thread = t_info.thread; 2532810Srdreslin@umich.edu 2544626Sstever@eecs.umich.edu RequestPtr req = pkt->req; 2552810Srdreslin@umich.edu 2562810Srdreslin@umich.edu // We're about the issues a locked load, so tell the monitor 2574626Sstever@eecs.umich.edu // to start caring about this address 2584626Sstever@eecs.umich.edu if (pkt->isRead() && pkt->req->isLLSC()) { 2594626Sstever@eecs.umich.edu TheISA::handleLockedRead(thread, pkt->req); 2602810Srdreslin@umich.edu } 2614626Sstever@eecs.umich.edu if (req->isMmappedIpr()) { 2623293Srdreslin@umich.edu Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt); 2633293Srdreslin@umich.edu new IprEvent(pkt, this, clockEdge(delay)); 2642810Srdreslin@umich.edu _status = DcacheWaitResponse; 2652982Sstever@eecs.umich.edu dcache_pkt = NULL; 2662810Srdreslin@umich.edu } else if (!dcachePort.sendTimingReq(pkt)) { 2674626Sstever@eecs.umich.edu _status = DcacheRetry; 
2682810Srdreslin@umich.edu dcache_pkt = pkt; 2692810Srdreslin@umich.edu } else { 2702810Srdreslin@umich.edu _status = DcacheWaitResponse; 2712982Sstever@eecs.umich.edu // memory system takes ownership of packet 2722810Srdreslin@umich.edu dcache_pkt = NULL; 2734626Sstever@eecs.umich.edu } 2742810Srdreslin@umich.edu return dcache_pkt == NULL; 2754626Sstever@eecs.umich.edu} 2764626Sstever@eecs.umich.edu 2774626Sstever@eecs.umich.eduvoid 2784626Sstever@eecs.umich.eduTimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res, 2794626Sstever@eecs.umich.edu bool read) 2804626Sstever@eecs.umich.edu{ 2814626Sstever@eecs.umich.edu SimpleExecContext &t_info = *threadInfo[curThread]; 2822810Srdreslin@umich.edu SimpleThread* thread = t_info.thread; 2832810Srdreslin@umich.edu 2842982Sstever@eecs.umich.edu PacketPtr pkt = buildPacket(req, read); 2852810Srdreslin@umich.edu pkt->dataDynamic<uint8_t>(data); 2862982Sstever@eecs.umich.edu if (req->getFlags().isSet(Request::NO_ACCESS)) { 2872810Srdreslin@umich.edu assert(!dcache_pkt); 2884626Sstever@eecs.umich.edu pkt->makeResponse(); 2894626Sstever@eecs.umich.edu completeDataAccess(pkt); 2904626Sstever@eecs.umich.edu } else if (read) { 2914626Sstever@eecs.umich.edu handleReadPacket(pkt); 2924626Sstever@eecs.umich.edu } else { 2934626Sstever@eecs.umich.edu bool do_access = true; // flag to suppress cache access 2944628Sstever@eecs.umich.edu 2954628Sstever@eecs.umich.edu if (req->isLLSC()) { 2964626Sstever@eecs.umich.edu do_access = TheISA::handleLockedWrite(thread, req, dcachePort.cacheBlockMask); 2974628Sstever@eecs.umich.edu } else if (req->isCondSwap()) { 2984626Sstever@eecs.umich.edu assert(res); 2994626Sstever@eecs.umich.edu req->setExtraData(*res); 3004626Sstever@eecs.umich.edu } 3014626Sstever@eecs.umich.edu 3024626Sstever@eecs.umich.edu if (do_access) { 3034626Sstever@eecs.umich.edu dcache_pkt = pkt; 3044626Sstever@eecs.umich.edu handleWritePacket(); 3054626Sstever@eecs.umich.edu threadSnoop(pkt, curThread); 
3064626Sstever@eecs.umich.edu } else { 3074626Sstever@eecs.umich.edu _status = DcacheWaitResponse; 3084626Sstever@eecs.umich.edu completeDataAccess(pkt); 3094626Sstever@eecs.umich.edu } 3104626Sstever@eecs.umich.edu } 3114626Sstever@eecs.umich.edu} 3124626Sstever@eecs.umich.edu 3134626Sstever@eecs.umich.eduvoid 3144626Sstever@eecs.umich.eduTimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2, 3154626Sstever@eecs.umich.edu RequestPtr req, uint8_t *data, bool read) 3164626Sstever@eecs.umich.edu{ 3172810Srdreslin@umich.edu PacketPtr pkt1, pkt2; 3184626Sstever@eecs.umich.edu buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read); 3192810Srdreslin@umich.edu if (req->getFlags().isSet(Request::NO_ACCESS)) { 3202810Srdreslin@umich.edu assert(!dcache_pkt); 3214626Sstever@eecs.umich.edu pkt1->makeResponse(); 3224626Sstever@eecs.umich.edu completeDataAccess(pkt1); 3232810Srdreslin@umich.edu } else if (read) { 3242810Srdreslin@umich.edu SplitFragmentSenderState * send_state = 3253861Sstever@eecs.umich.edu dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState); 3263861Sstever@eecs.umich.edu if (handleReadPacket(pkt1)) { 3273861Sstever@eecs.umich.edu send_state->clearFromParent(); 3283861Sstever@eecs.umich.edu send_state = dynamic_cast<SplitFragmentSenderState *>( 3293861Sstever@eecs.umich.edu pkt2->senderState); 3304626Sstever@eecs.umich.edu if (handleReadPacket(pkt2)) { 3313861Sstever@eecs.umich.edu send_state->clearFromParent(); 3322810Srdreslin@umich.edu } 3332810Srdreslin@umich.edu } 3342810Srdreslin@umich.edu } else { 335 dcache_pkt = pkt1; 336 SplitFragmentSenderState * send_state = 337 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState); 338 if (handleWritePacket()) { 339 send_state->clearFromParent(); 340 dcache_pkt = pkt2; 341 send_state = dynamic_cast<SplitFragmentSenderState *>( 342 pkt2->senderState); 343 if (handleWritePacket()) { 344 send_state->clearFromParent(); 345 } 346 } 347 } 348} 349 350void 
351TimingSimpleCPU::translationFault(const Fault &fault) 352{ 353 // fault may be NoFault in cases where a fault is suppressed, 354 // for instance prefetches. 355 updateCycleCounts(); 356 357 if (traceData) { 358 // Since there was a fault, we shouldn't trace this instruction. 359 delete traceData; 360 traceData = NULL; 361 } 362 363 postExecute(); 364 365 advanceInst(fault); 366} 367 368PacketPtr 369TimingSimpleCPU::buildPacket(RequestPtr req, bool read) 370{ 371 return read ? Packet::createRead(req) : Packet::createWrite(req); 372} 373 374void 375TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2, 376 RequestPtr req1, RequestPtr req2, RequestPtr req, 377 uint8_t *data, bool read) 378{ 379 pkt1 = pkt2 = NULL; 380 381 assert(!req1->isMmappedIpr() && !req2->isMmappedIpr()); 382 383 if (req->getFlags().isSet(Request::NO_ACCESS)) { 384 pkt1 = buildPacket(req, read); 385 return; 386 } 387 388 pkt1 = buildPacket(req1, read); 389 pkt2 = buildPacket(req2, read); 390 391 PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand()); 392 393 pkt->dataDynamic<uint8_t>(data); 394 pkt1->dataStatic<uint8_t>(data); 395 pkt2->dataStatic<uint8_t>(data + req1->getSize()); 396 397 SplitMainSenderState * main_send_state = new SplitMainSenderState; 398 pkt->senderState = main_send_state; 399 main_send_state->fragments[0] = pkt1; 400 main_send_state->fragments[1] = pkt2; 401 main_send_state->outstanding = 2; 402 pkt1->senderState = new SplitFragmentSenderState(pkt, 0); 403 pkt2->senderState = new SplitFragmentSenderState(pkt, 1); 404} 405 406Fault 407TimingSimpleCPU::readMem(Addr addr, uint8_t *data, 408 unsigned size, unsigned flags) 409{ 410 panic("readMem() is for atomic accesses, and should " 411 "never be called on TimingSimpleCPU.\n"); 412} 413 414Fault 415TimingSimpleCPU::initiateMemRead(Addr addr, unsigned size, unsigned flags) 416{ 417 SimpleExecContext &t_info = *threadInfo[curThread]; 418 SimpleThread* thread = t_info.thread; 419 420 Fault fault; 421 const 
int asid = 0; 422 const ThreadID tid = curThread; 423 const Addr pc = thread->instAddr(); 424 unsigned block_size = cacheLineSize(); 425 BaseTLB::Mode mode = BaseTLB::Read; 426 427 if (traceData) 428 traceData->setMem(addr, size, flags); 429 430 RequestPtr req = new Request(asid, addr, size, 431 flags, dataMasterId(), pc, 432 thread->contextId(), tid); 433 434 req->taskId(taskId()); 435 436 Addr split_addr = roundDown(addr + size - 1, block_size); 437 assert(split_addr <= addr || split_addr - addr < block_size); 438 439 _status = DTBWaitResponse; 440 if (split_addr > addr) { 441 RequestPtr req1, req2; 442 assert(!req->isLLSC() && !req->isSwap()); 443 req->splitOnVaddr(split_addr, req1, req2); 444 445 WholeTranslationState *state = 446 new WholeTranslationState(req, req1, req2, new uint8_t[size], 447 NULL, mode); 448 DataTranslation<TimingSimpleCPU *> *trans1 = 449 new DataTranslation<TimingSimpleCPU *>(this, state, 0); 450 DataTranslation<TimingSimpleCPU *> *trans2 = 451 new DataTranslation<TimingSimpleCPU *>(this, state, 1); 452 453 thread->dtb->translateTiming(req1, thread->getTC(), trans1, mode); 454 thread->dtb->translateTiming(req2, thread->getTC(), trans2, mode); 455 } else { 456 WholeTranslationState *state = 457 new WholeTranslationState(req, new uint8_t[size], NULL, mode); 458 DataTranslation<TimingSimpleCPU *> *translation 459 = new DataTranslation<TimingSimpleCPU *>(this, state); 460 thread->dtb->translateTiming(req, thread->getTC(), translation, mode); 461 } 462 463 return NoFault; 464} 465 466bool 467TimingSimpleCPU::handleWritePacket() 468{ 469 SimpleExecContext &t_info = *threadInfo[curThread]; 470 SimpleThread* thread = t_info.thread; 471 472 RequestPtr req = dcache_pkt->req; 473 if (req->isMmappedIpr()) { 474 Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt); 475 new IprEvent(dcache_pkt, this, clockEdge(delay)); 476 _status = DcacheWaitResponse; 477 dcache_pkt = NULL; 478 } else if (!dcachePort.sendTimingReq(dcache_pkt)) { 479 
_status = DcacheRetry; 480 } else { 481 _status = DcacheWaitResponse; 482 // memory system takes ownership of packet 483 dcache_pkt = NULL; 484 } 485 return dcache_pkt == NULL; 486} 487 488Fault 489TimingSimpleCPU::writeMem(uint8_t *data, unsigned size, 490 Addr addr, unsigned flags, uint64_t *res) 491{ 492 SimpleExecContext &t_info = *threadInfo[curThread]; 493 SimpleThread* thread = t_info.thread; 494 495 uint8_t *newData = new uint8_t[size]; 496 const int asid = 0; 497 const ThreadID tid = curThread; 498 const Addr pc = thread->instAddr(); 499 unsigned block_size = cacheLineSize(); 500 BaseTLB::Mode mode = BaseTLB::Write; 501 502 if (data == NULL) { 503 assert(flags & Request::CACHE_BLOCK_ZERO); 504 // This must be a cache block cleaning request 505 memset(newData, 0, size); 506 } else { 507 memcpy(newData, data, size); 508 } 509 510 if (traceData) 511 traceData->setMem(addr, size, flags); 512 513 RequestPtr req = new Request(asid, addr, size, 514 flags, dataMasterId(), pc, 515 thread->contextId(), tid); 516 517 req->taskId(taskId()); 518 519 Addr split_addr = roundDown(addr + size - 1, block_size); 520 assert(split_addr <= addr || split_addr - addr < block_size); 521 522 _status = DTBWaitResponse; 523 if (split_addr > addr) { 524 RequestPtr req1, req2; 525 assert(!req->isLLSC() && !req->isSwap()); 526 req->splitOnVaddr(split_addr, req1, req2); 527 528 WholeTranslationState *state = 529 new WholeTranslationState(req, req1, req2, newData, res, mode); 530 DataTranslation<TimingSimpleCPU *> *trans1 = 531 new DataTranslation<TimingSimpleCPU *>(this, state, 0); 532 DataTranslation<TimingSimpleCPU *> *trans2 = 533 new DataTranslation<TimingSimpleCPU *>(this, state, 1); 534 535 thread->dtb->translateTiming(req1, thread->getTC(), trans1, mode); 536 thread->dtb->translateTiming(req2, thread->getTC(), trans2, mode); 537 } else { 538 WholeTranslationState *state = 539 new WholeTranslationState(req, newData, res, mode); 540 DataTranslation<TimingSimpleCPU *> *translation = 
541 new DataTranslation<TimingSimpleCPU *>(this, state); 542 thread->dtb->translateTiming(req, thread->getTC(), translation, mode); 543 } 544 545 // Translation faults will be returned via finishTranslation() 546 return NoFault; 547} 548 549void 550TimingSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender) 551{ 552 for (ThreadID tid = 0; tid < numThreads; tid++) { 553 if (tid != sender) { 554 if(getCpuAddrMonitor(tid)->doMonitor(pkt)) { 555 wakeup(tid); 556 } 557 TheISA::handleLockedSnoop(threadInfo[tid]->thread, pkt, 558 dcachePort.cacheBlockMask); 559 } 560 } 561} 562 563void 564TimingSimpleCPU::finishTranslation(WholeTranslationState *state) 565{ 566 _status = BaseSimpleCPU::Running; 567 568 if (state->getFault() != NoFault) { 569 if (state->isPrefetch()) { 570 state->setNoFault(); 571 } 572 delete [] state->data; 573 state->deleteReqs(); 574 translationFault(state->getFault()); 575 } else { 576 if (!state->isSplit) { 577 sendData(state->mainReq, state->data, state->res, 578 state->mode == BaseTLB::Read); 579 } else { 580 sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq, 581 state->data, state->mode == BaseTLB::Read); 582 } 583 } 584 585 delete state; 586} 587 588 589void 590TimingSimpleCPU::fetch() 591{ 592 // Change thread if multi-threaded 593 swapActiveThread(); 594 595 SimpleExecContext &t_info = *threadInfo[curThread]; 596 SimpleThread* thread = t_info.thread; 597 598 DPRINTF(SimpleCPU, "Fetch\n"); 599 600 if (!curStaticInst || !curStaticInst->isDelayedCommit()) { 601 checkForInterrupts(); 602 checkPcEventQueue(); 603 } 604 605 // We must have just got suspended by a PC event 606 if (_status == Idle) 607 return; 608 609 TheISA::PCState pcState = thread->pcState(); 610 bool needToFetch = !isRomMicroPC(pcState.microPC()) && 611 !curMacroStaticInst; 612 613 if (needToFetch) { 614 _status = BaseSimpleCPU::Running; 615 Request *ifetch_req = new Request(); 616 ifetch_req->taskId(taskId()); 617 ifetch_req->setThreadContext(thread->contextId(), 
curThread); 618 setupFetchRequest(ifetch_req); 619 DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr()); 620 thread->itb->translateTiming(ifetch_req, thread->getTC(), 621 &fetchTranslation, BaseTLB::Execute); 622 } else { 623 _status = IcacheWaitResponse; 624 completeIfetch(NULL); 625 626 updateCycleCounts(); 627 } 628} 629 630 631void 632TimingSimpleCPU::sendFetch(const Fault &fault, RequestPtr req, 633 ThreadContext *tc) 634{ 635 if (fault == NoFault) { 636 DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n", 637 req->getVaddr(), req->getPaddr()); 638 ifetch_pkt = new Packet(req, MemCmd::ReadReq); 639 ifetch_pkt->dataStatic(&inst); 640 DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr()); 641 642 if (!icachePort.sendTimingReq(ifetch_pkt)) { 643 // Need to wait for retry 644 _status = IcacheRetry; 645 } else { 646 // Need to wait for cache to respond 647 _status = IcacheWaitResponse; 648 // ownership of packet transferred to memory system 649 ifetch_pkt = NULL; 650 } 651 } else { 652 DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr()); 653 delete req; 654 // fetch fault: advance directly to next instruction (fault handler) 655 _status = BaseSimpleCPU::Running; 656 advanceInst(fault); 657 } 658 659 updateCycleCounts(); 660} 661 662 663void 664TimingSimpleCPU::advanceInst(const Fault &fault) 665{ 666 SimpleExecContext &t_info = *threadInfo[curThread]; 667 668 if (_status == Faulting) 669 return; 670 671 if (fault != NoFault) { 672 advancePC(fault); 673 DPRINTF(SimpleCPU, "Fault occured, scheduling fetch event\n"); 674 reschedule(fetchEvent, clockEdge(), true); 675 _status = Faulting; 676 return; 677 } 678 679 680 if (!t_info.stayAtPC) 681 advancePC(fault); 682 683 if (tryCompleteDrain()) 684 return; 685 686 if (_status == BaseSimpleCPU::Running) { 687 // kick off fetch of next instruction... callback from icache 688 // response will cause that instruction to be executed, 689 // keeping the CPU running. 
690 fetch(); 691 } 692} 693 694 695void 696TimingSimpleCPU::completeIfetch(PacketPtr pkt) 697{ 698 SimpleExecContext& t_info = *threadInfo[curThread]; 699 700 DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ? 701 pkt->getAddr() : 0); 702 703 // received a response from the icache: execute the received 704 // instruction 705 assert(!pkt || !pkt->isError()); 706 assert(_status == IcacheWaitResponse); 707 708 _status = BaseSimpleCPU::Running; 709 710 updateCycleCounts(); 711 712 if (pkt) 713 pkt->req->setAccessLatency(); 714 715 716 preExecute(); 717 if (curStaticInst && curStaticInst->isMemRef()) { 718 // load or store: just send to dcache 719 Fault fault = curStaticInst->initiateAcc(&t_info, traceData); 720 721 // If we're not running now the instruction will complete in a dcache 722 // response callback or the instruction faulted and has started an 723 // ifetch 724 if (_status == BaseSimpleCPU::Running) { 725 if (fault != NoFault && traceData) { 726 // If there was a fault, we shouldn't trace this instruction. 
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(&t_info, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            // Faulting instructions are not traced unless the
            // ExecFaulting debug flag is enabled.
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        // No decoded instruction (e.g. still assembling a microcode
        // sequence): just move on to the next fetch.
        advanceInst(NoFault);
    }

    // The CPU owns the fetch response packet and its request at this
    // point; release both.
    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}

// Deferred tick for an icache response: hand the fetched packet back to
// the CPU on the clock edge it was scheduled for.
void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

// Called by the memory system when a fetch response arrives. Always
// sunk (returns true); processing is deferred to the next CPU clock
// edge via tickEvent.
bool
TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Received fetch response %#x\n", pkt->getAddr());
    // we should only ever see one response per cycle since we only
    // issue a new request once this response is sunk
    assert(!tickEvent.scheduled());
    // delay processing of returned data until next CPU clock edge
    tickEvent.schedule(pkt, cpu->clockEdge());

    return true;
}

// Called by the memory system when a previously rejected fetch request
// may be retried. On success the memory system takes ownership of the
// packet and the CPU transitions to waiting for the response.
void
TimingSimpleCPU::IcachePort::recvReqRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTimingReq(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

// Complete a load/store whose dcache response has arrived. For split
// (unaligned) accesses this is called once per fragment; only the last
// fragment falls through to actually complete the instruction.
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    pkt->req->setAccessLatency();

    updateCycleCounts();

    if (pkt->senderState) {
        // This packet is one fragment of a split access; free the
        // fragment and account for it on the parent ("big") packet.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            // Other fragments still in flight; wait for them.
            return;
        } else {
            // Last fragment: complete the access using the big packet.
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = BaseSimpleCPU::Running;

    Fault fault = curStaticInst->completeAcc(pkt, threadInfo[curThread],
                                             traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    advanceInst(fault);
}

// Fold the cycles elapsed since the last update into numCycles and the
// ppCycles probe, then remember the current cycle for the next call.
void
TimingSimpleCPU::updateCycleCounts()
{
    const Cycles delta(curCycle() - previousCycle);

    numCycles += delta;
    ppCycles->notify(delta);

    previousCycle = curCycle();
}

// Timing-mode snoop: wake any thread whose address monitor matches the
// snooped packet, and let the ISA update locked (LL/SC) state.
void
TimingSimpleCPU::DcachePort::recvTimingSnoopReq(PacketPtr pkt)
{
    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    for (auto &t_info : cpu->threadInfo) {
        TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
    }
}

// Functional-mode snoop: only the address monitors are checked; no
// locked-access handling on the functional path.
void
TimingSimpleCPU::DcachePort::recvFunctionalSnoop(PacketPtr pkt)
{
    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }
}

// Called by the memory system when a load/store response arrives.
// Returns false (nack) only when a response is already queued for this
// tick, in which case a retry is scheduled for the next cycle.
bool
TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Received load/store response %#x\n", pkt->getAddr());

    // The timing CPU is not really ticked, instead it relies on the
    // memory system (fetch and load/store) to set the pace.
    if (!tickEvent.scheduled()) {
        // Delay processing of returned data until next CPU clock edge
        tickEvent.schedule(pkt, cpu->clockEdge());
        return true;
    } else {
        // In the case of a split transaction and a cache that is
        // faster than a CPU we could get two responses in the
        // same tick, delay the second one
        if (!retryRespEvent.scheduled())
            cpu->schedule(retryRespEvent, cpu->clockEdge(Cycles(1)));
        return false;
    }
}

// Deferred tick for a dcache response: complete the data access on the
// clock edge it was scheduled for.
void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

// Called by the memory system when a previously rejected data request
// may be retried. For split accesses, a successful resend immediately
// attempts the next pending fragment of the parent packet.
void
TimingSimpleCPU::DcachePort::recvReqRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTimingReq(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                // Another fragment is still waiting to be sent.
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                // All fragments sent; wait for the responses.
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTimingReq(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

// Self-scheduling event used to model the latency of an IPR
// (memory-mapped register) access; fires completeDataAccess at tick t.
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}

// Debug helper: print where address a maps in the memory system, as
// seen from the dcache port.
void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    return new TimingSimpleCPU(this);
}