timing.cc revision 10379
/*
 * Copyright (c) 2010-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/simple/timing.hh"
#include "cpu/exetrace.hh"
#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

void
TimingSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->switched_out) {
        for (int i = 0; i < threadContexts.size(); ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, _cpuId);
        }
    }
}

void
TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    cpu->schedule(this, t);
}

TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
      dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
      fetchEvent(this), drainManager(NULL)
{
    _status = Idle;

    system->totalNumInsts = 0;
}


TimingSimpleCPU::~TimingSimpleCPU()
{
}

unsigned int
TimingSimpleCPU::drain(DrainManager *drain_manager)
{
    assert(!drainManager);
    if (switchedOut())
        return 0;

    if (_status == Idle ||
        (_status == BaseSimpleCPU::Running && isDrained())) {
        DPRINTF(Drain, "No need to drain.\n");
        return 0;
    } else {
        drainManager = drain_manager;
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());

        // The fetch event can become descheduled if a drain didn't
        // succeed on the first attempt. We need to reschedule it if
        // the CPU is waiting for a microcode routine to complete.
        if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled())
            schedule(fetchEvent, clockEdge());

        return 1;
    }
}
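
// Drain protocol: drain() above returns 0 when the CPU is already
// quiescent (switched out, idle, or running with nothing in flight) and 1
// when work is still outstanding; in that case the DrainManager is stashed
// and signalled later from tryCompleteDrain() once isDrained() holds.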
void
TimingSimpleCPU::drainResume()
{
    assert(!fetchEvent.scheduled());
    assert(!drainManager);
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());
    if (threadContexts.size() > 1)
        fatal("The timing CPU only supports one thread.\n");

    if (thread->status() == ThreadContext::Active) {
        schedule(fetchEvent, nextCycle());
        _status = BaseSimpleCPU::Running;
        notIdleFraction = 1;
    } else {
        _status = BaseSimpleCPU::Idle;
        notIdleFraction = 0;
    }
}

bool
TimingSimpleCPU::tryCompleteDrain()
{
    if (!drainManager)
        return false;

    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    drainManager->signalDrainDone();
    drainManager = NULL;

    return true;
}

void
TimingSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!fetchEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(!stayAtPC);
    assert(microPC() == 0);

    numCycles += curCycle() - previousCycle;
}


void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    previousCycle = curCycle();
}
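
// Cycle accounting: numCycles is accumulated lazily as
// curCycle() - previousCycle at the points where the CPU interacts with
// the memory system; switchOut() closes the current accounting window and
// takeOverFrom() reopens it, so cycles spent switched out are not charged
// to this CPU.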
void
TimingSimpleCPU::verifyMemoryMode() const
{
    if (!system->isTimingMode()) {
        fatal("The timing CPU requires the memory system to be in "
              "'timing' mode.\n");
    }
}

void
TimingSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction = 1;
    _status = BaseSimpleCPU::Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, clockEdge(delay));
}


void
TimingSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction = 0;
    _status = Idle;
}
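
// Data-side access path: readMem()/writeMem() start a timing TLB
// translation, finishTranslation() then calls sendData()/sendSplitData(),
// which build the packet(s) and hand them to the d-cache port through
// handleReadPacket()/handleWritePacket(); the response comes back via
// DcachePort::recvTimingResp() and completes in completeDataAccess().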
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    RequestPtr req = pkt->req;
    if (req->isMmappedIpr()) {
        Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(pkt)) {
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

void
TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
                          bool read)
{
    PacketPtr pkt;
    buildPacket(pkt, req, read);
    pkt->dataDynamicArray<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // flag to suppress cache access

        if (req->isLLSC()) {
            do_access = TheISA::handleLockedWrite(thread, req,
                                                  dcachePort.cacheBlockMask);
        } else if (req->isCondSwap()) {
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}

void
TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
                               RequestPtr req, uint8_t *data, bool read)
{
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleReadPacket(pkt1)) {
            send_state->clearFromParent();
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleReadPacket(pkt2)) {
                send_state->clearFromParent();
            }
        }
    } else {
        dcache_pkt = pkt1;
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleWritePacket()) {
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleWritePacket()) {
                send_state->clearFromParent();
            }
        }
    }
}

void
TimingSimpleCPU::translationFault(const Fault &fault)
{
    // fault may be NoFault in cases where a fault is suppressed,
    // for instance prefetches.
    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    advanceInst(fault);
}
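
// Split accesses: a request crossing a cache-line boundary is broken into
// two fragment packets plus one "big" packet that owns the data buffer.
// SplitMainSenderState on the big packet counts outstanding fragments and
// SplitFragmentSenderState on each fragment points back to it, so
// completeDataAccess() can tell when both halves have returned.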
void
TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
{
    pkt = read ? Packet::createRead(req) : Packet::createWrite(req);
}

void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        buildPacket(pkt1, req, read);
        return;
    }

    buildPacket(pkt1, req1, read);
    buildPacket(pkt2, req2, read);

    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags(),
                 dataMasterId());
    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());

    pkt->dataDynamicArray<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}

Fault
TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
                         unsigned size, unsigned flags)
{
    Fault fault;
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Read;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    req->taskId(taskId());

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, new uint8_t[size],
                                      NULL, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
        DataTranslation<TimingSimpleCPU *> *translation
            = new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    return NoFault;
}

bool
TimingSimpleCPU::handleWritePacket()
{
    RequestPtr req = dcache_pkt->req;
    if (req->isMmappedIpr()) {
        Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}
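
// Memory-mapped IPR accesses bypass the cache: handleReadPacket() and
// handleWritePacket() perform the register access immediately via
// handleIprRead()/handleIprWrite() and schedule an IprEvent so the
// response is still delivered through completeDataAccess() after the
// returned delay.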
Fault
TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    uint8_t *newData = new uint8_t[size];
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Write;

    if (data == NULL) {
        assert(flags & Request::CACHE_BLOCK_ZERO);
        // This must be a cache block cleaning request
        memset(newData, 0, size);
    } else {
        memcpy(newData, data, size);
    }

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    req->taskId(taskId());

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *translation =
            new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}


void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    _status = BaseSimpleCPU::Running;

    if (state->getFault() != NoFault) {
        if (state->isPrefetch()) {
            state->setNoFault();
        }
        delete [] state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode == BaseTLB::Read);
        }
    }

    delete state;
}


void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    // We must have just got suspended by a PC event
    if (_status == Idle)
        return;

    TheISA::PCState pcState = thread->pcState();
    bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;

    if (needToFetch) {
        _status = BaseSimpleCPU::Running;
        Request *ifetch_req = new Request();
        ifetch_req->taskId(taskId());
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                                     BaseTLB::Execute);
    } else {
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        numCycles += curCycle() - previousCycle;
        previousCycle = curCycle();
    }
}
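
// Instruction fetch path: fetch() starts a timing I-TLB translation whose
// callback is sendFetch() below; the fetch packet goes out on the i-cache
// port, the response arrives in IcachePort::recvTimingResp() and the
// instruction is executed in completeIfetch(). When no new fetch is needed
// (microcode ROM or an in-progress macro-op), fetch() calls
// completeIfetch(NULL) directly.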
void
TimingSimpleCPU::sendFetch(const Fault &fault, RequestPtr req,
                           ThreadContext *tc)
{
    if (fault == NoFault) {
        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
                req->getVaddr(), req->getPaddr());
        ifetch_pkt = new Packet(req, MemCmd::ReadReq);
        ifetch_pkt->dataStatic(&inst);
        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());

        if (!icachePort.sendTimingReq(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        _status = BaseSimpleCPU::Running;
        advanceInst(fault);
    }

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();
}


void
TimingSimpleCPU::advanceInst(const Fault &fault)
{
    if (_status == Faulting)
        return;

    if (fault != NoFault) {
        advancePC(fault);
        DPRINTF(SimpleCPU, "Fault occurred, scheduling fetch event\n");
        reschedule(fetchEvent, clockEdge(), true);
        _status = Faulting;
        return;
    }


    if (!stayAtPC)
        advancePC(fault);

    if (tryCompleteDrain())
        return;

    if (_status == BaseSimpleCPU::Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction
    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = BaseSimpleCPU::Running;

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (pkt)
        pkt->req->setAccessLatency();


    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);

        // If we're not running now the instruction will complete in a dcache
        // response callback or the instruction faulted and has started an
        // ifetch
        if (_status == BaseSimpleCPU::Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        advanceInst(NoFault);
    }

    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}
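
// completeIfetch() acts as the execute stage of this model: memory
// references only initiate their access (initiateAcc) and finish later in
// completeDataAccess() via completeAcc(), while all other instructions
// execute to completion on the spot; advanceInst() then either enters the
// Faulting state or kicks off the next fetch.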
void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

bool
TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->clockEdge();

    if (next_tick == curTick())
        cpu->completeIfetch(pkt);
    else
        tickEvent.schedule(pkt, next_tick);

    return true;
}

void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTimingReq(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    pkt->req->setAccessLatency();
    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (pkt->senderState) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            return;
        } else {
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = BaseSimpleCPU::Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    advanceInst(fault);
}
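
// Both cache ports defer response processing to the next CPU clock edge so
// responses arriving mid-cycle are not acted on early; the d-cache port
// additionally returns false (and schedules a retry) if a second response
// of a split access arrives while the first is still waiting for that
// edge.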
void
TimingSimpleCPU::DcachePort::recvTimingSnoopReq(PacketPtr pkt)
{
    TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
}


bool
TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
{
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->clockEdge();

    if (next_tick == curTick()) {
        cpu->completeDataAccess(pkt);
    } else {
        if (!tickEvent.scheduled()) {
            tickEvent.schedule(pkt, next_tick);
        } else {
            // In the case of a split transaction and a cache that is
            // faster than a CPU we could get two responses before
            // next_tick expires
            if (!retryEvent.scheduled())
                cpu->schedule(retryEvent, next_tick);
            return false;
        }
    }

    return true;
}

void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTimingReq(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTimingReq(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}
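
// IprEvent carries a memory-mapped IPR access whose data movement was
// already done by handleIprRead()/handleIprWrite(); when it fires it
// simply feeds the packet into completeDataAccess() so the instruction
// completes with the modelled IPR latency.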
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new TimingSimpleCPU(this);
}
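
// Usage sketch (illustrative): the object built by
// TimingSimpleCPUParams::create() above is normally instantiated from a
// Python configuration script rather than from C++. A minimal sketch,
// assuming the port and attribute names of this gem5 era (membus.slave,
// mem_mode, createInterruptController()):
//
//     from m5.objects import *
//
//     system.mem_mode = 'timing'        # required by verifyMemoryMode()
//     system.cpu = TimingSimpleCPU()
//     system.cpu.createInterruptController()
//     system.cpu.icache_port = system.membus.slave
//     system.cpu.dcache_port = system.membus.slave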