1/* 2 * Copyright (c) 2010-2012 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2002-2005 The Regents of The University of Michigan 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Steve Reinhardt 41 */ 42 43#include "arch/locked_mem.hh" 44#include "arch/mmapped_ipr.hh" 45#include "arch/utility.hh" 46#include "base/bigint.hh" 47#include "config/the_isa.hh" 48#include "cpu/simple/timing.hh" 49#include "cpu/exetrace.hh" 50#include "debug/Config.hh" 51#include "debug/Drain.hh" 52#include "debug/ExecFaulting.hh" 53#include "debug/SimpleCPU.hh" 54#include "mem/packet.hh" 55#include "mem/packet_access.hh" 56#include "params/TimingSimpleCPU.hh" 57#include "sim/faults.hh" 58#include "sim/full_system.hh" 59#include "sim/system.hh" 60 61using namespace std; 62using namespace TheISA; 63 64void 65TimingSimpleCPU::init() 66{ 67 BaseCPU::init(); 68 69 // Initialise the ThreadContext's memory proxies 70 tcBase()->initMemProxies(tcBase()); 71 72 if (FullSystem && !params()->switched_out) { 73 for (int i = 0; i < threadContexts.size(); ++i) { 74 ThreadContext *tc = threadContexts[i]; 75 // initialize CPU, including PC 76 TheISA::initCPU(tc, _cpuId); 77 } 78 } 79} 80 81void 82TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t) 83{ 84 pkt = _pkt; 85 cpu->schedule(this, t); 86} 87 88TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p) 89 : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this), 90 dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0), 91 fetchEvent(this), drainManager(NULL) 92{ 93 _status = Idle; 94 95 system->totalNumInsts = 0; 96} 97 98 
99TimingSimpleCPU::~TimingSimpleCPU() 100{ 101} 102 103unsigned int 104TimingSimpleCPU::drain(DrainManager *drain_manager) 105{ 106 assert(!drainManager); 107 if (switchedOut()) 108 return 0; 109 110 if (_status == Idle || 111 (_status == BaseSimpleCPU::Running && isDrained())) { 112 DPRINTF(Drain, "No need to drain.\n"); 113 return 0; 114 } else { 115 drainManager = drain_manager; 116 DPRINTF(Drain, "Requesting drain: %s\n", pcState()); 117 118 // The fetch event can become descheduled if a drain didn't 119 // succeed on the first attempt. We need to reschedule it if 120 // the CPU is waiting for a microcode routine to complete. 121 if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled()) 122 schedule(fetchEvent, clockEdge()); 123 124 return 1; 125 } 126} 127 128void 129TimingSimpleCPU::drainResume() 130{ 131 assert(!fetchEvent.scheduled()); 132 assert(!drainManager); 133 if (switchedOut()) 134 return; 135 136 DPRINTF(SimpleCPU, "Resume\n"); 137 verifyMemoryMode(); 138 139 assert(!threadContexts.empty()); 140 if (threadContexts.size() > 1) 141 fatal("The timing CPU only supports one thread.\n"); 142 143 if (thread->status() == ThreadContext::Active) { 144 schedule(fetchEvent, nextCycle()); 145 _status = BaseSimpleCPU::Running;
| 1/* 2 * Copyright (c) 2010-2012 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2002-2005 The Regents of The University of Michigan 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Steve Reinhardt 41 */ 42 43#include "arch/locked_mem.hh" 44#include "arch/mmapped_ipr.hh" 45#include "arch/utility.hh" 46#include "base/bigint.hh" 47#include "config/the_isa.hh" 48#include "cpu/simple/timing.hh" 49#include "cpu/exetrace.hh" 50#include "debug/Config.hh" 51#include "debug/Drain.hh" 52#include "debug/ExecFaulting.hh" 53#include "debug/SimpleCPU.hh" 54#include "mem/packet.hh" 55#include "mem/packet_access.hh" 56#include "params/TimingSimpleCPU.hh" 57#include "sim/faults.hh" 58#include "sim/full_system.hh" 59#include "sim/system.hh" 60 61using namespace std; 62using namespace TheISA; 63 64void 65TimingSimpleCPU::init() 66{ 67 BaseCPU::init(); 68 69 // Initialise the ThreadContext's memory proxies 70 tcBase()->initMemProxies(tcBase()); 71 72 if (FullSystem && !params()->switched_out) { 73 for (int i = 0; i < threadContexts.size(); ++i) { 74 ThreadContext *tc = threadContexts[i]; 75 // initialize CPU, including PC 76 TheISA::initCPU(tc, _cpuId); 77 } 78 } 79} 80 81void 82TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t) 83{ 84 pkt = _pkt; 85 cpu->schedule(this, t); 86} 87 88TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p) 89 : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this), 90 dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0), 91 fetchEvent(this), drainManager(NULL) 92{ 93 _status = Idle; 94 95 system->totalNumInsts = 0; 96} 97 98 
99TimingSimpleCPU::~TimingSimpleCPU() 100{ 101} 102 103unsigned int 104TimingSimpleCPU::drain(DrainManager *drain_manager) 105{ 106 assert(!drainManager); 107 if (switchedOut()) 108 return 0; 109 110 if (_status == Idle || 111 (_status == BaseSimpleCPU::Running && isDrained())) { 112 DPRINTF(Drain, "No need to drain.\n"); 113 return 0; 114 } else { 115 drainManager = drain_manager; 116 DPRINTF(Drain, "Requesting drain: %s\n", pcState()); 117 118 // The fetch event can become descheduled if a drain didn't 119 // succeed on the first attempt. We need to reschedule it if 120 // the CPU is waiting for a microcode routine to complete. 121 if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled()) 122 schedule(fetchEvent, clockEdge()); 123 124 return 1; 125 } 126} 127 128void 129TimingSimpleCPU::drainResume() 130{ 131 assert(!fetchEvent.scheduled()); 132 assert(!drainManager); 133 if (switchedOut()) 134 return; 135 136 DPRINTF(SimpleCPU, "Resume\n"); 137 verifyMemoryMode(); 138 139 assert(!threadContexts.empty()); 140 if (threadContexts.size() > 1) 141 fatal("The timing CPU only supports one thread.\n"); 142 143 if (thread->status() == ThreadContext::Active) { 144 schedule(fetchEvent, nextCycle()); 145 _status = BaseSimpleCPU::Running;
|
234 _status = Idle; 235} 236 237bool 238TimingSimpleCPU::handleReadPacket(PacketPtr pkt) 239{ 240 RequestPtr req = pkt->req; 241 if (req->isMmappedIpr()) { 242 Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt); 243 new IprEvent(pkt, this, clockEdge(delay)); 244 _status = DcacheWaitResponse; 245 dcache_pkt = NULL; 246 } else if (!dcachePort.sendTimingReq(pkt)) { 247 _status = DcacheRetry; 248 dcache_pkt = pkt; 249 } else { 250 _status = DcacheWaitResponse; 251 // memory system takes ownership of packet 252 dcache_pkt = NULL; 253 } 254 return dcache_pkt == NULL; 255} 256 257void 258TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res, 259 bool read) 260{ 261 PacketPtr pkt; 262 buildPacket(pkt, req, read); 263 pkt->dataDynamicArray<uint8_t>(data); 264 if (req->getFlags().isSet(Request::NO_ACCESS)) { 265 assert(!dcache_pkt); 266 pkt->makeResponse(); 267 completeDataAccess(pkt); 268 } else if (read) { 269 handleReadPacket(pkt); 270 } else { 271 bool do_access = true; // flag to suppress cache access 272 273 if (req->isLLSC()) { 274 do_access = TheISA::handleLockedWrite(thread, req); 275 } else if (req->isCondSwap()) { 276 assert(res); 277 req->setExtraData(*res); 278 } 279 280 if (do_access) { 281 dcache_pkt = pkt; 282 handleWritePacket(); 283 } else { 284 _status = DcacheWaitResponse; 285 completeDataAccess(pkt); 286 } 287 } 288} 289 290void 291TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2, 292 RequestPtr req, uint8_t *data, bool read) 293{ 294 PacketPtr pkt1, pkt2; 295 buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read); 296 if (req->getFlags().isSet(Request::NO_ACCESS)) { 297 assert(!dcache_pkt); 298 pkt1->makeResponse(); 299 completeDataAccess(pkt1); 300 } else if (read) { 301 SplitFragmentSenderState * send_state = 302 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState); 303 if (handleReadPacket(pkt1)) { 304 send_state->clearFromParent(); 305 send_state = dynamic_cast<SplitFragmentSenderState *>( 306 
pkt2->senderState); 307 if (handleReadPacket(pkt2)) { 308 send_state->clearFromParent(); 309 } 310 } 311 } else { 312 dcache_pkt = pkt1; 313 SplitFragmentSenderState * send_state = 314 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState); 315 if (handleWritePacket()) { 316 send_state->clearFromParent(); 317 dcache_pkt = pkt2; 318 send_state = dynamic_cast<SplitFragmentSenderState *>( 319 pkt2->senderState); 320 if (handleWritePacket()) { 321 send_state->clearFromParent(); 322 } 323 } 324 } 325} 326 327void 328TimingSimpleCPU::translationFault(Fault fault) 329{ 330 // fault may be NoFault in cases where a fault is suppressed, 331 // for instance prefetches. 332 numCycles += curCycle() - previousCycle; 333 previousCycle = curCycle(); 334 335 if (traceData) { 336 // Since there was a fault, we shouldn't trace this instruction. 337 delete traceData; 338 traceData = NULL; 339 } 340 341 postExecute(); 342 343 advanceInst(fault); 344} 345 346void 347TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read) 348{ 349 MemCmd cmd; 350 if (read) { 351 cmd = MemCmd::ReadReq; 352 if (req->isLLSC()) 353 cmd = MemCmd::LoadLockedReq; 354 } else { 355 cmd = MemCmd::WriteReq; 356 if (req->isLLSC()) { 357 cmd = MemCmd::StoreCondReq; 358 } else if (req->isSwap()) { 359 cmd = MemCmd::SwapReq; 360 } 361 } 362 pkt = new Packet(req, cmd); 363} 364 365void 366TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2, 367 RequestPtr req1, RequestPtr req2, RequestPtr req, 368 uint8_t *data, bool read) 369{ 370 pkt1 = pkt2 = NULL; 371 372 assert(!req1->isMmappedIpr() && !req2->isMmappedIpr()); 373 374 if (req->getFlags().isSet(Request::NO_ACCESS)) { 375 buildPacket(pkt1, req, read); 376 return; 377 } 378 379 buildPacket(pkt1, req1, read); 380 buildPacket(pkt2, req2, read); 381 382 req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags(), dataMasterId()); 383 PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand()); 384 385 
pkt->dataDynamicArray<uint8_t>(data); 386 pkt1->dataStatic<uint8_t>(data); 387 pkt2->dataStatic<uint8_t>(data + req1->getSize()); 388 389 SplitMainSenderState * main_send_state = new SplitMainSenderState; 390 pkt->senderState = main_send_state; 391 main_send_state->fragments[0] = pkt1; 392 main_send_state->fragments[1] = pkt2; 393 main_send_state->outstanding = 2; 394 pkt1->senderState = new SplitFragmentSenderState(pkt, 0); 395 pkt2->senderState = new SplitFragmentSenderState(pkt, 1); 396} 397 398Fault 399TimingSimpleCPU::readMem(Addr addr, uint8_t *data, 400 unsigned size, unsigned flags) 401{ 402 Fault fault; 403 const int asid = 0; 404 const ThreadID tid = 0; 405 const Addr pc = thread->instAddr(); 406 unsigned block_size = cacheLineSize(); 407 BaseTLB::Mode mode = BaseTLB::Read; 408 409 if (traceData) { 410 traceData->setAddr(addr); 411 } 412 413 RequestPtr req = new Request(asid, addr, size, 414 flags, dataMasterId(), pc, _cpuId, tid); 415 416 Addr split_addr = roundDown(addr + size - 1, block_size); 417 assert(split_addr <= addr || split_addr - addr < block_size); 418 419 _status = DTBWaitResponse; 420 if (split_addr > addr) { 421 RequestPtr req1, req2; 422 assert(!req->isLLSC() && !req->isSwap()); 423 req->splitOnVaddr(split_addr, req1, req2); 424 425 WholeTranslationState *state = 426 new WholeTranslationState(req, req1, req2, new uint8_t[size], 427 NULL, mode); 428 DataTranslation<TimingSimpleCPU *> *trans1 = 429 new DataTranslation<TimingSimpleCPU *>(this, state, 0); 430 DataTranslation<TimingSimpleCPU *> *trans2 = 431 new DataTranslation<TimingSimpleCPU *>(this, state, 1); 432 433 thread->dtb->translateTiming(req1, tc, trans1, mode); 434 thread->dtb->translateTiming(req2, tc, trans2, mode); 435 } else { 436 WholeTranslationState *state = 437 new WholeTranslationState(req, new uint8_t[size], NULL, mode); 438 DataTranslation<TimingSimpleCPU *> *translation 439 = new DataTranslation<TimingSimpleCPU *>(this, state); 440 thread->dtb->translateTiming(req, tc, 
translation, mode); 441 } 442 443 return NoFault; 444} 445 446bool 447TimingSimpleCPU::handleWritePacket() 448{ 449 RequestPtr req = dcache_pkt->req; 450 if (req->isMmappedIpr()) { 451 Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt); 452 new IprEvent(dcache_pkt, this, clockEdge(delay)); 453 _status = DcacheWaitResponse; 454 dcache_pkt = NULL; 455 } else if (!dcachePort.sendTimingReq(dcache_pkt)) { 456 _status = DcacheRetry; 457 } else { 458 _status = DcacheWaitResponse; 459 // memory system takes ownership of packet 460 dcache_pkt = NULL; 461 } 462 return dcache_pkt == NULL; 463} 464 465Fault 466TimingSimpleCPU::writeMem(uint8_t *data, unsigned size, 467 Addr addr, unsigned flags, uint64_t *res) 468{ 469 uint8_t *newData = new uint8_t[size]; 470 memcpy(newData, data, size); 471 472 const int asid = 0; 473 const ThreadID tid = 0; 474 const Addr pc = thread->instAddr(); 475 unsigned block_size = cacheLineSize(); 476 BaseTLB::Mode mode = BaseTLB::Write; 477 478 if (traceData) { 479 traceData->setAddr(addr); 480 } 481 482 RequestPtr req = new Request(asid, addr, size, 483 flags, dataMasterId(), pc, _cpuId, tid); 484 485 Addr split_addr = roundDown(addr + size - 1, block_size); 486 assert(split_addr <= addr || split_addr - addr < block_size); 487 488 _status = DTBWaitResponse; 489 if (split_addr > addr) { 490 RequestPtr req1, req2; 491 assert(!req->isLLSC() && !req->isSwap()); 492 req->splitOnVaddr(split_addr, req1, req2); 493 494 WholeTranslationState *state = 495 new WholeTranslationState(req, req1, req2, newData, res, mode); 496 DataTranslation<TimingSimpleCPU *> *trans1 = 497 new DataTranslation<TimingSimpleCPU *>(this, state, 0); 498 DataTranslation<TimingSimpleCPU *> *trans2 = 499 new DataTranslation<TimingSimpleCPU *>(this, state, 1); 500 501 thread->dtb->translateTiming(req1, tc, trans1, mode); 502 thread->dtb->translateTiming(req2, tc, trans2, mode); 503 } else { 504 WholeTranslationState *state = 505 new WholeTranslationState(req, newData, 
res, mode); 506 DataTranslation<TimingSimpleCPU *> *translation = 507 new DataTranslation<TimingSimpleCPU *>(this, state); 508 thread->dtb->translateTiming(req, tc, translation, mode); 509 } 510 511 // Translation faults will be returned via finishTranslation() 512 return NoFault; 513} 514 515 516void 517TimingSimpleCPU::finishTranslation(WholeTranslationState *state) 518{ 519 _status = BaseSimpleCPU::Running; 520 521 if (state->getFault() != NoFault) { 522 if (state->isPrefetch()) { 523 state->setNoFault(); 524 } 525 delete [] state->data; 526 state->deleteReqs(); 527 translationFault(state->getFault()); 528 } else { 529 if (!state->isSplit) { 530 sendData(state->mainReq, state->data, state->res, 531 state->mode == BaseTLB::Read); 532 } else { 533 sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq, 534 state->data, state->mode == BaseTLB::Read); 535 } 536 } 537 538 delete state; 539} 540 541 542void 543TimingSimpleCPU::fetch() 544{ 545 DPRINTF(SimpleCPU, "Fetch\n"); 546 547 if (!curStaticInst || !curStaticInst->isDelayedCommit()) 548 checkForInterrupts(); 549 550 checkPcEventQueue(); 551 552 // We must have just got suspended by a PC event 553 if (_status == Idle) 554 return; 555 556 TheISA::PCState pcState = thread->pcState(); 557 bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst; 558 559 if (needToFetch) { 560 _status = BaseSimpleCPU::Running; 561 Request *ifetch_req = new Request(); 562 ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0); 563 setupFetchRequest(ifetch_req); 564 DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr()); 565 thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation, 566 BaseTLB::Execute); 567 } else { 568 _status = IcacheWaitResponse; 569 completeIfetch(NULL); 570 571 numCycles += curCycle() - previousCycle; 572 previousCycle = curCycle(); 573 } 574} 575 576 577void 578TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc) 579{ 580 if (fault == NoFault) 
{ 581 DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n", 582 req->getVaddr(), req->getPaddr()); 583 ifetch_pkt = new Packet(req, MemCmd::ReadReq); 584 ifetch_pkt->dataStatic(&inst); 585 DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr()); 586 587 if (!icachePort.sendTimingReq(ifetch_pkt)) { 588 // Need to wait for retry 589 _status = IcacheRetry; 590 } else { 591 // Need to wait for cache to respond 592 _status = IcacheWaitResponse; 593 // ownership of packet transferred to memory system 594 ifetch_pkt = NULL; 595 } 596 } else { 597 DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr()); 598 delete req; 599 // fetch fault: advance directly to next instruction (fault handler) 600 _status = BaseSimpleCPU::Running; 601 advanceInst(fault); 602 } 603 604 numCycles += curCycle() - previousCycle; 605 previousCycle = curCycle(); 606} 607 608 609void 610TimingSimpleCPU::advanceInst(Fault fault) 611{ 612 if (_status == Faulting) 613 return; 614 615 if (fault != NoFault) { 616 advancePC(fault); 617 DPRINTF(SimpleCPU, "Fault occured, scheduling fetch event\n"); 618 reschedule(fetchEvent, clockEdge(), true); 619 _status = Faulting; 620 return; 621 } 622 623 624 if (!stayAtPC) 625 advancePC(fault); 626 627 if (tryCompleteDrain()) 628 return; 629 630 if (_status == BaseSimpleCPU::Running) { 631 // kick off fetch of next instruction... callback from icache 632 // response will cause that instruction to be executed, 633 // keeping the CPU running. 634 fetch(); 635 } 636} 637 638 639void 640TimingSimpleCPU::completeIfetch(PacketPtr pkt) 641{ 642 DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ? 
643 pkt->getAddr() : 0); 644 645 // received a response from the icache: execute the received 646 // instruction 647 648 assert(!pkt || !pkt->isError()); 649 assert(_status == IcacheWaitResponse); 650 651 _status = BaseSimpleCPU::Running; 652 653 numCycles += curCycle() - previousCycle; 654 previousCycle = curCycle(); 655 656 preExecute(); 657 if (curStaticInst && curStaticInst->isMemRef()) { 658 // load or store: just send to dcache 659 Fault fault = curStaticInst->initiateAcc(this, traceData); 660 661 // If we're not running now the instruction will complete in a dcache 662 // response callback or the instruction faulted and has started an 663 // ifetch 664 if (_status == BaseSimpleCPU::Running) { 665 if (fault != NoFault && traceData) { 666 // If there was a fault, we shouldn't trace this instruction. 667 delete traceData; 668 traceData = NULL; 669 } 670 671 postExecute(); 672 // @todo remove me after debugging with legion done 673 if (curStaticInst && (!curStaticInst->isMicroop() || 674 curStaticInst->isFirstMicroop())) 675 instCnt++; 676 advanceInst(fault); 677 } 678 } else if (curStaticInst) { 679 // non-memory instruction: execute completely now 680 Fault fault = curStaticInst->execute(this, traceData); 681 682 // keep an instruction count 683 if (fault == NoFault) 684 countInst(); 685 else if (traceData && !DTRACE(ExecFaulting)) { 686 delete traceData; 687 traceData = NULL; 688 } 689 690 postExecute(); 691 // @todo remove me after debugging with legion done 692 if (curStaticInst && (!curStaticInst->isMicroop() || 693 curStaticInst->isFirstMicroop())) 694 instCnt++; 695 advanceInst(fault); 696 } else { 697 advanceInst(NoFault); 698 } 699 700 if (pkt) { 701 delete pkt->req; 702 delete pkt; 703 } 704} 705 706void 707TimingSimpleCPU::IcachePort::ITickEvent::process() 708{ 709 cpu->completeIfetch(pkt); 710} 711 712bool 713TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt) 714{ 715 DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr()); 716 
// delay processing of returned data until next CPU clock edge 717 Tick next_tick = cpu->clockEdge(); 718 719 if (next_tick == curTick()) 720 cpu->completeIfetch(pkt); 721 else 722 tickEvent.schedule(pkt, next_tick); 723 724 return true; 725} 726 727void 728TimingSimpleCPU::IcachePort::recvRetry() 729{ 730 // we shouldn't get a retry unless we have a packet that we're 731 // waiting to transmit 732 assert(cpu->ifetch_pkt != NULL); 733 assert(cpu->_status == IcacheRetry); 734 PacketPtr tmp = cpu->ifetch_pkt; 735 if (sendTimingReq(tmp)) { 736 cpu->_status = IcacheWaitResponse; 737 cpu->ifetch_pkt = NULL; 738 } 739} 740 741void 742TimingSimpleCPU::completeDataAccess(PacketPtr pkt) 743{ 744 // received a response from the dcache: complete the load or store 745 // instruction 746 assert(!pkt->isError()); 747 assert(_status == DcacheWaitResponse || _status == DTBWaitResponse || 748 pkt->req->getFlags().isSet(Request::NO_ACCESS)); 749 750 numCycles += curCycle() - previousCycle; 751 previousCycle = curCycle(); 752 753 if (pkt->senderState) { 754 SplitFragmentSenderState * send_state = 755 dynamic_cast<SplitFragmentSenderState *>(pkt->senderState); 756 assert(send_state); 757 delete pkt->req; 758 delete pkt; 759 PacketPtr big_pkt = send_state->bigPkt; 760 delete send_state; 761 762 SplitMainSenderState * main_send_state = 763 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState); 764 assert(main_send_state); 765 // Record the fact that this packet is no longer outstanding. 
766 assert(main_send_state->outstanding != 0); 767 main_send_state->outstanding--; 768 769 if (main_send_state->outstanding) { 770 return; 771 } else { 772 delete main_send_state; 773 big_pkt->senderState = NULL; 774 pkt = big_pkt; 775 } 776 } 777 778 _status = BaseSimpleCPU::Running; 779 780 Fault fault = curStaticInst->completeAcc(pkt, this, traceData); 781 782 // keep an instruction count 783 if (fault == NoFault) 784 countInst(); 785 else if (traceData) { 786 // If there was a fault, we shouldn't trace this instruction. 787 delete traceData; 788 traceData = NULL; 789 } 790 791 // the locked flag may be cleared on the response packet, so check 792 // pkt->req and not pkt to see if it was a load-locked 793 if (pkt->isRead() && pkt->req->isLLSC()) { 794 TheISA::handleLockedRead(thread, pkt->req); 795 } 796 797 delete pkt->req; 798 delete pkt; 799 800 postExecute(); 801 802 advanceInst(fault); 803} 804 805bool 806TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt) 807{ 808 // delay processing of returned data until next CPU clock edge 809 Tick next_tick = cpu->clockEdge(); 810 811 if (next_tick == curTick()) { 812 cpu->completeDataAccess(pkt); 813 } else { 814 if (!tickEvent.scheduled()) { 815 tickEvent.schedule(pkt, next_tick); 816 } else { 817 // In the case of a split transaction and a cache that is 818 // faster than a CPU we could get two responses before 819 // next_tick expires 820 if (!retryEvent.scheduled()) 821 cpu->schedule(retryEvent, next_tick); 822 return false; 823 } 824 } 825 826 return true; 827} 828 829void 830TimingSimpleCPU::DcachePort::DTickEvent::process() 831{ 832 cpu->completeDataAccess(pkt); 833} 834 835void 836TimingSimpleCPU::DcachePort::recvRetry() 837{ 838 // we shouldn't get a retry unless we have a packet that we're 839 // waiting to transmit 840 assert(cpu->dcache_pkt != NULL); 841 assert(cpu->_status == DcacheRetry); 842 PacketPtr tmp = cpu->dcache_pkt; 843 if (tmp->senderState) { 844 // This is a packet from a split access. 
845 SplitFragmentSenderState * send_state = 846 dynamic_cast<SplitFragmentSenderState *>(tmp->senderState); 847 assert(send_state); 848 PacketPtr big_pkt = send_state->bigPkt; 849 850 SplitMainSenderState * main_send_state = 851 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState); 852 assert(main_send_state); 853 854 if (sendTimingReq(tmp)) { 855 // If we were able to send without retrying, record that fact 856 // and try sending the other fragment. 857 send_state->clearFromParent(); 858 int other_index = main_send_state->getPendingFragment(); 859 if (other_index > 0) { 860 tmp = main_send_state->fragments[other_index]; 861 cpu->dcache_pkt = tmp; 862 if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) || 863 (big_pkt->isWrite() && cpu->handleWritePacket())) { 864 main_send_state->fragments[other_index] = NULL; 865 } 866 } else { 867 cpu->_status = DcacheWaitResponse; 868 // memory system takes ownership of packet 869 cpu->dcache_pkt = NULL; 870 } 871 } 872 } else if (sendTimingReq(tmp)) { 873 cpu->_status = DcacheWaitResponse; 874 // memory system takes ownership of packet 875 cpu->dcache_pkt = NULL; 876 } 877} 878 879TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu, 880 Tick t) 881 : pkt(_pkt), cpu(_cpu) 882{ 883 cpu->schedule(this, t); 884} 885 886void 887TimingSimpleCPU::IprEvent::process() 888{ 889 cpu->completeDataAccess(pkt); 890} 891 892const char * 893TimingSimpleCPU::IprEvent::description() const 894{ 895 return "Timing Simple CPU Delay IPR event"; 896} 897 898 899void 900TimingSimpleCPU::printAddr(Addr a) 901{ 902 dcachePort.printAddr(a); 903} 904 905 906//////////////////////////////////////////////////////////////////////// 907// 908// TimingSimpleCPU Simulation Object 909// 910TimingSimpleCPU * 911TimingSimpleCPUParams::create() 912{ 913 numThreads = 1; 914 if (!FullSystem && workload.size() != 1) 915 panic("only one workload allowed"); 916 return new TimingSimpleCPU(this); 917}
    _status = Idle;
}

// Try to issue a read packet to the d-cache. Memory-mapped IPR reads
// bypass the cache and complete via a scheduled IprEvent instead.
// Returns true if we no longer own the packet (it was accepted by the
// cache or handed to an IprEvent); returns false if the cache refused
// it, in which case it is parked in dcache_pkt until recvRetry().
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    RequestPtr req = pkt->req;
    if (req->isMmappedIpr()) {
        // Memory-mapped IPR: perform the access immediately and
        // schedule the completion event after the ISA-reported delay.
        Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(pkt)) {
        // Cache is busy: hold the packet and wait for a retry.
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

// Called when translation of a non-split data access has finished:
// build the packet and either issue it to the d-cache or, for
// suppressed accesses (NO_ACCESS requests, or a store-conditional that
// fails its lock check), complete it locally with no memory access.
// The packet takes ownership of 'data' (dataDynamicArray).
void
TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
                          bool read)
{
    PacketPtr pkt;
    buildPacket(pkt, req, read);
    pkt->dataDynamicArray<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        // No memory access required: fabricate the response directly.
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // flag to suppress cache access

        if (req->isLLSC()) {
            // Store-conditional: only perform the write if the lock
            // check succeeds.
            do_access = TheISA::handleLockedWrite(thread, req);
        } else if (req->isCondSwap()) {
            // Conditional swap carries its comparison value in the
            // request's extra data.
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            // Suppressed access (failed SC): complete immediately.
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}

// Issue both fragments of a split (line-crossing) access after both
// translations have finished. A fragment is detached from its parent
// (clearFromParent) only once the d-cache has accepted it; the second
// fragment is attempted only after the first went through. Fragments
// refused by the cache are resent from DcachePort::recvRetry().
void
TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
                               RequestPtr req, uint8_t *data, bool read)
{
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        // No memory access required: respond with the single unsplit
        // packet that buildSplitPacket produced for this case.
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleReadPacket(pkt1)) {
            send_state->clearFromParent();
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleReadPacket(pkt2)) {
                send_state->clearFromParent();
            }
        }
    } else {
        dcache_pkt = pkt1;
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleWritePacket()) {
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleWritePacket()) {
                send_state->clearFromParent();
            }
        }
    }
}

// Handle a fault reported by the d-side TLB: account the elapsed
// cycles, drop any trace record, and advance past the instruction.
void
TimingSimpleCPU::translationFault(Fault fault)
{
    // fault may be NoFault in cases where a fault is suppressed,
    // for instance prefetches.
    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    advanceInst(fault);
}

// Pick the memory command matching the request (plain read/write,
// load-locked, store-conditional, or swap) and allocate the packet.
void
TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
{
    MemCmd cmd;
    if (read) {
        cmd = MemCmd::ReadReq;
        if (req->isLLSC())
            cmd = MemCmd::LoadLockedReq;
    } else {
        cmd = MemCmd::WriteReq;
        if (req->isLLSC()) {
            cmd = MemCmd::StoreCondReq;
        } else if (req->isSwap()) {
            cmd = MemCmd::SwapReq;
        }
    }
    pkt = new Packet(req, cmd);
}

// Build the two fragment packets for a split access plus a "main"
// packet that owns the full data buffer and tracks the fragments
// through SplitMainSenderState/SplitFragmentSenderState. For NO_ACCESS
// requests only a single packet for the unsplit request is built
// (returned in pkt1, with pkt2 left NULL).
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        buildPacket(pkt1, req, read);
        return;
    }

    buildPacket(pkt1, req1, read);
    buildPacket(pkt2, req2, read);

    // Give the main request a physical address (borrowed from the
    // first fragment) so the main packet can be constructed; the main
    // packet is created directly with the matching response command.
    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags(), dataMasterId());
    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());

    // The main packet owns the buffer; the fragments point into it
    // (dataStatic), second fragment offset by the first's size.
    pkt->dataDynamicArray<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}

// Initiate a timing-mode read of 'size' bytes at virtual address
// 'addr'. Only starts the d-side translation here; the actual access
// is issued from finishTranslation(). Accesses crossing a cache-line
// boundary are split on the line boundary and translated as two
// fragments. Always returns NoFault -- translation faults are
// delivered asynchronously via the translation callback.
Fault
TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
                         unsigned size, unsigned flags)
{
    Fault fault;
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Read;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    // split_addr > addr iff [addr, addr+size) crosses a line boundary.
    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        // Line-crossing access: translate both halves in parallel;
        // WholeTranslationState joins the two results.
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, new uint8_t[size],
                                      NULL, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
        DataTranslation<TimingSimpleCPU *> *translation
            = new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    return NoFault;
}

// Try to issue the packet currently held in dcache_pkt as a write.
// Mirrors handleReadPacket(): IPR writes complete via an IprEvent,
// cache refusals leave the packet in dcache_pkt for recvRetry().
// Returns true if we no longer own the packet.
bool
TimingSimpleCPU::handleWritePacket()
{
    RequestPtr req = dcache_pkt->req;
    if (req->isMmappedIpr()) {
        Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

// Initiate a timing-mode write. Copies the caller's data (the access
// completes asynchronously, after translation and the cache response)
// and starts the d-side translation; the write itself is issued from
// finishTranslation(). Same split handling as readMem().
Fault
TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    // Private copy of the store data; ownership passes to the
    // translation state and then to the packet.
    uint8_t *newData = new uint8_t[size];
    memcpy(newData, data, size);

    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Write;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    // split_addr > addr iff [addr, addr+size) crosses a line boundary.
    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *translation =
            new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}


// TLB callback: the translation (both halves, if split) is complete.
// On fault the buffered data and requests are discarded and the fault
// is reported; otherwise the access is issued via sendData() /
// sendSplitData(). The state object is consumed either way.
void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    _status = BaseSimpleCPU::Running;

    if (state->getFault() != NoFault) {
        if (state->isPrefetch()) {
            // Prefetch faults are suppressed (fault becomes NoFault).
            state->setNoFault();
        }
        delete [] state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode == BaseTLB::Read);
        }
    }

    delete state;
}


// Fetch the next instruction. Normally starts a timing i-side
// translation (the fetch continues in sendFetch()); when executing
// from the microcode ROM or inside a macro-op no memory fetch is
// needed and completeIfetch(NULL) is called directly.
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    // We must have just got suspended by a PC event
    if (_status == Idle)
        return;

    TheISA::PCState pcState = thread->pcState();
    bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;

    if (needToFetch) {
        _status = BaseSimpleCPU::Running;
        Request *ifetch_req = new Request();
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                BaseTLB::Execute);
    } else {
        // No memory access needed (ROM microcode / mid macro-op):
        // complete the "fetch" immediately.
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        numCycles += curCycle() - previousCycle;
        previousCycle = curCycle();
    }
}


// I-TLB callback for a fetch translation: on success send the fetch
// packet to the i-cache (parking it in ifetch_pkt if the cache asks
// for a retry); on a translation fault skip the access and advance
// directly so the fault handler can run.
void
TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
{
    if (fault == NoFault) {
        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
                req->getVaddr(), req->getPaddr());
        ifetch_pkt = new Packet(req, MemCmd::ReadReq);
        ifetch_pkt->dataStatic(&inst);
        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());

        if (!icachePort.sendTimingReq(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        _status = BaseSimpleCPU::Running;
        advanceInst(fault);
    }

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();
}


// Move on to the next instruction: handle a pending fault (invoke the
// handler PC and reschedule fetch), honor a pending drain, and kick
// off the next fetch if the CPU is still running.
void
TimingSimpleCPU::advanceInst(Fault fault)
{
    // Already servicing a fault; the rescheduled fetch event will
    // restart execution.
    if (_status == Faulting)
        return;

    if (fault != NoFault) {
        advancePC(fault);
        DPRINTF(SimpleCPU, "Fault occured, scheduling fetch event\n");
        reschedule(fetchEvent, clockEdge(), true);
        _status = Faulting;
        return;
    }


    if (!stayAtPC)
        advancePC(fault);

    if (tryCompleteDrain())
        return;

    if (_status == BaseSimpleCPU::Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


// Execute the instruction whose fetch just completed. pkt may be NULL
// when fetch() short-circuited (ROM microcode / macro-op). Memory
// instructions only *initiate* their access here; they complete later
// in completeDataAccess(). Non-memory instructions execute fully now.
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction

    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = BaseSimpleCPU::Running;

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);

        // If we're not running now the instruction will complete in a dcache
        // response callback or the instruction faulted and has started an
        // ifetch
        if (_status == BaseSimpleCPU::Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        advanceInst(NoFault);
    }

    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}

// Deferred i-cache response: deliver the saved packet to the CPU.
void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

// Timing response from the i-cache. Processing is aligned to the next
// CPU clock edge; if we are already on an edge, handle it now.
bool
TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());

    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->clockEdge();

    if (next_tick == curTick())
        cpu->completeIfetch(pkt);
    else
        tickEvent.schedule(pkt, next_tick);

    return true;
}

// The i-cache is ready again after refusing a fetch: resend the
// parked ifetch_pkt.
void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTimingReq(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

// Complete a load/store whose d-cache response (or IPR event, or
// locally fabricated response) has arrived. For split accesses the
// fragment responses are counted via the main packet's sender state;
// only when both fragments are back does the instruction complete,
// using the main packet.
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (pkt->senderState) {
        // This is a fragment of a split access: free the fragment and
        // account it against the main packet.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            // Still waiting for the other fragment.
            return;
        } else {
            // Both fragments done: complete using the main packet.
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = BaseSimpleCPU::Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    advanceInst(fault);
}

// Timing response from the d-cache, aligned to the next CPU clock
// edge. If the tick event is already occupied by an earlier response
// (possible with split transactions), refuse this one and schedule a
// retry so the cache resends it later.
bool
TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
{
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->clockEdge();

    if (next_tick == curTick()) {
        cpu->completeDataAccess(pkt);
    } else {
        if (!tickEvent.scheduled()) {
            tickEvent.schedule(pkt, next_tick);
        } else {
            // In the case of a split transaction and a cache that is
            // faster than a CPU we could get two responses before
            // next_tick expires
            if (!retryEvent.scheduled())
                cpu->schedule(retryEvent, next_tick);
            return false;
        }
    }

    return true;
}

// Deferred d-cache response: deliver the saved packet to the CPU.
void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

// The d-cache is ready again after refusing a request: resend the
// parked dcache_pkt. For a split access, after the accepted fragment
// is detached, the other pending fragment (if any) is tried as well.
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTimingReq(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTimingReq(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

// Event used to model the latency of a memory-mapped IPR access:
// schedules itself at construction for the given tick.
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

// IPR delay elapsed: complete the access like a d-cache response.
void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


// Debug helper: route an address through the d-cache port's printer.
void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
    // In syscall-emulation mode each CPU runs exactly one workload.
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new TimingSimpleCPU(this);
}
|