1/* 2 * Copyright (c) 2010-2012 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2002-2005 The Regents of The University of Michigan 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Steve Reinhardt 41 */ 42 43#include "arch/locked_mem.hh" 44#include "arch/mmapped_ipr.hh" 45#include "arch/utility.hh" 46#include "base/bigint.hh" 47#include "config/the_isa.hh" 48#include "cpu/simple/timing.hh" 49#include "cpu/exetrace.hh" 50#include "debug/Config.hh" 51#include "debug/Drain.hh" 52#include "debug/ExecFaulting.hh" 53#include "debug/SimpleCPU.hh" 54#include "mem/packet.hh" 55#include "mem/packet_access.hh" 56#include "params/TimingSimpleCPU.hh" 57#include "sim/faults.hh" 58#include "sim/full_system.hh" 59#include "sim/system.hh" 60 61using namespace std; 62using namespace TheISA; 63 64void 65TimingSimpleCPU::init() 66{ 67 BaseCPU::init(); 68 69 if (!params()->switched_out && 70 system->getMemoryMode() != Enums::timing) { 71 fatal("The timing CPU requires the memory system to be in " 72 "'timing' mode.\n"); 73 } 74 75 // Initialise the ThreadContext's memory proxies 76 tcBase()->initMemProxies(tcBase()); 77 78 if (FullSystem && !params()->switched_out) { 79 for (int i = 0; i < threadContexts.size(); ++i) { 80 ThreadContext *tc = threadContexts[i]; 81 // initialize CPU, including PC 82 TheISA::initCPU(tc, _cpuId); 83 } 84 } 85} 86 87void 88TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t) 89{ 90 pkt = _pkt; 91 cpu->schedule(this, t); 92} 93 94TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p) 95 : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this), 96 
dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0), 97 fetchEvent(this), drainManager(NULL) 98{ 99 _status = Idle; 100 101 system->totalNumInsts = 0; 102} 103 104 105TimingSimpleCPU::~TimingSimpleCPU() 106{ 107} 108 109unsigned int 110TimingSimpleCPU::drain(DrainManager *drain_manager) 111{
| 1/* 2 * Copyright (c) 2010-2012 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2002-2005 The Regents of The University of Michigan 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Steve Reinhardt 41 */ 42 43#include "arch/locked_mem.hh" 44#include "arch/mmapped_ipr.hh" 45#include "arch/utility.hh" 46#include "base/bigint.hh" 47#include "config/the_isa.hh" 48#include "cpu/simple/timing.hh" 49#include "cpu/exetrace.hh" 50#include "debug/Config.hh" 51#include "debug/Drain.hh" 52#include "debug/ExecFaulting.hh" 53#include "debug/SimpleCPU.hh" 54#include "mem/packet.hh" 55#include "mem/packet_access.hh" 56#include "params/TimingSimpleCPU.hh" 57#include "sim/faults.hh" 58#include "sim/full_system.hh" 59#include "sim/system.hh" 60 61using namespace std; 62using namespace TheISA; 63 64void 65TimingSimpleCPU::init() 66{ 67 BaseCPU::init(); 68 69 if (!params()->switched_out && 70 system->getMemoryMode() != Enums::timing) { 71 fatal("The timing CPU requires the memory system to be in " 72 "'timing' mode.\n"); 73 } 74 75 // Initialise the ThreadContext's memory proxies 76 tcBase()->initMemProxies(tcBase()); 77 78 if (FullSystem && !params()->switched_out) { 79 for (int i = 0; i < threadContexts.size(); ++i) { 80 ThreadContext *tc = threadContexts[i]; 81 // initialize CPU, including PC 82 TheISA::initCPU(tc, _cpuId); 83 } 84 } 85} 86 87void 88TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t) 89{ 90 pkt = _pkt; 91 cpu->schedule(this, t); 92} 93 94TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p) 95 : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this), 96 
dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0), 97 fetchEvent(this), drainManager(NULL) 98{ 99 _status = Idle; 100 101 system->totalNumInsts = 0; 102} 103 104 105TimingSimpleCPU::~TimingSimpleCPU() 106{ 107} 108 109unsigned int 110TimingSimpleCPU::drain(DrainManager *drain_manager) 111{
|
202 previousCycle = curCycle(); 203} 204 205 206void 207TimingSimpleCPU::activateContext(ThreadID thread_num, Cycles delay) 208{ 209 DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay); 210 211 assert(thread_num == 0); 212 assert(thread); 213 214 assert(_status == Idle); 215 216 notIdleFraction++; 217 _status = BaseSimpleCPU::Running; 218 219 // kick things off by initiating the fetch of the next instruction 220 schedule(fetchEvent, clockEdge(delay)); 221} 222 223 224void 225TimingSimpleCPU::suspendContext(ThreadID thread_num) 226{ 227 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num); 228 229 assert(thread_num == 0); 230 assert(thread); 231 232 if (_status == Idle) 233 return; 234 235 assert(_status == BaseSimpleCPU::Running); 236 237 // just change status to Idle... if status != Running, 238 // completeInst() will not initiate fetch of next instruction. 239 240 notIdleFraction--; 241 _status = Idle; 242} 243 244bool 245TimingSimpleCPU::handleReadPacket(PacketPtr pkt) 246{ 247 RequestPtr req = pkt->req; 248 if (req->isMmappedIpr()) { 249 Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt); 250 new IprEvent(pkt, this, clockEdge(delay)); 251 _status = DcacheWaitResponse; 252 dcache_pkt = NULL; 253 } else if (!dcachePort.sendTimingReq(pkt)) { 254 _status = DcacheRetry; 255 dcache_pkt = pkt; 256 } else { 257 _status = DcacheWaitResponse; 258 // memory system takes ownership of packet 259 dcache_pkt = NULL; 260 } 261 return dcache_pkt == NULL; 262} 263 264void 265TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res, 266 bool read) 267{ 268 PacketPtr pkt; 269 buildPacket(pkt, req, read); 270 pkt->dataDynamicArray<uint8_t>(data); 271 if (req->getFlags().isSet(Request::NO_ACCESS)) { 272 assert(!dcache_pkt); 273 pkt->makeResponse(); 274 completeDataAccess(pkt); 275 } else if (read) { 276 handleReadPacket(pkt); 277 } else { 278 bool do_access = true; // flag to suppress cache access 279 280 if (req->isLLSC()) { 281 
do_access = TheISA::handleLockedWrite(thread, req); 282 } else if (req->isCondSwap()) { 283 assert(res); 284 req->setExtraData(*res); 285 } 286 287 if (do_access) { 288 dcache_pkt = pkt; 289 handleWritePacket(); 290 } else { 291 _status = DcacheWaitResponse; 292 completeDataAccess(pkt); 293 } 294 } 295} 296 297void 298TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2, 299 RequestPtr req, uint8_t *data, bool read) 300{ 301 PacketPtr pkt1, pkt2; 302 buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read); 303 if (req->getFlags().isSet(Request::NO_ACCESS)) { 304 assert(!dcache_pkt); 305 pkt1->makeResponse(); 306 completeDataAccess(pkt1); 307 } else if (read) { 308 SplitFragmentSenderState * send_state = 309 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState); 310 if (handleReadPacket(pkt1)) { 311 send_state->clearFromParent(); 312 send_state = dynamic_cast<SplitFragmentSenderState *>( 313 pkt2->senderState); 314 if (handleReadPacket(pkt2)) { 315 send_state->clearFromParent(); 316 } 317 } 318 } else { 319 dcache_pkt = pkt1; 320 SplitFragmentSenderState * send_state = 321 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState); 322 if (handleWritePacket()) { 323 send_state->clearFromParent(); 324 dcache_pkt = pkt2; 325 send_state = dynamic_cast<SplitFragmentSenderState *>( 326 pkt2->senderState); 327 if (handleWritePacket()) { 328 send_state->clearFromParent(); 329 } 330 } 331 } 332} 333 334void 335TimingSimpleCPU::translationFault(Fault fault) 336{ 337 // fault may be NoFault in cases where a fault is suppressed, 338 // for instance prefetches. 339 numCycles += curCycle() - previousCycle; 340 previousCycle = curCycle(); 341 342 if (traceData) { 343 // Since there was a fault, we shouldn't trace this instruction. 
344 delete traceData; 345 traceData = NULL; 346 } 347 348 postExecute(); 349 350 advanceInst(fault); 351} 352 353void 354TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read) 355{ 356 MemCmd cmd; 357 if (read) { 358 cmd = MemCmd::ReadReq; 359 if (req->isLLSC()) 360 cmd = MemCmd::LoadLockedReq; 361 } else { 362 cmd = MemCmd::WriteReq; 363 if (req->isLLSC()) { 364 cmd = MemCmd::StoreCondReq; 365 } else if (req->isSwap()) { 366 cmd = MemCmd::SwapReq; 367 } 368 } 369 pkt = new Packet(req, cmd); 370} 371 372void 373TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2, 374 RequestPtr req1, RequestPtr req2, RequestPtr req, 375 uint8_t *data, bool read) 376{ 377 pkt1 = pkt2 = NULL; 378 379 assert(!req1->isMmappedIpr() && !req2->isMmappedIpr()); 380 381 if (req->getFlags().isSet(Request::NO_ACCESS)) { 382 buildPacket(pkt1, req, read); 383 return; 384 } 385 386 buildPacket(pkt1, req1, read); 387 buildPacket(pkt2, req2, read); 388 389 req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags(), dataMasterId()); 390 PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand()); 391 392 pkt->dataDynamicArray<uint8_t>(data); 393 pkt1->dataStatic<uint8_t>(data); 394 pkt2->dataStatic<uint8_t>(data + req1->getSize()); 395 396 SplitMainSenderState * main_send_state = new SplitMainSenderState; 397 pkt->senderState = main_send_state; 398 main_send_state->fragments[0] = pkt1; 399 main_send_state->fragments[1] = pkt2; 400 main_send_state->outstanding = 2; 401 pkt1->senderState = new SplitFragmentSenderState(pkt, 0); 402 pkt2->senderState = new SplitFragmentSenderState(pkt, 1); 403} 404 405Fault 406TimingSimpleCPU::readMem(Addr addr, uint8_t *data, 407 unsigned size, unsigned flags) 408{ 409 Fault fault; 410 const int asid = 0; 411 const ThreadID tid = 0; 412 const Addr pc = thread->instAddr(); 413 unsigned block_size = dcachePort.peerBlockSize(); 414 BaseTLB::Mode mode = BaseTLB::Read; 415 416 if (traceData) { 417 traceData->setAddr(addr); 418 } 419 
420 RequestPtr req = new Request(asid, addr, size, 421 flags, dataMasterId(), pc, _cpuId, tid); 422 423 Addr split_addr = roundDown(addr + size - 1, block_size); 424 assert(split_addr <= addr || split_addr - addr < block_size); 425 426 _status = DTBWaitResponse; 427 if (split_addr > addr) { 428 RequestPtr req1, req2; 429 assert(!req->isLLSC() && !req->isSwap()); 430 req->splitOnVaddr(split_addr, req1, req2); 431 432 WholeTranslationState *state = 433 new WholeTranslationState(req, req1, req2, new uint8_t[size], 434 NULL, mode); 435 DataTranslation<TimingSimpleCPU *> *trans1 = 436 new DataTranslation<TimingSimpleCPU *>(this, state, 0); 437 DataTranslation<TimingSimpleCPU *> *trans2 = 438 new DataTranslation<TimingSimpleCPU *>(this, state, 1); 439 440 thread->dtb->translateTiming(req1, tc, trans1, mode); 441 thread->dtb->translateTiming(req2, tc, trans2, mode); 442 } else { 443 WholeTranslationState *state = 444 new WholeTranslationState(req, new uint8_t[size], NULL, mode); 445 DataTranslation<TimingSimpleCPU *> *translation 446 = new DataTranslation<TimingSimpleCPU *>(this, state); 447 thread->dtb->translateTiming(req, tc, translation, mode); 448 } 449 450 return NoFault; 451} 452 453bool 454TimingSimpleCPU::handleWritePacket() 455{ 456 RequestPtr req = dcache_pkt->req; 457 if (req->isMmappedIpr()) { 458 Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt); 459 new IprEvent(dcache_pkt, this, clockEdge(delay)); 460 _status = DcacheWaitResponse; 461 dcache_pkt = NULL; 462 } else if (!dcachePort.sendTimingReq(dcache_pkt)) { 463 _status = DcacheRetry; 464 } else { 465 _status = DcacheWaitResponse; 466 // memory system takes ownership of packet 467 dcache_pkt = NULL; 468 } 469 return dcache_pkt == NULL; 470} 471 472Fault 473TimingSimpleCPU::writeMem(uint8_t *data, unsigned size, 474 Addr addr, unsigned flags, uint64_t *res) 475{ 476 uint8_t *newData = new uint8_t[size]; 477 memcpy(newData, data, size); 478 479 const int asid = 0; 480 const ThreadID tid = 0; 
481 const Addr pc = thread->instAddr(); 482 unsigned block_size = dcachePort.peerBlockSize(); 483 BaseTLB::Mode mode = BaseTLB::Write; 484 485 if (traceData) { 486 traceData->setAddr(addr); 487 } 488 489 RequestPtr req = new Request(asid, addr, size, 490 flags, dataMasterId(), pc, _cpuId, tid); 491 492 Addr split_addr = roundDown(addr + size - 1, block_size); 493 assert(split_addr <= addr || split_addr - addr < block_size); 494 495 _status = DTBWaitResponse; 496 if (split_addr > addr) { 497 RequestPtr req1, req2; 498 assert(!req->isLLSC() && !req->isSwap()); 499 req->splitOnVaddr(split_addr, req1, req2); 500 501 WholeTranslationState *state = 502 new WholeTranslationState(req, req1, req2, newData, res, mode); 503 DataTranslation<TimingSimpleCPU *> *trans1 = 504 new DataTranslation<TimingSimpleCPU *>(this, state, 0); 505 DataTranslation<TimingSimpleCPU *> *trans2 = 506 new DataTranslation<TimingSimpleCPU *>(this, state, 1); 507 508 thread->dtb->translateTiming(req1, tc, trans1, mode); 509 thread->dtb->translateTiming(req2, tc, trans2, mode); 510 } else { 511 WholeTranslationState *state = 512 new WholeTranslationState(req, newData, res, mode); 513 DataTranslation<TimingSimpleCPU *> *translation = 514 new DataTranslation<TimingSimpleCPU *>(this, state); 515 thread->dtb->translateTiming(req, tc, translation, mode); 516 } 517 518 // Translation faults will be returned via finishTranslation() 519 return NoFault; 520} 521 522 523void 524TimingSimpleCPU::finishTranslation(WholeTranslationState *state) 525{ 526 _status = BaseSimpleCPU::Running; 527 528 if (state->getFault() != NoFault) { 529 if (state->isPrefetch()) { 530 state->setNoFault(); 531 } 532 delete [] state->data; 533 state->deleteReqs(); 534 translationFault(state->getFault()); 535 } else { 536 if (!state->isSplit) { 537 sendData(state->mainReq, state->data, state->res, 538 state->mode == BaseTLB::Read); 539 } else { 540 sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq, 541 state->data, state->mode 
== BaseTLB::Read); 542 } 543 } 544 545 delete state; 546} 547 548 549void 550TimingSimpleCPU::fetch() 551{ 552 DPRINTF(SimpleCPU, "Fetch\n"); 553 554 if (!curStaticInst || !curStaticInst->isDelayedCommit()) 555 checkForInterrupts(); 556 557 checkPcEventQueue(); 558 559 // We must have just got suspended by a PC event 560 if (_status == Idle) 561 return; 562 563 TheISA::PCState pcState = thread->pcState(); 564 bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst; 565 566 if (needToFetch) { 567 _status = BaseSimpleCPU::Running; 568 Request *ifetch_req = new Request(); 569 ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0); 570 setupFetchRequest(ifetch_req); 571 DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr()); 572 thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation, 573 BaseTLB::Execute); 574 } else { 575 _status = IcacheWaitResponse; 576 completeIfetch(NULL); 577 578 numCycles += curCycle() - previousCycle; 579 previousCycle = curCycle(); 580 } 581} 582 583 584void 585TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc) 586{ 587 if (fault == NoFault) { 588 DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n", 589 req->getVaddr(), req->getPaddr()); 590 ifetch_pkt = new Packet(req, MemCmd::ReadReq); 591 ifetch_pkt->dataStatic(&inst); 592 DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr()); 593 594 if (!icachePort.sendTimingReq(ifetch_pkt)) { 595 // Need to wait for retry 596 _status = IcacheRetry; 597 } else { 598 // Need to wait for cache to respond 599 _status = IcacheWaitResponse; 600 // ownership of packet transferred to memory system 601 ifetch_pkt = NULL; 602 } 603 } else { 604 DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr()); 605 delete req; 606 // fetch fault: advance directly to next instruction (fault handler) 607 _status = BaseSimpleCPU::Running; 608 advanceInst(fault); 609 } 610 611 numCycles += curCycle() - previousCycle; 612 
previousCycle = curCycle(); 613} 614 615 616void 617TimingSimpleCPU::advanceInst(Fault fault) 618{ 619 if (_status == Faulting) 620 return; 621 622 if (fault != NoFault) { 623 advancePC(fault); 624 DPRINTF(SimpleCPU, "Fault occured, scheduling fetch event\n"); 625 reschedule(fetchEvent, nextCycle(), true); 626 _status = Faulting; 627 return; 628 } 629 630 631 if (!stayAtPC) 632 advancePC(fault); 633 634 if (tryCompleteDrain()) 635 return; 636 637 if (_status == BaseSimpleCPU::Running) { 638 // kick off fetch of next instruction... callback from icache 639 // response will cause that instruction to be executed, 640 // keeping the CPU running. 641 fetch(); 642 } 643} 644 645 646void 647TimingSimpleCPU::completeIfetch(PacketPtr pkt) 648{ 649 DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ? 650 pkt->getAddr() : 0); 651 652 // received a response from the icache: execute the received 653 // instruction 654 655 assert(!pkt || !pkt->isError()); 656 assert(_status == IcacheWaitResponse); 657 658 _status = BaseSimpleCPU::Running; 659 660 numCycles += curCycle() - previousCycle; 661 previousCycle = curCycle(); 662 663 preExecute(); 664 if (curStaticInst && curStaticInst->isMemRef()) { 665 // load or store: just send to dcache 666 Fault fault = curStaticInst->initiateAcc(this, traceData); 667 668 // If we're not running now the instruction will complete in a dcache 669 // response callback or the instruction faulted and has started an 670 // ifetch 671 if (_status == BaseSimpleCPU::Running) { 672 if (fault != NoFault && traceData) { 673 // If there was a fault, we shouldn't trace this instruction. 
674 delete traceData; 675 traceData = NULL; 676 } 677 678 postExecute(); 679 // @todo remove me after debugging with legion done 680 if (curStaticInst && (!curStaticInst->isMicroop() || 681 curStaticInst->isFirstMicroop())) 682 instCnt++; 683 advanceInst(fault); 684 } 685 } else if (curStaticInst) { 686 // non-memory instruction: execute completely now 687 Fault fault = curStaticInst->execute(this, traceData); 688 689 // keep an instruction count 690 if (fault == NoFault) 691 countInst(); 692 else if (traceData && !DTRACE(ExecFaulting)) { 693 delete traceData; 694 traceData = NULL; 695 } 696 697 postExecute(); 698 // @todo remove me after debugging with legion done 699 if (curStaticInst && (!curStaticInst->isMicroop() || 700 curStaticInst->isFirstMicroop())) 701 instCnt++; 702 advanceInst(fault); 703 } else { 704 advanceInst(NoFault); 705 } 706 707 if (pkt) { 708 delete pkt->req; 709 delete pkt; 710 } 711} 712 713void 714TimingSimpleCPU::IcachePort::ITickEvent::process() 715{ 716 cpu->completeIfetch(pkt); 717} 718 719bool 720TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt) 721{ 722 DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr()); 723 // delay processing of returned data until next CPU clock edge 724 Tick next_tick = cpu->nextCycle(); 725 726 if (next_tick == curTick()) 727 cpu->completeIfetch(pkt); 728 else 729 tickEvent.schedule(pkt, next_tick); 730 731 return true; 732} 733 734void 735TimingSimpleCPU::IcachePort::recvRetry() 736{ 737 // we shouldn't get a retry unless we have a packet that we're 738 // waiting to transmit 739 assert(cpu->ifetch_pkt != NULL); 740 assert(cpu->_status == IcacheRetry); 741 PacketPtr tmp = cpu->ifetch_pkt; 742 if (sendTimingReq(tmp)) { 743 cpu->_status = IcacheWaitResponse; 744 cpu->ifetch_pkt = NULL; 745 } 746} 747 748void 749TimingSimpleCPU::completeDataAccess(PacketPtr pkt) 750{ 751 // received a response from the dcache: complete the load or store 752 // instruction 753 assert(!pkt->isError()); 754 
assert(_status == DcacheWaitResponse || _status == DTBWaitResponse || 755 pkt->req->getFlags().isSet(Request::NO_ACCESS)); 756 757 numCycles += curCycle() - previousCycle; 758 previousCycle = curCycle(); 759 760 if (pkt->senderState) { 761 SplitFragmentSenderState * send_state = 762 dynamic_cast<SplitFragmentSenderState *>(pkt->senderState); 763 assert(send_state); 764 delete pkt->req; 765 delete pkt; 766 PacketPtr big_pkt = send_state->bigPkt; 767 delete send_state; 768 769 SplitMainSenderState * main_send_state = 770 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState); 771 assert(main_send_state); 772 // Record the fact that this packet is no longer outstanding. 773 assert(main_send_state->outstanding != 0); 774 main_send_state->outstanding--; 775 776 if (main_send_state->outstanding) { 777 return; 778 } else { 779 delete main_send_state; 780 big_pkt->senderState = NULL; 781 pkt = big_pkt; 782 } 783 } 784 785 _status = BaseSimpleCPU::Running; 786 787 Fault fault = curStaticInst->completeAcc(pkt, this, traceData); 788 789 // keep an instruction count 790 if (fault == NoFault) 791 countInst(); 792 else if (traceData) { 793 // If there was a fault, we shouldn't trace this instruction. 
794 delete traceData; 795 traceData = NULL; 796 } 797 798 // the locked flag may be cleared on the response packet, so check 799 // pkt->req and not pkt to see if it was a load-locked 800 if (pkt->isRead() && pkt->req->isLLSC()) { 801 TheISA::handleLockedRead(thread, pkt->req); 802 } 803 804 delete pkt->req; 805 delete pkt; 806 807 postExecute(); 808 809 advanceInst(fault); 810} 811 812bool 813TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt) 814{ 815 // delay processing of returned data until next CPU clock edge 816 Tick next_tick = cpu->nextCycle(); 817 818 if (next_tick == curTick()) { 819 cpu->completeDataAccess(pkt); 820 } else { 821 if (!tickEvent.scheduled()) { 822 tickEvent.schedule(pkt, next_tick); 823 } else { 824 // In the case of a split transaction and a cache that is 825 // faster than a CPU we could get two responses before 826 // next_tick expires 827 if (!retryEvent.scheduled()) 828 cpu->schedule(retryEvent, next_tick); 829 return false; 830 } 831 } 832 833 return true; 834} 835 836void 837TimingSimpleCPU::DcachePort::DTickEvent::process() 838{ 839 cpu->completeDataAccess(pkt); 840} 841 842void 843TimingSimpleCPU::DcachePort::recvRetry() 844{ 845 // we shouldn't get a retry unless we have a packet that we're 846 // waiting to transmit 847 assert(cpu->dcache_pkt != NULL); 848 assert(cpu->_status == DcacheRetry); 849 PacketPtr tmp = cpu->dcache_pkt; 850 if (tmp->senderState) { 851 // This is a packet from a split access. 852 SplitFragmentSenderState * send_state = 853 dynamic_cast<SplitFragmentSenderState *>(tmp->senderState); 854 assert(send_state); 855 PacketPtr big_pkt = send_state->bigPkt; 856 857 SplitMainSenderState * main_send_state = 858 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState); 859 assert(main_send_state); 860 861 if (sendTimingReq(tmp)) { 862 // If we were able to send without retrying, record that fact 863 // and try sending the other fragment. 
864 send_state->clearFromParent(); 865 int other_index = main_send_state->getPendingFragment(); 866 if (other_index > 0) { 867 tmp = main_send_state->fragments[other_index]; 868 cpu->dcache_pkt = tmp; 869 if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) || 870 (big_pkt->isWrite() && cpu->handleWritePacket())) { 871 main_send_state->fragments[other_index] = NULL; 872 } 873 } else { 874 cpu->_status = DcacheWaitResponse; 875 // memory system takes ownership of packet 876 cpu->dcache_pkt = NULL; 877 } 878 } 879 } else if (sendTimingReq(tmp)) { 880 cpu->_status = DcacheWaitResponse; 881 // memory system takes ownership of packet 882 cpu->dcache_pkt = NULL; 883 } 884} 885 886TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu, 887 Tick t) 888 : pkt(_pkt), cpu(_cpu) 889{ 890 cpu->schedule(this, t); 891} 892 893void 894TimingSimpleCPU::IprEvent::process() 895{ 896 cpu->completeDataAccess(pkt); 897} 898 899const char * 900TimingSimpleCPU::IprEvent::description() const 901{ 902 return "Timing Simple CPU Delay IPR event"; 903} 904 905 906void 907TimingSimpleCPU::printAddr(Addr a) 908{ 909 dcachePort.printAddr(a); 910} 911 912 913//////////////////////////////////////////////////////////////////////// 914// 915// TimingSimpleCPU Simulation Object 916// 917TimingSimpleCPU * 918TimingSimpleCPUParams::create() 919{ 920 numThreads = 1; 921 if (!FullSystem && workload.size() != 1) 922 panic("only one workload allowed"); 923 return new TimingSimpleCPU(this); 924}
| 197 previousCycle = curCycle(); 198} 199 200 201void 202TimingSimpleCPU::activateContext(ThreadID thread_num, Cycles delay) 203{ 204 DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay); 205 206 assert(thread_num == 0); 207 assert(thread); 208 209 assert(_status == Idle); 210 211 notIdleFraction++; 212 _status = BaseSimpleCPU::Running; 213 214 // kick things off by initiating the fetch of the next instruction 215 schedule(fetchEvent, clockEdge(delay)); 216} 217 218 219void 220TimingSimpleCPU::suspendContext(ThreadID thread_num) 221{ 222 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num); 223 224 assert(thread_num == 0); 225 assert(thread); 226 227 if (_status == Idle) 228 return; 229 230 assert(_status == BaseSimpleCPU::Running); 231 232 // just change status to Idle... if status != Running, 233 // completeInst() will not initiate fetch of next instruction. 234 235 notIdleFraction--; 236 _status = Idle; 237} 238 239bool 240TimingSimpleCPU::handleReadPacket(PacketPtr pkt) 241{ 242 RequestPtr req = pkt->req; 243 if (req->isMmappedIpr()) { 244 Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt); 245 new IprEvent(pkt, this, clockEdge(delay)); 246 _status = DcacheWaitResponse; 247 dcache_pkt = NULL; 248 } else if (!dcachePort.sendTimingReq(pkt)) { 249 _status = DcacheRetry; 250 dcache_pkt = pkt; 251 } else { 252 _status = DcacheWaitResponse; 253 // memory system takes ownership of packet 254 dcache_pkt = NULL; 255 } 256 return dcache_pkt == NULL; 257} 258 259void 260TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res, 261 bool read) 262{ 263 PacketPtr pkt; 264 buildPacket(pkt, req, read); 265 pkt->dataDynamicArray<uint8_t>(data); 266 if (req->getFlags().isSet(Request::NO_ACCESS)) { 267 assert(!dcache_pkt); 268 pkt->makeResponse(); 269 completeDataAccess(pkt); 270 } else if (read) { 271 handleReadPacket(pkt); 272 } else { 273 bool do_access = true; // flag to suppress cache access 274 275 if (req->isLLSC()) { 276 
do_access = TheISA::handleLockedWrite(thread, req); 277 } else if (req->isCondSwap()) { 278 assert(res); 279 req->setExtraData(*res); 280 } 281 282 if (do_access) { 283 dcache_pkt = pkt; 284 handleWritePacket(); 285 } else { 286 _status = DcacheWaitResponse; 287 completeDataAccess(pkt); 288 } 289 } 290} 291 292void 293TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2, 294 RequestPtr req, uint8_t *data, bool read) 295{ 296 PacketPtr pkt1, pkt2; 297 buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read); 298 if (req->getFlags().isSet(Request::NO_ACCESS)) { 299 assert(!dcache_pkt); 300 pkt1->makeResponse(); 301 completeDataAccess(pkt1); 302 } else if (read) { 303 SplitFragmentSenderState * send_state = 304 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState); 305 if (handleReadPacket(pkt1)) { 306 send_state->clearFromParent(); 307 send_state = dynamic_cast<SplitFragmentSenderState *>( 308 pkt2->senderState); 309 if (handleReadPacket(pkt2)) { 310 send_state->clearFromParent(); 311 } 312 } 313 } else { 314 dcache_pkt = pkt1; 315 SplitFragmentSenderState * send_state = 316 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState); 317 if (handleWritePacket()) { 318 send_state->clearFromParent(); 319 dcache_pkt = pkt2; 320 send_state = dynamic_cast<SplitFragmentSenderState *>( 321 pkt2->senderState); 322 if (handleWritePacket()) { 323 send_state->clearFromParent(); 324 } 325 } 326 } 327} 328 329void 330TimingSimpleCPU::translationFault(Fault fault) 331{ 332 // fault may be NoFault in cases where a fault is suppressed, 333 // for instance prefetches. 334 numCycles += curCycle() - previousCycle; 335 previousCycle = curCycle(); 336 337 if (traceData) { 338 // Since there was a fault, we shouldn't trace this instruction. 
339 delete traceData; 340 traceData = NULL; 341 } 342 343 postExecute(); 344 345 advanceInst(fault); 346} 347 348void 349TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read) 350{ 351 MemCmd cmd; 352 if (read) { 353 cmd = MemCmd::ReadReq; 354 if (req->isLLSC()) 355 cmd = MemCmd::LoadLockedReq; 356 } else { 357 cmd = MemCmd::WriteReq; 358 if (req->isLLSC()) { 359 cmd = MemCmd::StoreCondReq; 360 } else if (req->isSwap()) { 361 cmd = MemCmd::SwapReq; 362 } 363 } 364 pkt = new Packet(req, cmd); 365} 366 367void 368TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2, 369 RequestPtr req1, RequestPtr req2, RequestPtr req, 370 uint8_t *data, bool read) 371{ 372 pkt1 = pkt2 = NULL; 373 374 assert(!req1->isMmappedIpr() && !req2->isMmappedIpr()); 375 376 if (req->getFlags().isSet(Request::NO_ACCESS)) { 377 buildPacket(pkt1, req, read); 378 return; 379 } 380 381 buildPacket(pkt1, req1, read); 382 buildPacket(pkt2, req2, read); 383 384 req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags(), dataMasterId()); 385 PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand()); 386 387 pkt->dataDynamicArray<uint8_t>(data); 388 pkt1->dataStatic<uint8_t>(data); 389 pkt2->dataStatic<uint8_t>(data + req1->getSize()); 390 391 SplitMainSenderState * main_send_state = new SplitMainSenderState; 392 pkt->senderState = main_send_state; 393 main_send_state->fragments[0] = pkt1; 394 main_send_state->fragments[1] = pkt2; 395 main_send_state->outstanding = 2; 396 pkt1->senderState = new SplitFragmentSenderState(pkt, 0); 397 pkt2->senderState = new SplitFragmentSenderState(pkt, 1); 398} 399 400Fault 401TimingSimpleCPU::readMem(Addr addr, uint8_t *data, 402 unsigned size, unsigned flags) 403{ 404 Fault fault; 405 const int asid = 0; 406 const ThreadID tid = 0; 407 const Addr pc = thread->instAddr(); 408 unsigned block_size = dcachePort.peerBlockSize(); 409 BaseTLB::Mode mode = BaseTLB::Read; 410 411 if (traceData) { 412 traceData->setAddr(addr); 413 } 414 
    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    // Start of the cache block containing the access's last byte; if
    // that lies above addr, the access straddles a block boundary and
    // must be split into two requests.
    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        // Shared state joins the two independent translations; the last
        // one to finish triggers finishTranslation().
        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, new uint8_t[size],
                                      NULL, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
        DataTranslation<TimingSimpleCPU *> *translation
            = new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    // Translation faults will be returned via finishTranslation().
    return NoFault;
}

// Try to send the pending store held in dcache_pkt.  Returns true if
// the packet was consumed (sent to the cache, or handled internally as
// a mem-mapped IPR access) and false if the cache asked for a retry.
bool
TimingSimpleCPU::handleWritePacket()
{
    RequestPtr req = dcache_pkt->req;
    if (req->isMmappedIpr()) {
        // Memory-mapped IPR: handled by the ISA after a fixed delay
        // rather than being sent to the cache.
        Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
        // Cache busy: keep dcache_pkt and wait for recvRetry().
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

// Start a timing-mode store (mirrors readMem): copy the data, then
// kick off the (possibly split) translation.  res, if non-NULL,
// receives the store-conditional result / carries the swap operand.
Fault
TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    // Copy the caller's data: the buffer must survive until the access
    // completes, so the translation state takes ownership of this copy.
    uint8_t *newData = new uint8_t[size];
    memcpy(newData, data, size);

    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = dcachePort.peerBlockSize();
    BaseTLB::Mode mode = BaseTLB::Write;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    // Split the access if it crosses a cache-block boundary (same
    // check as in readMem).
    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *translation =
            new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}


// Callback from the data TLB once all outstanding translations for the
// current access have finished: either report the fault (cleaning up
// the buffer and requests) or send the access on to the cache.
void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    _status = BaseSimpleCPU::Running;

    if (state->getFault() != NoFault) {
        if (state->isPrefetch()) {
            // Faulting prefetches are squashed, not reported.
            state->setNoFault();
        }
        delete [] state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode
                          == BaseTLB::Read);
        }
    }

    // The translation state is consumed on every path above.
    delete state;
}


// Fetch the next instruction: translate the PC and (via sendFetch)
// issue the icache request, or execute directly when running from the
// microcode ROM or inside a macro-op.
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    // We must have just got suspended by a PC event
    if (_status == Idle)
        return;

    TheISA::PCState pcState = thread->pcState();
    // No memory fetch is needed while in the microcode ROM or within a
    // macro-op: the next microop comes from the decoder, not memory.
    bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;

    if (needToFetch) {
        _status = BaseSimpleCPU::Running;
        Request *ifetch_req = new Request();
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                BaseTLB::Execute);
    } else {
        // Pretend an icache response arrived so the common completion
        // path (completeIfetch with a NULL packet) runs.
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        numCycles += curCycle() - previousCycle;
        previousCycle = curCycle();
    }
}


// Callback from the instruction TLB: send the fetch to the icache, or
// take the translation fault.
void
TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
{
    if (fault == NoFault) {
        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
                req->getVaddr(), req->getPaddr());
        ifetch_pkt = new Packet(req, MemCmd::ReadReq);
        // Fetched bytes land directly in the CPU's instruction buffer.
        ifetch_pkt->dataStatic(&inst);
        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());

        if (!icachePort.sendTimingReq(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        _status = BaseSimpleCPU::Running;
        advanceInst(fault);
    }

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();
}


// Move past the just-finished instruction: invoke the fault (if any)
// and stall until the fetch event, otherwise advance the PC, honour a
// pending drain, and schedule the next fetch.
void
TimingSimpleCPU::advanceInst(Fault fault)
{
    if (_status == Faulting)
        return;

    if (fault != NoFault) {
        advancePC(fault);
        DPRINTF(SimpleCPU, "Fault occured, scheduling fetch event\n");
        reschedule(fetchEvent, nextCycle(), true);
        _status = Faulting;
        return;
    }


    if (!stayAtPC)
        advancePC(fault);

    if (tryCompleteDrain())
        return;

    if (_status == BaseSimpleCPU::Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


// Icache response: decode and execute the fetched instruction.  pkt is
// NULL when executing from the microcode ROM or inside a macro-op (see
// fetch()).
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction

    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = BaseSimpleCPU::Running;

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);

        // If we're not running now the instruction will complete in a dcache
        // response callback or the instruction faulted and has started an
        // ifetch
        if (_status == BaseSimpleCPU::Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        // Nothing decoded this cycle: just move on.
        advanceInst(NoFault);
    }

    // pkt is NULL for ROM/macro-op "fetches"; otherwise we own it now.
    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}

// Deferred icache response, processed on the CPU's next clock edge.
void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

// Icache timing response: handle now if we are on a CPU clock edge,
// otherwise defer to the next edge via the tick event.
bool
TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->nextCycle();

    if (next_tick == curTick())
        cpu->completeIfetch(pkt);
    else
        tickEvent.schedule(pkt, next_tick);

    return true;
}

// Cache signalled it can accept the fetch we previously failed to send.
void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTimingReq(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

// Dcache response: finish the load or store.  For a split access this
// runs once per fragment; only when the last fragment arrives is the
// parent ("big") packet completed and the instruction finished.
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (pkt->senderState) {
        // This response is one fragment of a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            // The other fragment is still in flight; wait for it.
            return;
        } else {
            // Both fragments done: complete via the parent packet.
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = BaseSimpleCPU::Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    advanceInst(fault);
}

// Dcache timing response: handle now if on a CPU clock edge, else
// defer; if a second response arrives while one is already deferred
// (split access + fast cache), reject it and schedule a retry.
bool
TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
{
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->nextCycle();

    if (next_tick == curTick()) {
        cpu->completeDataAccess(pkt);
    } else {
        if (!tickEvent.scheduled()) {
            tickEvent.schedule(pkt, next_tick);
        } else {
            // In the case of a split transaction and a cache that is
            // faster than a CPU we could get two responses before
            // next_tick expires
            if (!retryEvent.scheduled())
                cpu->schedule(retryEvent, next_tick);
            return false;
        }
    }

    return true;
}

// Deferred dcache response, processed on the CPU's next clock edge.
void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

// Cache signalled it can accept the data packet we failed to send.
// For split accesses, after the retried fragment is accepted we also
// try to send the remaining pending fragment.
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTimingReq(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            // NOTE(review): presumably getPendingFragment() returns the
            // index of the still-unsent fragment (and a non-positive
            // value when none is pending).  Fragment 0 is always sent
            // first, so only index 1 (> 0) can be pending here — verify.
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTimingReq(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

// Self-scheduling event: delivers the mem-mapped IPR access's packet
// back to the CPU at tick t (see handleWritePacket / handleIprWrite).
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
        Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

// Complete the delayed IPR access exactly like a dcache response.
void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


// Debug helper: route an address print through the dcache port.
void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//

// Python-visible factory: builds the CPU from its parameter object.
// The timing CPU is single-threaded; syscall-emulation mode requires
// exactly one workload.
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new TimingSimpleCPU(this);
}
|