timing.cc revision 8708:7ccbdea0fa12
1/* 2 * Copyright (c) 2010 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2002-2005 The Regents of The University of Michigan 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/simple/timing.hh"
#include "cpu/exetrace.hh"
#include "debug/Config.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

// Return the named memory-system port.  This CPU exposes exactly two
// ports: the instruction-fetch port and the data port.
Port *
TimingSimpleCPU::getPort(const std::string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else
        panic("No Such Port\n");
}

// Post-construction initialization.  In full-system mode this seeds
// each thread context's architectural state (including the PC) and sets
// up the thread contexts' memory proxies.
void
TimingSimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, _cpuId);
    }

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());
#endif
}

// Stash the packet this event will deliver, then schedule the event on
// the owning CPU's event queue at tick t.
void
TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    cpu->schedule(this, t);
}

TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
      dcachePort(this), fetchEvent(this)
{
    _status = Idle;

    // No fetch or data access is outstanding at construction time.
    ifetch_pkt = dcache_pkt = NULL;
    drainEvent = NULL;
    previousTick = 0;
    changeState(SimObject::Running);
    system->totalNumInsts = 0;
}


TimingSimpleCPU::~TimingSimpleCPU()
{
}

// Checkpoint this CPU: record the SimObject drain state, then delegate
// the architectural/simple-CPU state to the base class.
void
TimingSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    BaseSimpleCPU::serialize(os);
}

// Restore from a checkpoint; mirror image of serialize() above.
void
TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    BaseSimpleCPU::unserialize(cp, section);
}

// Returns the number of outstanding activities that must complete
// before the drain is done: 0 if we can drain immediately, 1 if we must
// wait for an in-flight access and signal drain_event later.
unsigned int
TimingSimpleCPU::drain(Event *drain_event)
{
    // TimingSimpleCPU is ready to drain if it's not waiting for
    // an access to complete.
    if (_status == Idle || _status == Running || _status == SwitchedOut) {
        changeState(SimObject::Drained);
        return 0;
    } else {
        changeState(SimObject::Draining);
        drainEvent = drain_event;
        return 1;
    }
}

// Resume after a drain: restart instruction fetch unless the CPU is
// idle or switched out.  Requires the system to be in timing mode.
void
TimingSimpleCPU::resume()
{
    DPRINTF(SimpleCPU, "Resume\n");
    if (_status != SwitchedOut && _status != Idle) {
        assert(system->getMemoryMode() == Enums::timing);

        // Replace any already-scheduled fetch with one on the next
        // CPU clock edge.
        if (fetchEvent.scheduled())
            deschedule(fetchEvent);

        schedule(fetchEvent, nextCycle());
    }

    changeState(SimObject::Running);
}

// Hand execution off to another CPU model: account for cycles elapsed
// since the last accounting point and cancel any pending fetch.
void
TimingSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;
    numCycles += tickToCycles(curTick() - previousTick);

    // If we've been scheduled to resume but are then told to switch out,
    // we'll need to cancel it.
    if (fetchEvent.scheduled())
        deschedule(fetchEvent);
}


// Take over execution state from oldCPU (the inverse of switchOut()).
void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            break;
        }
    }

    if (_status != Running) {
        _status = Idle;
    }
    // This model only supports a single hardware thread.
    assert(threadContexts.size() == 1);
    previousTick = curTick();
}


// Wake thread thread_num (must be 0) after `delay` cycles by scheduling
// the first instruction fetch.
void
TimingSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction++;
    _status = Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, nextCycle(curTick() + ticks(delay)));
}


// Put thread thread_num (must be 0) to sleep.  No event needs to be
// descheduled: the Idle status stops the fetch/execute chain.
void
TimingSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction--;
    _status = Idle;
}

// Try to issue a read packet to the data port.  Memory-mapped IPR reads
// are handled locally with a delayed IprEvent instead of going to the
// cache.  Returns true if the packet was consumed (ownership passed to
// the memory system or IPR event), false if the port asked us to retry
// (in which case dcache_pkt holds the pending packet).
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    RequestPtr req = pkt->req;
    if (req->isMmappedIpr()) {
        Tick delay;
        delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, nextCycle(curTick() + delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTiming(pkt)) {
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

// Issue a (non-split) data access once translation has finished.
// `data` is dynamically allocated and ownership passes to the packet.
// NO_ACCESS requests are completed immediately without touching memory;
// failed store-conditionals likewise complete locally.
void
TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
                          bool read)
{
    PacketPtr pkt;
    buildPacket(pkt, req, read);
    pkt->dataDynamicArray<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // flag to suppress cache access

        if (req->isLLSC()) {
            // Store-conditional: only access memory if the lock check
            // succeeds.
            do_access = TheISA::handleLockedWrite(thread, req);
        } else if (req->isCondSwap()) {
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            // Suppressed access (failed SC): synthesize the response.
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}

// Issue a cache-line-crossing access as two fragment packets.  Each
// fragment that is accepted by the port is detached from its parent via
// clearFromParent(); the second fragment is only attempted once the
// first has been accepted.
void
TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
                               RequestPtr req, uint8_t *data, bool read)
{
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleReadPacket(pkt1)) {
            send_state->clearFromParent();
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleReadPacket(pkt2)) {
                send_state->clearFromParent();
            }
        }
    } else {
        dcache_pkt = pkt1;
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleWritePacket()) {
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleWritePacket()) {
                send_state->clearFromParent();
            }
        }
    }
}

// A data translation finished with a fault (possibly NoFault for a
// suppressed fault, e.g. a prefetch): discard any trace record and
// advance past the faulting instruction.
void
TimingSimpleCPU::translationFault(Fault fault)
{
    // fault may be NoFault in cases where a fault is suppressed,
    // for instance prefetches.
    numCycles += tickToCycles(curTick() - previousTick);
    previousTick = curTick();

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();
    } else {
        advanceInst(fault);
    }
}

// Allocate a packet for req, choosing the memory command from the
// access direction and the request's LLSC/swap flags.
void
TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
{
    MemCmd cmd;
    if (read) {
        cmd = MemCmd::ReadReq;
        if (req->isLLSC())
            cmd = MemCmd::LoadLockedReq;
    } else {
        cmd = MemCmd::WriteReq;
        if (req->isLLSC()) {
            cmd = MemCmd::StoreCondReq;
        } else if (req->isSwap()) {
            cmd = MemCmd::SwapReq;
        }
    }
    pkt = new Packet(req, cmd, Packet::Broadcast);
}

// Build the two fragment packets (pkt1/pkt2) for a split access plus a
// hidden "big" parent packet that owns `data` and collects the
// fragments' completions via SplitMainSenderState.  The fragments alias
// the parent's buffer (dataStatic), so only the parent frees it.
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    // IPR accesses must not be split across a cache-line boundary.
    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        buildPacket(pkt1, req, read);
        return;
    }

    buildPacket(pkt1, req1, read);
    buildPacket(pkt2, req2, read);

    // Give the parent request a physical address so the parent packet
    // can be constructed; it is never sent to memory itself.
    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags());
    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand(),
                               Packet::Broadcast);

    pkt->dataDynamicArray<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}

// Initiate a timing-mode load.  Kicks off address translation (split
// into two requests if the access crosses a cache-line boundary); the
// access itself is issued later from finishTranslation().  Always
// returns NoFault -- translation faults are delivered asynchronously.
Fault
TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
                         unsigned size, unsigned flags)
{
    Fault fault;
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = dcachePort.peerBlockSize();
    BaseTLB::Mode mode = BaseTLB::Read;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req  = new Request(asid, addr, size,
                                  flags, pc, _cpuId, tid);

    // First byte of the block containing the access's last byte; if it
    // is above addr the access straddles a line boundary.
    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, new uint8_t[size],
                                      NULL, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
        DataTranslation<TimingSimpleCPU *> *translation
            = new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    return NoFault;
}

// Try to issue the pending write packet (dcache_pkt) to the data port;
// same contract as handleReadPacket() above, including local handling
// of memory-mapped IPR writes.
bool
TimingSimpleCPU::handleWritePacket()
{
    RequestPtr req = dcache_pkt->req;
    if (req->isMmappedIpr()) {
        Tick delay;
        delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, nextCycle(curTick() + delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTiming(dcache_pkt)) {
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

// Initiate a timing-mode store.  The caller's buffer is copied so the
// data survives until the access completes; otherwise structured like
// readMem() above.
Fault
TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    uint8_t *newData = new uint8_t[size];
    memcpy(newData, data, size);

    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = dcachePort.peerBlockSize();
    BaseTLB::Mode mode = BaseTLB::Write;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, pc, _cpuId, tid);

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *translation =
            new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}


// Callback invoked when all pieces of a data translation are done:
// either report the fault or issue the access via sendData() /
// sendSplitData().  The translation state is always freed here.
void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    _status = Running;

    if (state->getFault() != NoFault) {
        if (state->isPrefetch()) {
            // Faulting prefetches are dropped rather than reported.
            state->setNoFault();
        }
        delete [] state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode == BaseTLB::Read);
        }
    }

    delete state;
}


// Start fetching the next instruction: check for interrupts and PC
// events, then either start an itb translation (normal fetch) or, for
// microcode that needs no memory access, proceed straight to
// completeIfetch(NULL).
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    // We must have just got suspended by a PC event
    if (_status == Idle)
        return;

    TheISA::PCState pcState = thread->pcState();
    // No memory fetch is needed while executing out of the microcode
    // ROM or inside a macro-op.
    bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;

    if (needToFetch) {
        _status = Running;
        Request *ifetch_req = new Request();
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                BaseTLB::Execute);
    } else {
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        numCycles += tickToCycles(curTick() - previousTick);
        previousTick = curTick();
    }
}


// Callback from the fetch translation: on success send the fetch packet
// to the icache (buffering it in ifetch_pkt if the port needs a retry);
// on a fault, go straight to the fault handler via advanceInst().
void
TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
{
    if (fault == NoFault) {
        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
                req->getVaddr(), req->getPaddr());
        ifetch_pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
        ifetch_pkt->dataStatic(&inst);
        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());

        if (!icachePort.sendTiming(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        _status = Running;
        advanceInst(fault);
    }

    numCycles += tickToCycles(curTick() - previousTick);
    previousTick = curTick();
}


// Move on to the next instruction: invoke the fault (rescheduling the
// fetch for the fault handler) or advance the PC and start the next
// fetch if the CPU is still Running.
void
TimingSimpleCPU::advanceInst(Fault fault)
{

    if (_status == Faulting)
        return;

    if (fault != NoFault) {
        advancePC(fault);
        DPRINTF(SimpleCPU, "Fault occured, scheduling fetch event\n");
        reschedule(fetchEvent, nextCycle(), true);
        _status = Faulting;
        return;
    }


    if (!stayAtPC)
        advancePC(fault);

    if (_status == Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


// Icache response handler: decode and execute the fetched instruction.
// pkt may be NULL when fetch() skipped the memory access (microcode
// ROM / macro-op); the packet and its request are freed here.
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction

    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = Running;

    numCycles += tickToCycles(curTick() - previousTick);
    previousTick = curTick();

    if (getState() == SimObject::Draining) {
        if (pkt) {
            delete pkt->req;
            delete pkt;
        }

        completeDrain();
        return;
    }

    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);

        // If we're not running now the instruction will complete in a dcache
        // response callback or the instruction faulted and has started an
        // ifetch
        if (_status == Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        advanceInst(NoFault);
    }

    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}

// Deferred delivery of an icache response on the next CPU clock edge.
void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

// Receive a timing packet on the icache port.  Responses are processed
// now (if already on a clock edge) or deferred via tickEvent; nacked
// packets are resent, falling back to the retry protocol if the send
// fails.  Snoops are ignored.
bool
TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick());

        if (next_tick == curTick())
            cpu->completeIfetch(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    } else if (pkt->wasNacked()) {
        assert(cpu->_status == IcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = IcacheRetry;
            cpu->ifetch_pkt = pkt;
        }
    }
    //Snooping a Coherence Request, do nothing
    return true;
}

// The icache port is ready again: resend the buffered fetch packet.
void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

// Dcache response handler: finish the outstanding load/store.  For a
// split access the two fragment responses are folded back into the
// parent ("big") packet; only when both have arrived does the
// instruction complete.
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    numCycles += tickToCycles(curTick() - previousTick);
    previousTick = curTick();

    if (pkt->senderState) {
        // This is a split-access fragment; free it and drop down to the
        // parent packet once both fragments are in.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            return;
        } else {
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();

        return;
    }

    advanceInst(fault);
}


// Signal the drain requester that this CPU has finished draining.
void
TimingSimpleCPU::completeDrain()
{
    DPRINTF(Config, "Done draining\n");
    changeState(SimObject::Drained);
    drainEvent->process();
}

// Receive a timing packet on the dcache port.  Like the icache path,
// responses are processed on the next clock edge; if the tick event is
// already busy (two split-fragment responses in the same cycle) the
// packet is refused and a retry is scheduled instead.
bool
TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick());

        if (next_tick == curTick()) {
            cpu->completeDataAccess(pkt);
        } else {
            if (!tickEvent.scheduled()) {
                tickEvent.schedule(pkt, next_tick);
            } else {
                // In the case of a split transaction and a cache that is
                // faster than a CPU we could get two responses before
                // next_tick expires
                if (!retryEvent.scheduled())
                    cpu->schedule(retryEvent, next_tick);
                return false;
            }
        }

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == DcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = DcacheRetry;
            cpu->dcache_pkt = pkt;
        }
    }
    //Snooping a Coherence Request, do nothing
    return true;
}

// Deferred delivery of a dcache response on the next CPU clock edge.
void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

// The dcache port is ready again: resend the buffered data packet.  If
// it is a split-access fragment, also try to issue the other fragment
// once this one is accepted.
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTiming(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            // NOTE(review): the "> 0" test treats a pending fragment at
            // index 0 the same as "none pending" -- presumably fragment 0
            // is always sent first so only index 1 can be pending here;
            // confirm getPendingFragment()'s contract.
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTiming(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

// One-shot event used to model the latency of a memory-mapped IPR
// access; schedules itself at construction and completes the access
// when it fires.
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


// Debugging aid: print the memory-system path for address a.
void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new TimingSimpleCPU(this);
}