// timing.cc revision 12769:f9c0d0a09dac
1/* 2 * Copyright 2014 Google, Inc. 3 * Copyright (c) 2010-2013,2015,2017 ARM Limited 4 * All rights reserved 5 * 6 * The license below extends only to copyright in the software and shall 7 * not be construed as granting a license to any other intellectual 8 * property including but not limited to intellectual property relating 9 * to a hardware implementation of the functionality of the software 10 * licensed hereunder. You may use the software subject to the license 11 * terms below provided that you ensure that this notice is replicated 12 * unmodified and in its entirety in all distributions of the software, 13 * modified or unmodified, in source code or in binary form. 14 * 15 * Copyright (c) 2002-2005 The Regents of The University of Michigan 16 * All rights reserved. 17 * 18 * Redistribution and use in source and binary forms, with or without 19 * modification, are permitted provided that the following conditions are 20 * met: redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer; 22 * redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution; 25 * neither the name of the copyright holders nor the names of its 26 * contributors may be used to endorse or promote products derived from 27 * this software without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 32 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
 * IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "cpu/simple/timing.hh"

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "config/the_isa.hh"
#include "cpu/exetrace.hh"
#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/Mwait.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

// Initialization: nothing beyond what the base simple CPU sets up.
void
TimingSimpleCPU::init()
{
    BaseSimpleCPU::init();
}

// Remember the packet this event will deliver and schedule the event
// on the owning CPU's event queue at tick t.
void
TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    cpu->schedule(this, t);
}

// The CPU starts out Idle; fetchEvent invokes fetch() whenever it fires.
TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
      dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
      fetchEvent([this]{ fetch(); }, name())
{
    _status = Idle;
}



TimingSimpleCPU::~TimingSimpleCPU()
{
}

// Drain request: answer Drained immediately if switched out or if no
// memory transaction is in flight; otherwise keep running until the
// outstanding access completes (see tryCompleteDrain()).
DrainState
TimingSimpleCPU::drain()
{
    // Deschedule any power gating event (if any)
    deschedulePowerGatingEvent();

    if (switchedOut())
        return DrainState::Drained;

    if (_status == Idle ||
        (_status == BaseSimpleCPU::Running && isDrained())) {
        DPRINTF(Drain, "No need to drain.\n");
        activeThreads.clear();
        return DrainState::Drained;
    } else {
        DPRINTF(Drain, "Requesting drain.\n");

        // The fetch event can become descheduled if a drain didn't
        // succeed on the first attempt. We need to reschedule it if
        // the CPU is waiting for a microcode routine to complete.
        if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled())
            schedule(fetchEvent, clockEdge());

        return DrainState::Draining;
    }
}

// Resume after a successful drain: rebuild the active-thread list from
// each thread context's status and restart instruction fetch if any
// thread is active.
void
TimingSimpleCPU::drainResume()
{
    assert(!fetchEvent.scheduled());
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());

    _status = BaseSimpleCPU::Idle;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
            threadInfo[tid]->notIdleFraction = 1;

            activeThreads.push_back(tid);

            _status = BaseSimpleCPU::Running;

            // Fetch if any threads active
            if (!fetchEvent.scheduled()) {
                schedule(fetchEvent, nextCycle());
            }
        } else {
            threadInfo[tid]->notIdleFraction = 0;
        }
    }

    // Reschedule any power gating event (if any)
    schedulePowerGatingEvent();

    system->totalNumInsts = 0;
}

// Called on the way back from a memory response while a drain is
// pending; signals completion once no transaction remains in flight.
bool
TimingSimpleCPU::tryCompleteDrain()
{
    if (drainState() != DrainState::Draining)
        return false;

    DPRINTF(Drain, "tryCompleteDrain.\n");
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    signalDrainDone();

    return true;
}

// Switch this CPU out of the simulation; asserts it is quiescent
// (no pending fetch, not mid-macroop).
void
TimingSimpleCPU::switchOut()
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    M5_VAR_USED SimpleThread* thread = t_info.thread;

    BaseSimpleCPU::switchOut();

    assert(!fetchEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(!t_info.stayAtPC);
    assert(thread->microPC() == 0);

    updateCycleCounts();
    updateCycleCounters(BaseCPU::CPU_STATE_ON);
}


// Take over execution state from another CPU model (e.g. after a
// switch from atomic mode); reset the cycle baseline.
void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    previousCycle = curCycle();
}

// This CPU model only works with a timing-mode memory system.
void
TimingSimpleCPU::verifyMemoryMode() const
{
    if (!system->isTimingMode()) {
        fatal("The timing CPU requires the memory system to be in "
              "'timing' mode.\n");
    }
}

// Mark a thread runnable and kick off fetch if it isn't already
// scheduled; idempotent for threads already on the active list.
void
TimingSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num < numThreads);

    threadInfo[thread_num]->notIdleFraction = 1;
    if (_status == BaseSimpleCPU::Idle)
        _status = BaseSimpleCPU::Running;

    // kick things off by initiating the fetch of the next instruction
    if (!fetchEvent.scheduled())
        schedule(fetchEvent, clockEdge(Cycles(0)));

    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num)
         == activeThreads.end()) {
        activeThreads.push_back(thread_num);
    }

    BaseCPU::activateContext(thread_num);
}


// Remove a thread from the active list; when the last active thread is
// suspended the CPU goes Idle and the pending fetch is descheduled.
void
TimingSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num < numThreads);
    activeThreads.remove(thread_num);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    threadInfo[thread_num]->notIdleFraction = 0;

    if (activeThreads.empty()) {
        _status = Idle;

        if (fetchEvent.scheduled()) {
            deschedule(fetchEvent);
        }
    }

    BaseCPU::suspendContext(thread_num);
}

// Issue a (possibly locked / IPR) read to the d-cache.  Returns true if
// the packet was consumed (sent, or handled as IPR); false means the
// cache asked us to retry and the packet is parked in dcache_pkt.
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    const RequestPtr &req = pkt->req;

    // We're about to issue a locked load, so tell the monitor
    // to start caring about this address
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }
    if (req->isMmappedIpr()) {
        // Memory-mapped IPR accesses bypass the cache; completion is
        // delivered later via an IprEvent after the modeled delay.
        Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(pkt)) {
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

// Build and issue a single (non-split) data access once translation has
// finished.  Takes ownership of 'data' (attached to the packet as
// dynamic data).  'res' carries the conditional-swap compare value.
void
TimingSimpleCPU::sendData(const RequestPtr &req, uint8_t *data, uint64_t *res,
                          bool read)
{
    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    PacketPtr pkt = buildPacket(req, read);
    pkt->dataDynamic<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        // No memory access required: fabricate the response locally.
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // flag to suppress cache access

        if (req->isLLSC()) {
            // A failed store-conditional suppresses the cache access.
            do_access = TheISA::handleLockedWrite(thread, req, dcachePort.cacheBlockMask);
        } else if (req->isCondSwap()) {
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
            threadSnoop(pkt, curThread);
        } else {
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}

// Issue a cache-line-crossing access as two fragment packets tied to a
// logical "big" packet via SplitFragment/SplitMain sender state.
void
TimingSimpleCPU::sendSplitData(const RequestPtr &req1, const RequestPtr &req2,
                               const RequestPtr &req, uint8_t *data, bool read)
{
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState
            *>(pkt1->senderState);
        // Only send the second fragment if the first one was accepted;
        // clearFromParent() marks a fragment as no longer pending.
        if (handleReadPacket(pkt1)) {
            send_state->clearFromParent();
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleReadPacket(pkt2)) {
                send_state->clearFromParent();
            }
        }
    } else {
        dcache_pkt = pkt1;
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleWritePacket()) {
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleWritePacket()) {
                send_state->clearFromParent();
            }
        }
    }
}

// A data translation finished with a fault (or a suppressed fault):
// account for the elapsed cycles, drop any trace record, and advance
// to the fault handler.
void
TimingSimpleCPU::translationFault(const Fault &fault)
{
    // fault may be NoFault in cases where a fault is suppressed,
    // for instance prefetches.
    updateCycleCounts();
    updateCycleCounters(BaseCPU::CPU_STATE_ON);

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    advanceInst(fault);
}

// Create a read or write packet for the given (translated) request.
PacketPtr
TimingSimpleCPU::buildPacket(const RequestPtr &req, bool read)
{
    return read ?
        Packet::createRead(req) : Packet::createWrite(req);
}

// Build the two fragment packets plus the logical "big" packet for a
// split access.  The big packet owns 'data'; the fragments alias into
// it with static data pointers.  On NO_ACCESS only pkt1 is built.
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        const RequestPtr &req1, const RequestPtr &req2, const RequestPtr &req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        pkt1 = buildPacket(req, read);
        return;
    }

    pkt1 = buildPacket(req1, read);
    pkt2 = buildPacket(req2, read);

    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());

    pkt->dataDynamic<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}

// Atomic-style blocking reads are not supported by this model; loads
// must go through initiateMemRead()/completeDataAccess().
Fault
TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
                         unsigned size, Request::Flags flags)
{
    panic("readMem() is for atomic accesses, and should "
          "never be called on TimingSimpleCPU.\n");
}

// Start a timing load: build the request, split it if it crosses a
// cache-line boundary, and launch the (possibly two) DTB translations.
// The access itself is issued from finishTranslation()/sendData().
Fault
TimingSimpleCPU::initiateMemRead(Addr addr, unsigned size,
                                 Request::Flags flags)
{
    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    Fault fault;
    const int asid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Read;

    if (traceData)
        traceData->setMem(addr, size, flags);

    RequestPtr req = std::make_shared<Request>(
        asid, addr, size, flags, dataMasterId(), pc,
        thread->contextId());

    req->taskId(taskId());

    // split_addr > addr iff the access crosses into the next line.
    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr
           <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        // WholeTranslationState collects both fragment translations and
        // owns the data buffer until the access is handed off.
        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, new uint8_t[size],
                                      NULL, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, thread->getTC(), trans1, mode);
        thread->dtb->translateTiming(req2, thread->getTC(), trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
        DataTranslation<TimingSimpleCPU *> *translation
            = new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, thread->getTC(), translation, mode);
    }

    return NoFault;
}

// Issue the pending write in dcache_pkt (or handle it as an IPR write).
// Returns true if the packet was consumed; false means the cache asked
// for a retry and dcache_pkt stays set for recvReqRetry().
bool
TimingSimpleCPU::handleWritePacket()
{
    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    const RequestPtr &req = dcache_pkt->req;
    if (req->isMmappedIpr()) {
        Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

// Start a timing store: copy the data into a CPU-owned buffer, split
// the request on a line crossing, and launch the DTB translation(s).
Fault
TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, Request::Flags flags, uint64_t *res)
{
    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    uint8_t *newData = new uint8_t[size];
    const int
        asid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Write;

    if (data == NULL) {
        assert(flags & Request::STORE_NO_DATA);
        // This must be a cache block cleaning request
        memset(newData, 0, size);
    } else {
        memcpy(newData, data, size);
    }

    if (traceData)
        traceData->setMem(addr, size, flags);

    RequestPtr req = std::make_shared<Request>(
        asid, addr, size, flags, dataMasterId(), pc,
        thread->contextId());

    req->taskId(taskId());

    // split_addr > addr iff the store crosses into the next cache line.
    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, thread->getTC(), trans1, mode);
        thread->dtb->translateTiming(req2, thread->getTC(), trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *translation =
            new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, thread->getTC(), translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}

// Propagate one hardware thread's store to the other threads on this
// CPU: wake any address monitors and update their LL/SC state.
void
TimingSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender)
{
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (tid != sender) {
            if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
                wakeup(tid);
            }
            TheISA::handleLockedSnoop(threadInfo[tid]->thread, pkt,
                                      dcachePort.cacheBlockMask);
        }
    }
}

// Callback when a data translation (whole or both halves of a split)
// has completed: either report the fault or issue the access.  Frees
// the translation state in all cases.
void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    _status = BaseSimpleCPU::Running;

    if (state->getFault() != NoFault) {
        if (state->isPrefetch()) {
            // Faulting prefetches are dropped, not reported.
            state->setNoFault();
        }
        delete [] state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode == BaseTLB::Read);
        }
    }

    delete state;
}


// Begin fetching the next instruction: handle interrupts/PC events,
// then either start an ITB translation for a real fetch or, for a
// microcode ROM / pending macroop, skip the i-cache entirely.
void
TimingSimpleCPU::fetch()
{
    // Change thread if multi-threaded
    swapActiveThread();

    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
        checkForInterrupts();
        checkPcEventQueue();
    }

    // We must have just got suspended by a PC event
    if (_status == Idle)
        return;

    TheISA::PCState pcState = thread->pcState();
    bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                       !curMacroStaticInst;

    if (needToFetch) {
        _status = BaseSimpleCPU::Running;
        RequestPtr ifetch_req = std::make_shared<Request>();
        ifetch_req->taskId(taskId());
        ifetch_req->setContext(thread->contextId());
        setupFetchRequest(ifetch_req);
        DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
        thread->itb->translateTiming(ifetch_req, thread->getTC(),
                                     &fetchTranslation, BaseTLB::Execute);
    } else {
        // No i-cache access needed (microcode ROM or within a macroop):
        // pretend a response arrived immediately.
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        updateCycleCounts();
        updateCycleCounters(BaseCPU::CPU_STATE_ON);
    }
}


// Callback when the ITB translation completes: send the fetch to the
// i-cache on success, or advance straight to the fault handler.
void
TimingSimpleCPU::sendFetch(const Fault &fault, const RequestPtr
                           &req,
                           ThreadContext *tc)
{
    if (fault == NoFault) {
        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
                req->getVaddr(), req->getPaddr());
        ifetch_pkt = new Packet(req, MemCmd::ReadReq);
        ifetch_pkt->dataStatic(&inst);
        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());

        if (!icachePort.sendTimingReq(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
        // fetch fault: advance directly to next instruction (fault handler)
        _status = BaseSimpleCPU::Running;
        advanceInst(fault);
    }

    updateCycleCounts();
    updateCycleCounters(BaseCPU::CPU_STATE_ON);
}


// Advance to the next instruction: invoke the fault (if any) and then,
// if still Running, start the next fetch.  Also the hook where a
// pending drain is completed.
void
TimingSimpleCPU::advanceInst(const Fault &fault)
{
    SimpleExecContext &t_info = *threadInfo[curThread];

    if (_status == Faulting)
        return;

    if (fault != NoFault) {
        DPRINTF(SimpleCPU, "Fault occured. Handling the fault\n");

        advancePC(fault);

        // A syscall fault could suspend this CPU (e.g., futex_wait)
        // If the _status is not Idle, schedule an event to fetch the next
        // instruction after 'stall' ticks.
        // If the cpu has been suspended (i.e., _status == Idle), another
        // cpu will wake this cpu up later.
        if (_status != Idle) {
            DPRINTF(SimpleCPU, "Scheduling fetch event after the Fault\n");

            Tick stall = dynamic_pointer_cast<SyscallRetryFault>(fault) ?
                clockEdge(syscallRetryLatency) : clockEdge();
            reschedule(fetchEvent, stall, true);
            _status = Faulting;
        }

        return;
    }

    if (!t_info.stayAtPC)
        advancePC(fault);

    if (tryCompleteDrain())
        return;

    if (_status == BaseSimpleCPU::Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


// Fetch completed (pkt may be NULL for microcode/macroop "fetches"):
// decode and execute the instruction.  Memory references only initiate
// their access here; they finish in completeDataAccess().
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    SimpleExecContext& t_info = *threadInfo[curThread];

    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction
    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = BaseSimpleCPU::Running;

    updateCycleCounts();
    updateCycleCounters(BaseCPU::CPU_STATE_ON);

    if (pkt)
        pkt->req->setAccessLatency();


    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(&t_info, traceData);

        // If we're not running now the instruction will complete in a dcache
        // response callback or the instruction faulted and has started an
        // ifetch
        if (_status == BaseSimpleCPU::Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(&t_info, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        advanceInst(NoFault);
    }

    if (pkt) {
        delete pkt;
    }
}

// Deferred i-cache response: deliver the packet to the CPU.
void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

// Accept an i-cache response; processing is deferred to the next CPU
// clock edge via the tick event.
bool
TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Received fetch response %#x\n", pkt->getAddr());
    // we should only ever see one response per cycle since we only
    // issue a new request once this response is sunk
    assert(!tickEvent.scheduled());
    // delay processing of returned data until next CPU clock edge
    tickEvent.schedule(pkt, cpu->clockEdge());

    return true;
}

// The i-cache can accept the previously-rejected fetch again.
void
TimingSimpleCPU::IcachePort::recvReqRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTimingReq(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

// A data access (or one fragment of a split access) has completed:
// finish the load/store instruction and advance.
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    pkt->req->setAccessLatency();

    updateCycleCounts();
    updateCycleCounters(BaseCPU::CPU_STATE_ON);

    if (pkt->senderState) {
        // This is one fragment of a split access: fold it back into the
        // logical "big" packet and only continue once both fragments
        // have responded.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            return;
        } else {
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = BaseSimpleCPU::Running;

    Fault fault = curStaticInst->completeAcc(pkt, threadInfo[curThread],
                                             traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    delete pkt;

    postExecute();

    advanceInst(fault);
}

// Accumulate the cycles elapsed since the last update into numCycles.
void
TimingSimpleCPU::updateCycleCounts()
{
    const Cycles delta(curCycle() - previousCycle);

    numCycles += delta;

    previousCycle = curCycle();
}

// Timing snoop from the memory system: service address monitors and,
// for invalidations/writes, update every thread's LL/SC lock state.
void
TimingSimpleCPU::DcachePort::recvTimingSnoopReq(PacketPtr pkt)
{
    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // Making it uniform across all CPUs:
    // The CPUs need to be woken up only on an invalidation packet (when using caches)
    // or on an incoming write packet (when not using caches)
    // It is not necessary to wake up the processor on all incoming packets
    if (pkt->isInvalidate() || pkt->isWrite()) {
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }
}

// Functional snoop: only the address monitors need servicing.
void
TimingSimpleCPU::DcachePort::recvFunctionalSnoop(PacketPtr pkt)
{
    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }
}

// Accept a d-cache response, deferring processing to the next CPU
// clock edge; a second response in the same tick is nacked and retried.
bool
TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Received load/store response %#x\n", pkt->getAddr());

    // The timing CPU is not really ticked, instead it relies on the
    // memory system (fetch and load/store) to set the pace.
    if (!tickEvent.scheduled()) {
        // Delay processing of returned data until next CPU clock edge
        tickEvent.schedule(pkt, cpu->clockEdge());
        return true;
    } else {
        // In the case of a split transaction and a cache that is
        // faster than a CPU we could get two responses in the
        // same tick, delay the second one
        if (!retryRespEvent.scheduled())
            cpu->schedule(retryRespEvent, cpu->clockEdge(Cycles(1)));
        return false;
    }
}

// Deferred d-cache response: deliver the packet to the CPU.
void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

// The d-cache can accept the previously-rejected request again.  For a
// split access this may chain into sending the second fragment.
void
TimingSimpleCPU::DcachePort::recvReqRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTimingReq(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                // The other fragment is still pending: try it now.
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTimingReq(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

// Self-scheduling event used to model the latency of memory-mapped IPR
// accesses, which bypass the cache hierarchy.
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
                                    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

// Deliver the delayed IPR "response" to the CPU.
void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


// Debug helper: route an address-print request to the d-cache port.
void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    return new TimingSimpleCPU(this);
}