/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2010-2013,2015,2017-2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */
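// TimingSimpleCPU is an in-order, one-instruction-at-a-time CPU model.
// It is not ticked on every cycle; instead, instruction fetches and data
// accesses are issued into the timing memory system, and the CPU advances
// when the corresponding responses come back.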
#include "cpu/simple/timing.hh"

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "config/the_isa.hh"
#include "cpu/exetrace.hh"
#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/Mwait.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

void
TimingSimpleCPU::init()
{
    BaseSimpleCPU::init();
}

void
TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    cpu->schedule(this, t);
}

TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
      dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
      fetchEvent([this]{ fetch(); }, name())
{
    _status = Idle;
}


TimingSimpleCPU::~TimingSimpleCPU()
{
}

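// Drain the CPU at an instruction boundary. The CPU only reports
// DrainState::Drained once no instruction is partially executed
// (isCpuDrained()); until then it keeps fetching and reports Draining.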
DrainState
TimingSimpleCPU::drain()
{
    // Deschedule any power gating event that may be pending
    deschedulePowerGatingEvent();

    if (switchedOut())
        return DrainState::Drained;

    if (_status == Idle ||
        (_status == BaseSimpleCPU::Running && isCpuDrained())) {
        DPRINTF(Drain, "No need to drain.\n");
        activeThreads.clear();
        return DrainState::Drained;
    } else {
        DPRINTF(Drain, "Requesting drain.\n");

        // The fetch event can become descheduled if a drain didn't
        // succeed on the first attempt. We need to reschedule it if
        // the CPU is waiting for a microcode routine to complete.
        if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled())
            schedule(fetchEvent, clockEdge());

        return DrainState::Draining;
    }
}

void
TimingSimpleCPU::drainResume()
{
    assert(!fetchEvent.scheduled());
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());

    _status = BaseSimpleCPU::Idle;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
            threadInfo[tid]->notIdleFraction = 1;

            activeThreads.push_back(tid);

            _status = BaseSimpleCPU::Running;

            // Fetch if any threads are active
            if (!fetchEvent.scheduled()) {
                schedule(fetchEvent, nextCycle());
            }
        } else {
            threadInfo[tid]->notIdleFraction = 0;
        }
    }

    // Reschedule any power gating event that may be pending
    schedulePowerGatingEvent();

    system->totalNumInsts = 0;
}

bool
TimingSimpleCPU::tryCompleteDrain()
{
    if (drainState() != DrainState::Draining)
        return false;

    DPRINTF(Drain, "tryCompleteDrain.\n");
    if (!isCpuDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    signalDrainDone();

    return true;
}

void
TimingSimpleCPU::switchOut()
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    M5_VAR_USED SimpleThread* thread = t_info.thread;

    BaseSimpleCPU::switchOut();

    assert(!fetchEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(!t_info.stayAtPC);
    assert(thread->microPC() == 0);

    updateCycleCounts();
    updateCycleCounters(BaseCPU::CPU_STATE_ON);
}


void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    previousCycle = curCycle();
}

void
TimingSimpleCPU::verifyMemoryMode() const
{
    if (!system->isTimingMode()) {
        fatal("The timing CPU requires the memory system to be in "
              "'timing' mode.\n");
    }
}

void
TimingSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num < numThreads);

    threadInfo[thread_num]->notIdleFraction = 1;
    if (_status == BaseSimpleCPU::Idle)
        _status = BaseSimpleCPU::Running;

    // kick things off by initiating the fetch of the next instruction
    if (!fetchEvent.scheduled())
        schedule(fetchEvent, clockEdge(Cycles(0)));

    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num)
        == activeThreads.end()) {
        activeThreads.push_back(thread_num);
    }

    BaseCPU::activateContext(thread_num);
}


void
TimingSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num < numThreads);
    activeThreads.remove(thread_num);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    threadInfo[thread_num]->notIdleFraction = 0;

    if (activeThreads.empty()) {
        _status = Idle;

        if (fetchEvent.scheduled()) {
            deschedule(fetchEvent);
        }
    }

    BaseCPU::suspendContext(thread_num);
}

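// Issue a read packet to the D-cache, or handle it as a memory-mapped
// IPR access. Returns true once the memory system (or the IPR event)
// has taken ownership of the packet, i.e. dcache_pkt is NULL; returns
// false if the access has to be retried later.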
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    const RequestPtr &req = pkt->req;

    // We're about to issue a locked load, so tell the monitor
    // to start caring about this address
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }
    if (req->isMmappedIpr()) {
        Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(pkt)) {
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

void
TimingSimpleCPU::sendData(const RequestPtr &req, uint8_t *data, uint64_t *res,
                          bool read)
{
    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    PacketPtr pkt = buildPacket(req, read);
    pkt->dataDynamic<uint8_t>(data);

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // suppress the cache access if cleared

        if (req->isLLSC()) {
            do_access = TheISA::handleLockedWrite(thread, req,
                    dcachePort.cacheBlockMask);
        } else if (req->isCondSwap()) {
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
            threadSnoop(pkt, curThread);
        } else {
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}

void
TimingSimpleCPU::sendSplitData(const RequestPtr &req1, const RequestPtr &req2,
                               const RequestPtr &req, uint8_t *data, bool read)
{
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleReadPacket(pkt1)) {
            send_state->clearFromParent();
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleReadPacket(pkt2)) {
                send_state->clearFromParent();
            }
        }
    } else {
        dcache_pkt = pkt1;
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleWritePacket()) {
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleWritePacket()) {
                send_state->clearFromParent();
            }
        }
    }
}

void
TimingSimpleCPU::translationFault(const Fault &fault)
{
    // fault may be NoFault in cases where a fault is suppressed,
    // for instance prefetches.
    updateCycleCounts();
    updateCycleCounters(BaseCPU::CPU_STATE_ON);

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    advanceInst(fault);
}

PacketPtr
TimingSimpleCPU::buildPacket(const RequestPtr &req, bool read)
{
    return read ? Packet::createRead(req) : Packet::createWrite(req);
}

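// Build the two fragment packets for an access that crosses a cache line
// boundary, plus a main packet that owns the data buffer. Each fragment's
// SplitFragmentSenderState points back at the main packet so that the two
// responses can be matched back up in completeDataAccess().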
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        const RequestPtr &req1, const RequestPtr &req2, const RequestPtr &req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        pkt1 = buildPacket(req, read);
        return;
    }

    pkt1 = buildPacket(req1, read);
    pkt2 = buildPacket(req2, read);

    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());

    pkt->dataDynamic<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}

Fault
TimingSimpleCPU::initiateMemRead(Addr addr, unsigned size,
                                 Request::Flags flags,
                                 const std::vector<bool>& byteEnable)
{
    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    Fault fault;
    const int asid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Read;

    if (traceData)
        traceData->setMem(addr, size, flags);

    RequestPtr req = std::make_shared<Request>(
        asid, addr, size, flags, dataMasterId(), pc,
        thread->contextId());
    if (!byteEnable.empty()) {
        req->setByteEnable(byteEnable);
    }

    req->taskId(taskId());

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, new uint8_t[size],
                                      NULL, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, thread->getTC(), trans1, mode);
        thread->dtb->translateTiming(req2, thread->getTC(), trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
        DataTranslation<TimingSimpleCPU *> *translation
            = new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, thread->getTC(), translation, mode);
    }

    return NoFault;
}

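// Issue the pending write in dcache_pkt, or handle it as a memory-mapped
// IPR access. As with handleReadPacket(), returns true once the memory
// system has taken ownership of the packet.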
bool
TimingSimpleCPU::handleWritePacket()
{
    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    const RequestPtr &req = dcache_pkt->req;
    if (req->isMmappedIpr()) {
        Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

Fault
TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, Request::Flags flags, uint64_t *res,
                          const std::vector<bool>& byteEnable)
{
    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    uint8_t *newData = new uint8_t[size];
    const int asid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Write;

    if (data == NULL) {
        assert(flags & Request::STORE_NO_DATA);
        // This must be a cache block cleaning request
        memset(newData, 0, size);
    } else {
        memcpy(newData, data, size);
    }

    if (traceData)
        traceData->setMem(addr, size, flags);

    RequestPtr req = std::make_shared<Request>(
        asid, addr, size, flags, dataMasterId(), pc,
        thread->contextId());
    if (!byteEnable.empty()) {
        req->setByteEnable(byteEnable);
    }

    req->taskId(taskId());

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;

    // TODO: TimingSimpleCPU doesn't support arbitrarily long multi-line
    // memory accesses yet

    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, thread->getTC(), trans1, mode);
        thread->dtb->translateTiming(req2, thread->getTC(), trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *translation =
            new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, thread->getTC(), translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}

Fault
TimingSimpleCPU::initiateMemAMO(Addr addr, unsigned size,
                                Request::Flags flags,
                                AtomicOpFunctorPtr amo_op)
{
    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    Fault fault;
    const int asid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Write;

    if (traceData)
        traceData->setMem(addr, size, flags);

    RequestPtr req = make_shared<Request>(asid, addr, size, flags,
                            dataMasterId(), pc, thread->contextId(),
                            std::move(amo_op));

    assert(req->hasAtomicOpFunctor());

    req->taskId(taskId());

    Addr split_addr = roundDown(addr + size - 1, block_size);

    // AMO requests that cross a cache line boundary are not allowed,
    // since the cache cannot guarantee that the operation is executed
    // atomically across two cache lines.
    // For ISAs such as x86 that require AMO operations to work on
    // accesses that cross cache-line boundaries, the cache would need
    // to be modified to lock both cache lines in order to guarantee
    // atomicity.
    if (split_addr > addr) {
        panic("AMO requests should not access across a cache line boundary\n");
    }

    _status = DTBWaitResponse;

    WholeTranslationState *state =
        new WholeTranslationState(req, new uint8_t[size], NULL, mode);
    DataTranslation<TimingSimpleCPU *> *translation
        = new DataTranslation<TimingSimpleCPU *>(this, state);
    thread->dtb->translateTiming(req, thread->getTC(), translation, mode);

    return NoFault;
}

void
TimingSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender)
{
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (tid != sender) {
            if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
                wakeup(tid);
            }
            TheISA::handleLockedSnoop(threadInfo[tid]->thread, pkt,
                    dcachePort.cacheBlockMask);
        }
    }
}

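// Callback from the data TLB once address translation has completed.
// On a fault the access is aborted; otherwise the (possibly split)
// request is sent to the cache via sendData()/sendSplitData().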
void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    _status = BaseSimpleCPU::Running;

    if (state->getFault() != NoFault) {
        if (state->isPrefetch()) {
            state->setNoFault();
        }
        delete [] state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode == BaseTLB::Read);
        }
    }

    delete state;
}


void
TimingSimpleCPU::fetch()
{
    // Change thread if multi-threaded
    swapActiveThread();

    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
        checkForInterrupts();
        checkPcEventQueue();
    }

    // We must have just got suspended by a PC event
    if (_status == Idle)
        return;

    TheISA::PCState pcState = thread->pcState();
    bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                       !curMacroStaticInst;

    if (needToFetch) {
        _status = BaseSimpleCPU::Running;
        RequestPtr ifetch_req = std::make_shared<Request>();
        ifetch_req->taskId(taskId());
        ifetch_req->setContext(thread->contextId());
        setupFetchRequest(ifetch_req);
        DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
        thread->itb->translateTiming(ifetch_req, thread->getTC(),
                &fetchTranslation, BaseTLB::Execute);
    } else {
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        updateCycleCounts();
        updateCycleCounters(BaseCPU::CPU_STATE_ON);
    }
}


void
TimingSimpleCPU::sendFetch(const Fault &fault, const RequestPtr &req,
                           ThreadContext *tc)
{
    if (fault == NoFault) {
        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
                req->getVaddr(), req->getPaddr());
        ifetch_pkt = new Packet(req, MemCmd::ReadReq);
        ifetch_pkt->dataStatic(&inst);
        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());

        if (!icachePort.sendTimingReq(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
        // fetch fault: advance directly to next instruction (fault handler)
        _status = BaseSimpleCPU::Running;
        advanceInst(fault);
    }

    updateCycleCounts();
    updateCycleCounters(BaseCPU::CPU_STATE_ON);
}

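// Advance past the instruction that just completed, or handle its
// fault, and start fetching the next instruction if the CPU is still
// running.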
void
TimingSimpleCPU::advanceInst(const Fault &fault)
{
    SimpleExecContext &t_info = *threadInfo[curThread];

    if (_status == Faulting)
        return;

    if (fault != NoFault) {
        DPRINTF(SimpleCPU, "Fault occurred. Handling the fault\n");

        advancePC(fault);

        // A syscall fault could suspend this CPU (e.g., futex_wait).
        // If the _status is not Idle, schedule an event to fetch the
        // next instruction after 'stall' ticks.
        // If the CPU has been suspended (i.e., _status == Idle), another
        // CPU will wake this CPU up later.
        if (_status != Idle) {
            DPRINTF(SimpleCPU, "Scheduling fetch event after the Fault\n");

            Tick stall = dynamic_pointer_cast<SyscallRetryFault>(fault) ?
                         clockEdge(syscallRetryLatency) : clockEdge();
            reschedule(fetchEvent, stall, true);
            _status = Faulting;
        }

        return;
    }

    if (!t_info.stayAtPC)
        advancePC(fault);

    if (tryCompleteDrain())
        return;

    if (_status == BaseSimpleCPU::Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    SimpleExecContext& t_info = *threadInfo[curThread];

    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction
    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = BaseSimpleCPU::Running;

    updateCycleCounts();
    updateCycleCounters(BaseCPU::CPU_STATE_ON);

    if (pkt)
        pkt->req->setAccessLatency();


    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(&t_info, traceData);

        // If we're not running now, the instruction will complete in a
        // dcache response callback, or the instruction faulted and has
        // started an ifetch
        if (_status == BaseSimpleCPU::Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(&t_info, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        advanceInst(NoFault);
    }

    if (pkt) {
        delete pkt;
    }
}

void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

bool
TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Received fetch response %#x\n", pkt->getAddr());
    // we should only ever see one response per cycle since we only
    // issue a new request once this response is sunk
    assert(!tickEvent.scheduled());
    // delay processing of returned data until next CPU clock edge
    tickEvent.schedule(pkt, cpu->clockEdge());

    return true;
}

void
TimingSimpleCPU::IcachePort::recvReqRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTimingReq(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

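// Callback for a D-cache response. For split accesses, wait until both
// fragments have returned, then complete the access using the
// reassembled main packet.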
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    pkt->req->setAccessLatency();

    updateCycleCounts();
    updateCycleCounters(BaseCPU::CPU_STATE_ON);

    if (pkt->senderState) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            return;
        } else {
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = BaseSimpleCPU::Running;

    Fault fault = curStaticInst->completeAcc(pkt, threadInfo[curThread],
                                             traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    delete pkt;

    postExecute();

    advanceInst(fault);
}

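// Since the CPU is not clocked every cycle, account for all of the
// cycles that have elapsed since the previous update in one step.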
void
TimingSimpleCPU::updateCycleCounts()
{
    const Cycles delta(curCycle() - previousCycle);

    numCycles += delta;

    previousCycle = curCycle();
}

void
TimingSimpleCPU::DcachePort::recvTimingSnoopReq(PacketPtr pkt)
{
    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // Making this uniform across all CPUs: the CPU needs to be woken up
    // only on an invalidation packet (when using caches) or on an
    // incoming write packet (when not using caches); it is not necessary
    // to wake up the processor on all incoming packets.
    if (pkt->isInvalidate() || pkt->isWrite()) {
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }
}

void
TimingSimpleCPU::DcachePort::recvFunctionalSnoop(PacketPtr pkt)
{
    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }
}

bool
TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Received load/store response %#x\n", pkt->getAddr());

    // The timing CPU is not really ticked; instead, it relies on the
    // memory system (fetch and load/store) to set the pace.
    if (!tickEvent.scheduled()) {
        // Delay processing of returned data until next CPU clock edge
        tickEvent.schedule(pkt, cpu->clockEdge());
        return true;
    } else {
        // In the case of a split transaction and a cache that is faster
        // than a CPU we could get two responses in the same tick; delay
        // the second one.
        if (!retryRespEvent.scheduled())
            cpu->schedule(retryRespEvent, cpu->clockEdge(Cycles(1)));
        return false;
    }
}

void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

void
TimingSimpleCPU::DcachePort::recvReqRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTimingReq(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                    (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTimingReq(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

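// IprEvent models the latency of a memory-mapped IPR access: the
// access completes when this event fires, after the delay reported by
// the IPR handler.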
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    return new TimingSimpleCPU(this);
}