// timing.cc -- gem5 revision 11147
/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2010-2013,2015 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT 33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 40 * 41 * Authors: Steve Reinhardt 42 */ 43 44#include "arch/locked_mem.hh" 45#include "arch/mmapped_ipr.hh" 46#include "arch/utility.hh" 47#include "base/bigint.hh" 48#include "config/the_isa.hh" 49#include "cpu/simple/timing.hh" 50#include "cpu/exetrace.hh" 51#include "debug/Config.hh" 52#include "debug/Drain.hh" 53#include "debug/ExecFaulting.hh" 54#include "debug/SimpleCPU.hh" 55#include "mem/packet.hh" 56#include "mem/packet_access.hh" 57#include "params/TimingSimpleCPU.hh" 58#include "sim/faults.hh" 59#include "sim/full_system.hh" 60#include "sim/system.hh" 61 62#include "debug/Mwait.hh" 63 64using namespace std; 65using namespace TheISA; 66 67void 68TimingSimpleCPU::init() 69{ 70 BaseSimpleCPU::init(); 71} 72 73void 74TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t) 75{ 76 pkt = _pkt; 77 cpu->schedule(this, t); 78} 79 80TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p) 81 : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this), 82 dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0), 83 fetchEvent(this) 84{ 85 _status = Idle; 86} 87 88 89 90TimingSimpleCPU::~TimingSimpleCPU() 91{ 92} 93 94DrainState 95TimingSimpleCPU::drain() 96{ 97 if (switchedOut()) 98 return DrainState::Drained; 99 100 if (_status == Idle || 101 (_status == BaseSimpleCPU::Running && isDrained())) { 102 DPRINTF(Drain, "No need to drain.\n"); 103 activeThreads.clear(); 104 return 
DrainState::Drained; 105 } else { 106 DPRINTF(Drain, "Requesting drain.\n"); 107 108 // The fetch event can become descheduled if a drain didn't 109 // succeed on the first attempt. We need to reschedule it if 110 // the CPU is waiting for a microcode routine to complete. 111 if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled()) 112 schedule(fetchEvent, clockEdge()); 113 114 return DrainState::Draining; 115 } 116} 117 118void 119TimingSimpleCPU::drainResume() 120{ 121 assert(!fetchEvent.scheduled()); 122 if (switchedOut()) 123 return; 124 125 DPRINTF(SimpleCPU, "Resume\n"); 126 verifyMemoryMode(); 127 128 assert(!threadContexts.empty()); 129 130 _status = BaseSimpleCPU::Idle; 131 132 for (ThreadID tid = 0; tid < numThreads; tid++) { 133 if (threadInfo[tid]->thread->status() == ThreadContext::Active) { 134 threadInfo[tid]->notIdleFraction = 1; 135 136 activeThreads.push_back(tid); 137 138 _status = BaseSimpleCPU::Running; 139 140 // Fetch if any threads active 141 if (!fetchEvent.scheduled()) { 142 schedule(fetchEvent, nextCycle()); 143 } 144 } else { 145 threadInfo[tid]->notIdleFraction = 0; 146 } 147 } 148 149 system->totalNumInsts = 0; 150} 151 152bool 153TimingSimpleCPU::tryCompleteDrain() 154{ 155 if (drainState() != DrainState::Draining) 156 return false; 157 158 DPRINTF(Drain, "tryCompleteDrain.\n"); 159 if (!isDrained()) 160 return false; 161 162 DPRINTF(Drain, "CPU done draining, processing drain event\n"); 163 signalDrainDone(); 164 165 return true; 166} 167 168void 169TimingSimpleCPU::switchOut() 170{ 171 SimpleExecContext& t_info = *threadInfo[curThread]; 172 M5_VAR_USED SimpleThread* thread = t_info.thread; 173 174 BaseSimpleCPU::switchOut(); 175 176 assert(!fetchEvent.scheduled()); 177 assert(_status == BaseSimpleCPU::Running || _status == Idle); 178 assert(!t_info.stayAtPC); 179 assert(thread->microPC() == 0); 180 181 updateCycleCounts(); 182} 183 184 185void 186TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU) 187{ 188 
BaseSimpleCPU::takeOverFrom(oldCPU); 189 190 previousCycle = curCycle(); 191} 192 193void 194TimingSimpleCPU::verifyMemoryMode() const 195{ 196 if (!system->isTimingMode()) { 197 fatal("The timing CPU requires the memory system to be in " 198 "'timing' mode.\n"); 199 } 200} 201 202void 203TimingSimpleCPU::activateContext(ThreadID thread_num) 204{ 205 DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num); 206 207 assert(thread_num < numThreads); 208 209 threadInfo[thread_num]->notIdleFraction = 1; 210 if (_status == BaseSimpleCPU::Idle) 211 _status = BaseSimpleCPU::Running; 212 213 // kick things off by initiating the fetch of the next instruction 214 if (!fetchEvent.scheduled()) 215 schedule(fetchEvent, clockEdge(Cycles(0))); 216 217 if (std::find(activeThreads.begin(), activeThreads.end(), thread_num) 218 == activeThreads.end()) { 219 activeThreads.push_back(thread_num); 220 } 221} 222 223 224void 225TimingSimpleCPU::suspendContext(ThreadID thread_num) 226{ 227 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num); 228 229 assert(thread_num < numThreads); 230 activeThreads.remove(thread_num); 231 232 if (_status == Idle) 233 return; 234 235 assert(_status == BaseSimpleCPU::Running); 236 237 threadInfo[thread_num]->notIdleFraction = 0; 238 239 if (activeThreads.empty()) { 240 _status = Idle; 241 242 if (fetchEvent.scheduled()) { 243 deschedule(fetchEvent); 244 } 245 } 246} 247 248bool 249TimingSimpleCPU::handleReadPacket(PacketPtr pkt) 250{ 251 SimpleExecContext &t_info = *threadInfo[curThread]; 252 SimpleThread* thread = t_info.thread; 253 254 RequestPtr req = pkt->req; 255 256 // We're about the issues a locked load, so tell the monitor 257 // to start caring about this address 258 if (pkt->isRead() && pkt->req->isLLSC()) { 259 TheISA::handleLockedRead(thread, pkt->req); 260 } 261 if (req->isMmappedIpr()) { 262 Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt); 263 new IprEvent(pkt, this, clockEdge(delay)); 264 _status = DcacheWaitResponse; 265 
dcache_pkt = NULL; 266 } else if (!dcachePort.sendTimingReq(pkt)) { 267 _status = DcacheRetry; 268 dcache_pkt = pkt; 269 } else { 270 _status = DcacheWaitResponse; 271 // memory system takes ownership of packet 272 dcache_pkt = NULL; 273 } 274 return dcache_pkt == NULL; 275} 276 277void 278TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res, 279 bool read) 280{ 281 SimpleExecContext &t_info = *threadInfo[curThread]; 282 SimpleThread* thread = t_info.thread; 283 284 PacketPtr pkt = buildPacket(req, read); 285 pkt->dataDynamic<uint8_t>(data); 286 if (req->getFlags().isSet(Request::NO_ACCESS)) { 287 assert(!dcache_pkt); 288 pkt->makeResponse(); 289 completeDataAccess(pkt); 290 } else if (read) { 291 handleReadPacket(pkt); 292 } else { 293 bool do_access = true; // flag to suppress cache access 294 295 if (req->isLLSC()) { 296 do_access = TheISA::handleLockedWrite(thread, req, dcachePort.cacheBlockMask); 297 } else if (req->isCondSwap()) { 298 assert(res); 299 req->setExtraData(*res); 300 } 301 302 if (do_access) { 303 dcache_pkt = pkt; 304 handleWritePacket(); 305 } else { 306 _status = DcacheWaitResponse; 307 completeDataAccess(pkt); 308 } 309 } 310} 311 312void 313TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2, 314 RequestPtr req, uint8_t *data, bool read) 315{ 316 PacketPtr pkt1, pkt2; 317 buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read); 318 if (req->getFlags().isSet(Request::NO_ACCESS)) { 319 assert(!dcache_pkt); 320 pkt1->makeResponse(); 321 completeDataAccess(pkt1); 322 } else if (read) { 323 SplitFragmentSenderState * send_state = 324 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState); 325 if (handleReadPacket(pkt1)) { 326 send_state->clearFromParent(); 327 send_state = dynamic_cast<SplitFragmentSenderState *>( 328 pkt2->senderState); 329 if (handleReadPacket(pkt2)) { 330 send_state->clearFromParent(); 331 } 332 } 333 } else { 334 dcache_pkt = pkt1; 335 SplitFragmentSenderState * send_state = 336 
dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState); 337 if (handleWritePacket()) { 338 send_state->clearFromParent(); 339 dcache_pkt = pkt2; 340 send_state = dynamic_cast<SplitFragmentSenderState *>( 341 pkt2->senderState); 342 if (handleWritePacket()) { 343 send_state->clearFromParent(); 344 } 345 } 346 } 347} 348 349void 350TimingSimpleCPU::translationFault(const Fault &fault) 351{ 352 // fault may be NoFault in cases where a fault is suppressed, 353 // for instance prefetches. 354 updateCycleCounts(); 355 356 if (traceData) { 357 // Since there was a fault, we shouldn't trace this instruction. 358 delete traceData; 359 traceData = NULL; 360 } 361 362 postExecute(); 363 364 advanceInst(fault); 365} 366 367PacketPtr 368TimingSimpleCPU::buildPacket(RequestPtr req, bool read) 369{ 370 return read ? Packet::createRead(req) : Packet::createWrite(req); 371} 372 373void 374TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2, 375 RequestPtr req1, RequestPtr req2, RequestPtr req, 376 uint8_t *data, bool read) 377{ 378 pkt1 = pkt2 = NULL; 379 380 assert(!req1->isMmappedIpr() && !req2->isMmappedIpr()); 381 382 if (req->getFlags().isSet(Request::NO_ACCESS)) { 383 pkt1 = buildPacket(req, read); 384 return; 385 } 386 387 pkt1 = buildPacket(req1, read); 388 pkt2 = buildPacket(req2, read); 389 390 PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand()); 391 392 pkt->dataDynamic<uint8_t>(data); 393 pkt1->dataStatic<uint8_t>(data); 394 pkt2->dataStatic<uint8_t>(data + req1->getSize()); 395 396 SplitMainSenderState * main_send_state = new SplitMainSenderState; 397 pkt->senderState = main_send_state; 398 main_send_state->fragments[0] = pkt1; 399 main_send_state->fragments[1] = pkt2; 400 main_send_state->outstanding = 2; 401 pkt1->senderState = new SplitFragmentSenderState(pkt, 0); 402 pkt2->senderState = new SplitFragmentSenderState(pkt, 1); 403} 404 405Fault 406TimingSimpleCPU::readMem(Addr addr, uint8_t *data, 407 unsigned size, unsigned flags) 408{ 
409 SimpleExecContext &t_info = *threadInfo[curThread]; 410 SimpleThread* thread = t_info.thread; 411 412 Fault fault; 413 const int asid = 0; 414 const ThreadID tid = curThread; 415 const Addr pc = thread->instAddr(); 416 unsigned block_size = cacheLineSize(); 417 BaseTLB::Mode mode = BaseTLB::Read; 418 419 if (traceData) 420 traceData->setMem(addr, size, flags); 421 422 RequestPtr req = new Request(asid, addr, size, 423 flags, dataMasterId(), pc, 424 thread->contextId(), tid); 425 426 req->taskId(taskId()); 427 428 Addr split_addr = roundDown(addr + size - 1, block_size); 429 assert(split_addr <= addr || split_addr - addr < block_size); 430 431 _status = DTBWaitResponse; 432 if (split_addr > addr) { 433 RequestPtr req1, req2; 434 assert(!req->isLLSC() && !req->isSwap()); 435 req->splitOnVaddr(split_addr, req1, req2); 436 437 WholeTranslationState *state = 438 new WholeTranslationState(req, req1, req2, new uint8_t[size], 439 NULL, mode); 440 DataTranslation<TimingSimpleCPU *> *trans1 = 441 new DataTranslation<TimingSimpleCPU *>(this, state, 0); 442 DataTranslation<TimingSimpleCPU *> *trans2 = 443 new DataTranslation<TimingSimpleCPU *>(this, state, 1); 444 445 thread->dtb->translateTiming(req1, thread->getTC(), trans1, mode); 446 thread->dtb->translateTiming(req2, thread->getTC(), trans2, mode); 447 } else { 448 WholeTranslationState *state = 449 new WholeTranslationState(req, new uint8_t[size], NULL, mode); 450 DataTranslation<TimingSimpleCPU *> *translation 451 = new DataTranslation<TimingSimpleCPU *>(this, state); 452 thread->dtb->translateTiming(req, thread->getTC(), translation, mode); 453 } 454 455 return NoFault; 456} 457 458bool 459TimingSimpleCPU::handleWritePacket() 460{ 461 SimpleExecContext &t_info = *threadInfo[curThread]; 462 SimpleThread* thread = t_info.thread; 463 464 RequestPtr req = dcache_pkt->req; 465 if (req->isMmappedIpr()) { 466 Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt); 467 new IprEvent(dcache_pkt, this, 
clockEdge(delay)); 468 _status = DcacheWaitResponse; 469 dcache_pkt = NULL; 470 } else if (!dcachePort.sendTimingReq(dcache_pkt)) { 471 _status = DcacheRetry; 472 } else { 473 _status = DcacheWaitResponse; 474 // memory system takes ownership of packet 475 dcache_pkt = NULL; 476 } 477 return dcache_pkt == NULL; 478} 479 480Fault 481TimingSimpleCPU::writeMem(uint8_t *data, unsigned size, 482 Addr addr, unsigned flags, uint64_t *res) 483{ 484 SimpleExecContext &t_info = *threadInfo[curThread]; 485 SimpleThread* thread = t_info.thread; 486 487 uint8_t *newData = new uint8_t[size]; 488 const int asid = 0; 489 const ThreadID tid = curThread; 490 const Addr pc = thread->instAddr(); 491 unsigned block_size = cacheLineSize(); 492 BaseTLB::Mode mode = BaseTLB::Write; 493 494 if (data == NULL) { 495 assert(flags & Request::CACHE_BLOCK_ZERO); 496 // This must be a cache block cleaning request 497 memset(newData, 0, size); 498 } else { 499 memcpy(newData, data, size); 500 } 501 502 if (traceData) 503 traceData->setMem(addr, size, flags); 504 505 RequestPtr req = new Request(asid, addr, size, 506 flags, dataMasterId(), pc, 507 thread->contextId(), tid); 508 509 req->taskId(taskId()); 510 511 Addr split_addr = roundDown(addr + size - 1, block_size); 512 assert(split_addr <= addr || split_addr - addr < block_size); 513 514 _status = DTBWaitResponse; 515 if (split_addr > addr) { 516 RequestPtr req1, req2; 517 assert(!req->isLLSC() && !req->isSwap()); 518 req->splitOnVaddr(split_addr, req1, req2); 519 520 WholeTranslationState *state = 521 new WholeTranslationState(req, req1, req2, newData, res, mode); 522 DataTranslation<TimingSimpleCPU *> *trans1 = 523 new DataTranslation<TimingSimpleCPU *>(this, state, 0); 524 DataTranslation<TimingSimpleCPU *> *trans2 = 525 new DataTranslation<TimingSimpleCPU *>(this, state, 1); 526 527 thread->dtb->translateTiming(req1, thread->getTC(), trans1, mode); 528 thread->dtb->translateTiming(req2, thread->getTC(), trans2, mode); 529 } else { 530 
WholeTranslationState *state = 531 new WholeTranslationState(req, newData, res, mode); 532 DataTranslation<TimingSimpleCPU *> *translation = 533 new DataTranslation<TimingSimpleCPU *>(this, state); 534 thread->dtb->translateTiming(req, thread->getTC(), translation, mode); 535 } 536 537 // Translation faults will be returned via finishTranslation() 538 return NoFault; 539} 540 541 542void 543TimingSimpleCPU::finishTranslation(WholeTranslationState *state) 544{ 545 _status = BaseSimpleCPU::Running; 546 547 if (state->getFault() != NoFault) { 548 if (state->isPrefetch()) { 549 state->setNoFault(); 550 } 551 delete [] state->data; 552 state->deleteReqs(); 553 translationFault(state->getFault()); 554 } else { 555 if (!state->isSplit) { 556 sendData(state->mainReq, state->data, state->res, 557 state->mode == BaseTLB::Read); 558 } else { 559 sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq, 560 state->data, state->mode == BaseTLB::Read); 561 } 562 } 563 564 delete state; 565} 566 567 568void 569TimingSimpleCPU::fetch() 570{ 571 // Change thread if multi-threaded 572 swapActiveThread(); 573 574 SimpleExecContext &t_info = *threadInfo[curThread]; 575 SimpleThread* thread = t_info.thread; 576 577 DPRINTF(SimpleCPU, "Fetch\n"); 578 579 if (!curStaticInst || !curStaticInst->isDelayedCommit()) { 580 checkForInterrupts(); 581 checkPcEventQueue(); 582 } 583 584 // We must have just got suspended by a PC event 585 if (_status == Idle) 586 return; 587 588 TheISA::PCState pcState = thread->pcState(); 589 bool needToFetch = !isRomMicroPC(pcState.microPC()) && 590 !curMacroStaticInst; 591 592 if (needToFetch) { 593 _status = BaseSimpleCPU::Running; 594 Request *ifetch_req = new Request(); 595 ifetch_req->taskId(taskId()); 596 ifetch_req->setThreadContext(thread->contextId(), curThread); 597 setupFetchRequest(ifetch_req); 598 DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr()); 599 thread->itb->translateTiming(ifetch_req, thread->getTC(), 600 
&fetchTranslation, BaseTLB::Execute); 601 } else { 602 _status = IcacheWaitResponse; 603 completeIfetch(NULL); 604 605 updateCycleCounts(); 606 } 607} 608 609 610void 611TimingSimpleCPU::sendFetch(const Fault &fault, RequestPtr req, 612 ThreadContext *tc) 613{ 614 if (fault == NoFault) { 615 DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n", 616 req->getVaddr(), req->getPaddr()); 617 ifetch_pkt = new Packet(req, MemCmd::ReadReq); 618 ifetch_pkt->dataStatic(&inst); 619 DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr()); 620 621 if (!icachePort.sendTimingReq(ifetch_pkt)) { 622 // Need to wait for retry 623 _status = IcacheRetry; 624 } else { 625 // Need to wait for cache to respond 626 _status = IcacheWaitResponse; 627 // ownership of packet transferred to memory system 628 ifetch_pkt = NULL; 629 } 630 } else { 631 DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr()); 632 delete req; 633 // fetch fault: advance directly to next instruction (fault handler) 634 _status = BaseSimpleCPU::Running; 635 advanceInst(fault); 636 } 637 638 updateCycleCounts(); 639} 640 641 642void 643TimingSimpleCPU::advanceInst(const Fault &fault) 644{ 645 SimpleExecContext &t_info = *threadInfo[curThread]; 646 647 if (_status == Faulting) 648 return; 649 650 if (fault != NoFault) { 651 advancePC(fault); 652 DPRINTF(SimpleCPU, "Fault occured, scheduling fetch event\n"); 653 reschedule(fetchEvent, clockEdge(), true); 654 _status = Faulting; 655 return; 656 } 657 658 659 if (!t_info.stayAtPC) 660 advancePC(fault); 661 662 if (tryCompleteDrain()) 663 return; 664 665 if (_status == BaseSimpleCPU::Running) { 666 // kick off fetch of next instruction... callback from icache 667 // response will cause that instruction to be executed, 668 // keeping the CPU running. 
669 fetch(); 670 } 671} 672 673 674void 675TimingSimpleCPU::completeIfetch(PacketPtr pkt) 676{ 677 SimpleExecContext& t_info = *threadInfo[curThread]; 678 679 DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ? 680 pkt->getAddr() : 0); 681 682 // received a response from the icache: execute the received 683 // instruction 684 assert(!pkt || !pkt->isError()); 685 assert(_status == IcacheWaitResponse); 686 687 _status = BaseSimpleCPU::Running; 688 689 updateCycleCounts(); 690 691 if (pkt) 692 pkt->req->setAccessLatency(); 693 694 695 preExecute(); 696 if (curStaticInst && curStaticInst->isMemRef()) { 697 // load or store: just send to dcache 698 Fault fault = curStaticInst->initiateAcc(&t_info, traceData); 699 700 // If we're not running now the instruction will complete in a dcache 701 // response callback or the instruction faulted and has started an 702 // ifetch 703 if (_status == BaseSimpleCPU::Running) { 704 if (fault != NoFault && traceData) { 705 // If there was a fault, we shouldn't trace this instruction. 
706 delete traceData; 707 traceData = NULL; 708 } 709 710 postExecute(); 711 // @todo remove me after debugging with legion done 712 if (curStaticInst && (!curStaticInst->isMicroop() || 713 curStaticInst->isFirstMicroop())) 714 instCnt++; 715 advanceInst(fault); 716 } 717 } else if (curStaticInst) { 718 // non-memory instruction: execute completely now 719 Fault fault = curStaticInst->execute(&t_info, traceData); 720 721 // keep an instruction count 722 if (fault == NoFault) 723 countInst(); 724 else if (traceData && !DTRACE(ExecFaulting)) { 725 delete traceData; 726 traceData = NULL; 727 } 728 729 postExecute(); 730 // @todo remove me after debugging with legion done 731 if (curStaticInst && (!curStaticInst->isMicroop() || 732 curStaticInst->isFirstMicroop())) 733 instCnt++; 734 advanceInst(fault); 735 } else { 736 advanceInst(NoFault); 737 } 738 739 if (pkt) { 740 delete pkt->req; 741 delete pkt; 742 } 743} 744 745void 746TimingSimpleCPU::IcachePort::ITickEvent::process() 747{ 748 cpu->completeIfetch(pkt); 749} 750 751bool 752TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt) 753{ 754 DPRINTF(SimpleCPU, "Received fetch response %#x\n", pkt->getAddr()); 755 // we should only ever see one response per cycle since we only 756 // issue a new request once this response is sunk 757 assert(!tickEvent.scheduled()); 758 // delay processing of returned data until next CPU clock edge 759 tickEvent.schedule(pkt, cpu->clockEdge()); 760 761 return true; 762} 763 764void 765TimingSimpleCPU::IcachePort::recvReqRetry() 766{ 767 // we shouldn't get a retry unless we have a packet that we're 768 // waiting to transmit 769 assert(cpu->ifetch_pkt != NULL); 770 assert(cpu->_status == IcacheRetry); 771 PacketPtr tmp = cpu->ifetch_pkt; 772 if (sendTimingReq(tmp)) { 773 cpu->_status = IcacheWaitResponse; 774 cpu->ifetch_pkt = NULL; 775 } 776} 777 778void 779TimingSimpleCPU::completeDataAccess(PacketPtr pkt) 780{ 781 // received a response from the dcache: complete the load or 
store 782 // instruction 783 assert(!pkt->isError()); 784 assert(_status == DcacheWaitResponse || _status == DTBWaitResponse || 785 pkt->req->getFlags().isSet(Request::NO_ACCESS)); 786 787 pkt->req->setAccessLatency(); 788 789 updateCycleCounts(); 790 791 if (pkt->senderState) { 792 SplitFragmentSenderState * send_state = 793 dynamic_cast<SplitFragmentSenderState *>(pkt->senderState); 794 assert(send_state); 795 delete pkt->req; 796 delete pkt; 797 PacketPtr big_pkt = send_state->bigPkt; 798 delete send_state; 799 800 SplitMainSenderState * main_send_state = 801 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState); 802 assert(main_send_state); 803 // Record the fact that this packet is no longer outstanding. 804 assert(main_send_state->outstanding != 0); 805 main_send_state->outstanding--; 806 807 if (main_send_state->outstanding) { 808 return; 809 } else { 810 delete main_send_state; 811 big_pkt->senderState = NULL; 812 pkt = big_pkt; 813 } 814 } 815 816 _status = BaseSimpleCPU::Running; 817 818 Fault fault = curStaticInst->completeAcc(pkt, threadInfo[curThread], 819 traceData); 820 821 // keep an instruction count 822 if (fault == NoFault) 823 countInst(); 824 else if (traceData) { 825 // If there was a fault, we shouldn't trace this instruction. 
826 delete traceData; 827 traceData = NULL; 828 } 829 830 delete pkt->req; 831 delete pkt; 832 833 postExecute(); 834 835 advanceInst(fault); 836} 837 838void 839TimingSimpleCPU::updateCycleCounts() 840{ 841 const Cycles delta(curCycle() - previousCycle); 842 843 numCycles += delta; 844 ppCycles->notify(delta); 845 846 previousCycle = curCycle(); 847} 848 849void 850TimingSimpleCPU::DcachePort::recvTimingSnoopReq(PacketPtr pkt) 851{ 852 // X86 ISA: Snooping an invalidation for monitor/mwait 853 if(cpu->getCpuAddrMonitor()->doMonitor(pkt)) { 854 cpu->wakeup(); 855 } 856 857 for (auto &t_info : cpu->threadInfo) { 858 TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask); 859 } 860} 861 862void 863TimingSimpleCPU::DcachePort::recvFunctionalSnoop(PacketPtr pkt) 864{ 865 // X86 ISA: Snooping an invalidation for monitor/mwait 866 if(cpu->getCpuAddrMonitor()->doMonitor(pkt)) { 867 cpu->wakeup(); 868 } 869} 870 871bool 872TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt) 873{ 874 DPRINTF(SimpleCPU, "Received load/store response %#x\n", pkt->getAddr()); 875 876 // The timing CPU is not really ticked, instead it relies on the 877 // memory system (fetch and load/store) to set the pace. 
878 if (!tickEvent.scheduled()) { 879 // Delay processing of returned data until next CPU clock edge 880 tickEvent.schedule(pkt, cpu->clockEdge()); 881 return true; 882 } else { 883 // In the case of a split transaction and a cache that is 884 // faster than a CPU we could get two responses in the 885 // same tick, delay the second one 886 if (!retryRespEvent.scheduled()) 887 cpu->schedule(retryRespEvent, cpu->clockEdge(Cycles(1))); 888 return false; 889 } 890} 891 892void 893TimingSimpleCPU::DcachePort::DTickEvent::process() 894{ 895 cpu->completeDataAccess(pkt); 896} 897 898void 899TimingSimpleCPU::DcachePort::recvReqRetry() 900{ 901 // we shouldn't get a retry unless we have a packet that we're 902 // waiting to transmit 903 assert(cpu->dcache_pkt != NULL); 904 assert(cpu->_status == DcacheRetry); 905 PacketPtr tmp = cpu->dcache_pkt; 906 if (tmp->senderState) { 907 // This is a packet from a split access. 908 SplitFragmentSenderState * send_state = 909 dynamic_cast<SplitFragmentSenderState *>(tmp->senderState); 910 assert(send_state); 911 PacketPtr big_pkt = send_state->bigPkt; 912 913 SplitMainSenderState * main_send_state = 914 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState); 915 assert(main_send_state); 916 917 if (sendTimingReq(tmp)) { 918 // If we were able to send without retrying, record that fact 919 // and try sending the other fragment. 
920 send_state->clearFromParent(); 921 int other_index = main_send_state->getPendingFragment(); 922 if (other_index > 0) { 923 tmp = main_send_state->fragments[other_index]; 924 cpu->dcache_pkt = tmp; 925 if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) || 926 (big_pkt->isWrite() && cpu->handleWritePacket())) { 927 main_send_state->fragments[other_index] = NULL; 928 } 929 } else { 930 cpu->_status = DcacheWaitResponse; 931 // memory system takes ownership of packet 932 cpu->dcache_pkt = NULL; 933 } 934 } 935 } else if (sendTimingReq(tmp)) { 936 cpu->_status = DcacheWaitResponse; 937 // memory system takes ownership of packet 938 cpu->dcache_pkt = NULL; 939 } 940} 941 942TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu, 943 Tick t) 944 : pkt(_pkt), cpu(_cpu) 945{ 946 cpu->schedule(this, t); 947} 948 949void 950TimingSimpleCPU::IprEvent::process() 951{ 952 cpu->completeDataAccess(pkt); 953} 954 955const char * 956TimingSimpleCPU::IprEvent::description() const 957{ 958 return "Timing Simple CPU Delay IPR event"; 959} 960 961 962void 963TimingSimpleCPU::printAddr(Addr a) 964{ 965 dcachePort.printAddr(a); 966} 967 968 969//////////////////////////////////////////////////////////////////////// 970// 971// TimingSimpleCPU Simulation Object 972// 973TimingSimpleCPU * 974TimingSimpleCPUParams::create() 975{ 976 return new TimingSimpleCPU(this); 977} 978