timing.cc revision 10653:e3fc6bc7f97e
/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2010-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/simple/timing.hh"
#include "cpu/exetrace.hh"
#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

#include "debug/Mwait.hh"

using namespace std;
using namespace TheISA;

void
TimingSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->switched_out) {
        for (int i = 0; i < threadContexts.size(); ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, _cpuId);
        }
    }
}

void
TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    cpu->schedule(this, t);
}

TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
      dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
      fetchEvent(this), drainManager(NULL)
{
    _status = Idle;

    system->totalNumInsts = 0;
}


TimingSimpleCPU::~TimingSimpleCPU()
{
}

unsigned int
TimingSimpleCPU::drain(DrainManager *drain_manager)
{
    assert(!drainManager);
    if (switchedOut())
        return 0;

    if (_status == Idle ||
        (_status == BaseSimpleCPU::Running && isDrained())) {
        DPRINTF(Drain, "No need to drain.\n");
        return 0;
    } else {
        drainManager = drain_manager;
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());

        // The fetch event can become descheduled if a drain didn't
        // succeed on the first attempt. We need to reschedule it if
        // the CPU is waiting for a microcode routine to complete.
        if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled())
            schedule(fetchEvent, clockEdge());

        return 1;
    }
}

void
TimingSimpleCPU::drainResume()
{
    assert(!fetchEvent.scheduled());
    assert(!drainManager);
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());
    if (threadContexts.size() > 1)
        fatal("The timing CPU only supports one thread.\n");

    if (thread->status() == ThreadContext::Active) {
        schedule(fetchEvent, nextCycle());
        _status = BaseSimpleCPU::Running;
        notIdleFraction = 1;
    } else {
        _status = BaseSimpleCPU::Idle;
        notIdleFraction = 0;
    }
}

bool
TimingSimpleCPU::tryCompleteDrain()
{
    if (!drainManager)
        return false;

    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    drainManager->signalDrainDone();
    drainManager = NULL;

    return true;
}

void
TimingSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!fetchEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(!stayAtPC);
    assert(microPC() == 0);

    updateCycleCounts();
}


void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    previousCycle = curCycle();
}

void
TimingSimpleCPU::verifyMemoryMode() const
{
    if (!system->isTimingMode()) {
        fatal("The timing CPU requires the memory system to be in "
              "'timing' mode.\n");
    }
}

void
TimingSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction = 1;
    _status = BaseSimpleCPU::Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, clockEdge(Cycles(0)));
}


void
TimingSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction = 0;
    _status = Idle;
}

bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    RequestPtr req = pkt->req;

    // We're about to issue a locked load, so tell the monitor
    // to start caring about this address
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }
    if (req->isMmappedIpr()) {
        Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(pkt)) {
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

void
TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
                          bool read)
{
    PacketPtr pkt = buildPacket(req, read);
    pkt->dataDynamic<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // flag to suppress cache access

        if (req->isLLSC()) {
            do_access =
                TheISA::handleLockedWrite(thread, req,
                                          dcachePort.cacheBlockMask);
        } else if (req->isCondSwap()) {
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}

void
TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
                               RequestPtr req, uint8_t *data, bool read)
{
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleReadPacket(pkt1)) {
            send_state->clearFromParent();
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleReadPacket(pkt2)) {
                send_state->clearFromParent();
            }
        }
    } else {
        dcache_pkt = pkt1;
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleWritePacket()) {
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleWritePacket()) {
                send_state->clearFromParent();
            }
        }
    }
}

void
TimingSimpleCPU::translationFault(const Fault &fault)
{
    // fault may be NoFault in cases where a fault is suppressed,
    // for instance prefetches.
    updateCycleCounts();

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    advanceInst(fault);
}

PacketPtr
TimingSimpleCPU::buildPacket(RequestPtr req, bool read)
{
    return read ?
        Packet::createRead(req) : Packet::createWrite(req);
}

void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        pkt1 = buildPacket(req, read);
        return;
    }

    pkt1 = buildPacket(req1, read);
    pkt2 = buildPacket(req2, read);

    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());

    pkt->dataDynamic<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}

Fault
TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
                         unsigned size, unsigned flags)
{
    Fault fault;
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Read;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    req->taskId(taskId());

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, new uint8_t[size],
                                      NULL, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
        DataTranslation<TimingSimpleCPU *> *translation
            = new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    return NoFault;
}

bool
TimingSimpleCPU::handleWritePacket()
{
    RequestPtr req = dcache_pkt->req;
    if (req->isMmappedIpr()) {
        Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

Fault
TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    uint8_t *newData = new uint8_t[size];
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Write;
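
    // Keep a CPU-owned copy of the store data (zero-filled below for cache
    // block zero requests): the write completes asynchronously, after the
    // timing translation and the memory access, so the caller's buffer is
    // not used beyond this point.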
    if (data == NULL) {
        assert(flags & Request::CACHE_BLOCK_ZERO);
        // This must be a cache block cleaning request
        memset(newData, 0, size);
    } else {
        memcpy(newData, data, size);
    }

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    req->taskId(taskId());

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *translation =
            new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}


void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    _status = BaseSimpleCPU::Running;

    if (state->getFault() != NoFault) {
        if (state->isPrefetch()) {
            state->setNoFault();
        }
        delete [] state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode == BaseTLB::Read);
        }
    }

    delete state;
}


void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
        checkForInterrupts();
        checkPcEventQueue();
    }

    // We must have just got suspended by a PC event
    if (_status == Idle)
        return;

    TheISA::PCState pcState = thread->pcState();
    bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
        !curMacroStaticInst;

    if (needToFetch) {
        _status = BaseSimpleCPU::Running;
        Request *ifetch_req = new Request();
        ifetch_req->taskId(taskId());
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        DPRINTF(SimpleCPU, "Translating address %#x\n",
                ifetch_req->getVaddr());
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                                     BaseTLB::Execute);
    } else {
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        updateCycleCounts();
    }
}


void
TimingSimpleCPU::sendFetch(const Fault &fault, RequestPtr req,
                           ThreadContext *tc)
{
    if (fault == NoFault) {
        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
                req->getVaddr(), req->getPaddr());
        ifetch_pkt = new Packet(req, MemCmd::ReadReq);
        ifetch_pkt->dataStatic(&inst);
        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());
        if (!icachePort.sendTimingReq(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n",
                req->getVaddr());
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        _status = BaseSimpleCPU::Running;
        advanceInst(fault);
    }

    updateCycleCounts();
}


void
TimingSimpleCPU::advanceInst(const Fault &fault)
{
    if (_status == Faulting)
        return;

    if (fault != NoFault) {
        advancePC(fault);
        DPRINTF(SimpleCPU, "Fault occurred, scheduling fetch event\n");
        reschedule(fetchEvent, clockEdge(), true);
        _status = Faulting;
        return;
    }


    if (!stayAtPC)
        advancePC(fault);

    if (tryCompleteDrain())
        return;

    if (_status == BaseSimpleCPU::Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction
    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = BaseSimpleCPU::Running;

    updateCycleCounts();

    if (pkt)
        pkt->req->setAccessLatency();


    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);

        // If we're not running now, the instruction will complete in a
        // dcache response callback, or the instruction faulted and has
        // started an ifetch.
        if (_status == BaseSimpleCPU::Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        advanceInst(NoFault);
    }

    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}

void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

bool
TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->clockEdge();

    if (next_tick == curTick())
        cpu->completeIfetch(pkt);
    else
        tickEvent.schedule(pkt, next_tick);

    return true;
}

void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTimingReq(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    pkt->req->setAccessLatency();

    updateCycleCounts();

    if (pkt->senderState) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            return;
        } else {
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = BaseSimpleCPU::Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    advanceInst(fault);
}

void
TimingSimpleCPU::updateCycleCounts()
{
    const Cycles delta(curCycle() - previousCycle);

    numCycles += delta;
    ppCycles->notify(delta);

    previousCycle = curCycle();
}

void
TimingSimpleCPU::DcachePort::recvTimingSnoopReq(PacketPtr pkt)
{
    // X86 ISA: Snooping an invalidation for monitor/mwait
    if (cpu->getAddrMonitor()->doMonitor(pkt)) {
        cpu->wakeup();
    }
    TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
}

void
TimingSimpleCPU::DcachePort::recvFunctionalSnoop(PacketPtr pkt)
{
    // X86 ISA: Snooping an invalidation for monitor/mwait
    if (cpu->getAddrMonitor()->doMonitor(pkt)) {
        cpu->wakeup();
    }
}

bool
TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
{
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->clockEdge();

    if (next_tick == curTick()) {
        cpu->completeDataAccess(pkt);
    } else {
        if (!tickEvent.scheduled()) {
            tickEvent.schedule(pkt, next_tick);
        } else {
            // In the case of a split transaction and a cache that is
            // faster than a CPU we could get two responses before
            // next_tick expires
            if (!retryEvent.scheduled())
                cpu->schedule(retryEvent, next_tick);
            return false;
        }
    }

    return true;
}

void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTimingReq(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTimingReq(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new TimingSimpleCPU(this);
}