/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2012-2013,2015,2017-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */
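
// The AtomicSimpleCPU is gem5's simplest CPU model: each instruction is
// executed to completion within a single call to tick(), and all memory
// accesses go out as atomic-mode requests (port.sendAtomic()) whose
// returned latencies are merely accounted for rather than modelled as
// timing packets. The memory system must therefore be run in 'atomic'
// mode (see verifyMemoryMode() below).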

#include "cpu/simple/atomic.hh"

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/exetrace.hh"
#include "cpu/utils.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

void
AtomicSimpleCPU::init()
{
    BaseSimpleCPU::init();

    int cid = threadContexts[0]->contextId();
    ifetch_req->setContext(cid);
    data_read_req->setContext(cid);
    data_write_req->setContext(cid);
    data_amo_req->setContext(cid);
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p),
      tickEvent([this]{ tick(); }, "AtomicSimpleCPU tick",
                false, Event::CPU_Tick_Pri),
      width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      dcache_access(false), dcache_latency(0),
      ppCommit(nullptr)
{
    _status = Idle;
    ifetch_req = std::make_shared<Request>();
    data_read_req = std::make_shared<Request>();
    data_write_req = std::make_shared<Request>();
    data_amo_req = std::make_shared<Request>();
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

DrainState
AtomicSimpleCPU::drain()
{
    // Deschedule any power gating event
    deschedulePowerGatingEvent();

    if (switchedOut())
        return DrainState::Drained;

    if (!isCpuDrained()) {
        DPRINTF(Drain, "Requesting drain.\n");
        return DrainState::Draining;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        activeThreads.clear();
        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return DrainState::Drained;
    }
}

void
AtomicSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (tid != sender) {
            if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
                wakeup(tid);
            }

            TheISA::handleLockedSnoop(threadInfo[tid]->thread,
                                      pkt, dcachePort.cacheBlockMask);
        }
    }
}

void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());

    _status = BaseSimpleCPU::Idle;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
            threadInfo[tid]->notIdleFraction = 1;
            activeThreads.push_back(tid);
            _status = BaseSimpleCPU::Running;

            // Tick if any threads are active
            if (!tickEvent.scheduled()) {
                schedule(tickEvent, nextCycle());
            }
        } else {
            threadInfo[tid]->notIdleFraction = 0;
        }
    }

    // Reschedule any power gating event
    schedulePowerGatingEvent();
}

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (drainState() != DrainState::Draining)
        return false;

    DPRINTF(Drain, "tryCompleteDrain.\n");
    if (!isCpuDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    signalDrainDone();

    return true;
}
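
// Switching CPU models is expected to go through the drain protocol
// above: by the time switchOut() or takeOverFrom() runs, drain() must
// already have descheduled the tick event, which the asserts below
// double-check.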

void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isCpuDrained());
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());
}

void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}

void
AtomicSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num < numThreads);

    threadInfo[thread_num]->notIdleFraction = 1;
    Cycles delta = ticksToCycles(threadInfo[thread_num]->thread->lastActivate -
                                 threadInfo[thread_num]->thread->lastSuspend);
    numCycles += delta;

    if (!tickEvent.scheduled()) {
        // Make sure ticks are still on multiples of cycles
        schedule(tickEvent, clockEdge(Cycles(0)));
    }
    _status = BaseSimpleCPU::Running;
    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num)
        == activeThreads.end()) {
        activeThreads.push_back(thread_num);
    }

    BaseCPU::activateContext(thread_num);
}


void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num < numThreads);
    activeThreads.remove(thread_num);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    threadInfo[thread_num]->notIdleFraction = 0;

    if (activeThreads.empty()) {
        _status = Idle;

        if (tickEvent.scheduled()) {
            deschedule(tickEvent);
        }
    }

    BaseCPU::suspendContext(thread_num);
}

Tick
AtomicSimpleCPU::sendPacket(MasterPort &port, const PacketPtr &pkt)
{
    return port.sendAtomic(pkt);
}

Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);

    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // If the snoop invalidates, release any associated locks. When run
    // without caches, invalidation packets will not be received; hence,
    // we must also check whether the incoming packet is a write and wake
    // up the processor accordingly.
    if (pkt->isInvalidate() || pkt->isWrite()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }

    return 0;
}

void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // If the snoop invalidates, release any associated locks.
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }
}
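
// genMemFragmentRequest() carves a (possibly unaligned) access into
// fragments that each fit within a single cache line, so readMem() and
// writeMem() below can issue one request per line. Illustrative example,
// assuming a 64-byte cache line (the line size is configurable):
//
//   frag_addr = 0x78, size_left = 16
//     -> addrBlockOffset(0x78, 64) = 0x38 (56), so
//        frag_size = min(64 - 56, 16) = 8 and size_left becomes 8;
//   next iteration: frag_addr = 0x80 (line-aligned)
//     -> frag_size = min(64, 8) = 8 and size_left becomes 0.
//
// If a byte-enable mask is supplied and no byte of the current fragment
// is enabled, the request is left un-built and false is returned.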
bool
AtomicSimpleCPU::genMemFragmentRequest(const RequestPtr& req, Addr frag_addr,
                                       int size, Request::Flags flags,
                                       const std::vector<bool>& byte_enable,
                                       int& frag_size, int& size_left) const
{
    bool predicate = true;
    Addr inst_addr = threadInfo[curThread]->thread->pcState().instAddr();

    frag_size = std::min(
        cacheLineSize() - addrBlockOffset(frag_addr, cacheLineSize()),
        (Addr) size_left);
    size_left -= frag_size;

    if (!byte_enable.empty()) {
        // Set up byte-enable mask for the current fragment
        auto it_start = byte_enable.begin() + (size - (frag_size + size_left));
        auto it_end = byte_enable.begin() + (size - size_left);
        if (isAnyActiveElement(it_start, it_end)) {
            req->setVirt(0, frag_addr, frag_size, flags, dataMasterId(),
                         inst_addr);
            req->setByteEnable(std::vector<bool>(it_start, it_end));
        } else {
            predicate = false;
        }
    } else {
        req->setVirt(0, frag_addr, frag_size, flags, dataMasterId(),
                     inst_addr);
    }

    return predicate;
}

Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data, unsigned size,
                         Request::Flags flags,
                         const std::vector<bool>& byteEnable)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    // use the CPU's statically allocated read request and packet objects
    const RequestPtr &req = data_read_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    dcache_latency = 0;

    req->taskId(taskId());

    Addr frag_addr = addr;
    int frag_size = 0;
    int size_left = size;
    bool predicate;
    Fault fault = NoFault;

    while (1) {
        predicate = genMemFragmentRequest(req, frag_addr, size, flags,
                                          byteEnable, frag_size, size_left);

        // translate to physical address
        if (predicate) {
            fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                 BaseTLB::Read);
        }

        // Now do the access.
        if (predicate && fault == NoFault &&
            !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt(req, Packet::makeReadCmd(req));
            pkt.dataStatic(data);

            if (req->isMmappedIpr()) {
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            } else {
                dcache_latency += sendPacket(dcachePort, &pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        // If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        // If we don't need to access further cache lines, stop now.
        if (size_left == 0) {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the next cache line.
         */
        frag_addr += frag_size;

        // Move the pointer we're reading into to the correct location.
        data += frag_size;
    }
}
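
// writeMem() mirrors readMem()'s fragment loop, with extra handling for
// the write-only cases below: store-conditionals go through
// handleLockedWrite(), which may suppress the cache access when the
// reservation has been lost; swap and conditional-swap requests carry
// the expected value as the request's extra data; and cache-block-clean
// requests arrive with no data (STORE_NO_DATA) and are backed by a
// zeroed buffer.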
Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, Addr addr,
                          Request::Flags flags, uint64_t *res,
                          const std::vector<bool>& byteEnable)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;
    static uint8_t zero_array[64] = {};

    if (data == NULL) {
        assert(size <= 64);
        assert(flags & Request::STORE_NO_DATA);
        // This must be a cache block cleaning request
        data = zero_array;
    }

    // use the CPU's statically allocated write request and packet objects
    const RequestPtr &req = data_write_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    dcache_latency = 0;

    req->taskId(taskId());

    Addr frag_addr = addr;
    int frag_size = 0;
    int size_left = size;
    int curr_frag_id = 0;
    bool predicate;
    Fault fault = NoFault;

    while (1) {
        predicate = genMemFragmentRequest(req, frag_addr, size, flags,
                                          byteEnable, frag_size, size_left);

        // translate to physical address
        if (predicate)
            fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                 BaseTLB::Write);

        // Now do the access.
        if (predicate && fault == NoFault) {
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                assert(curr_frag_id == 0);
                do_access =
                    TheISA::handleLockedWrite(thread, req,
                                              dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                assert(curr_frag_id == 0);
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt(req, Packet::makeWriteCmd(req));
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    dcache_latency += sendPacket(dcachePort, &pkt);

                    // Notify other threads on this CPU of the write
                    threadSnoop(&pkt, curThread);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res && curr_frag_id == 0);
                    memcpy(res, pkt.getConstPtr<uint8_t>(), size);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        // If there's a fault or we don't need to access a second cache
        // line, stop now.
        if (fault != NoFault || size_left == 0) {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(byteEnable.empty());
                locked = false;
            }

            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the next cache line.
         */
        frag_addr += frag_size;

        // Move the pointer we're writing from to the correct location.
        data += frag_size;

        curr_frag_id++;
    }
}

Fault
AtomicSimpleCPU::amoMem(Addr addr, uint8_t* data, unsigned size,
                        Request::Flags flags, AtomicOpFunctor *amo_op)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    // use the CPU's statically allocated AMO request and packet objects
    const RequestPtr &req = data_amo_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    // The address of the second part of this access if it needs to be
    // split across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());
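
    // Worked example (illustrative, assuming a 64-byte cache line): for
    // addr = 0x3c and size = 8, roundDown(0x43, 64) = 0x40 > 0x3c, so
    // the access straddles two lines and triggers the panic below; for
    // addr = 0x40 and size = 8, roundDown(0x47, 64) = 0x40 == addr, and
    // the access is allowed.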

    // AMO requests that access across a cache line boundary are not
    // allowed, since the cache does not guarantee that AMO ops are
    // executed atomically across two cache lines.
    // For ISAs such as x86 that require AMO operations to work on
    // accesses that cross cache-line boundaries, the cache needs to be
    // modified to support locking both cache lines to guarantee
    // atomicity.
    if (secondAddr > addr) {
        panic("AMO request should not access across a cache line boundary\n");
    }

    dcache_latency = 0;

    req->taskId(taskId());
    req->setVirt(0, addr, size, flags, dataMasterId(),
                 thread->pcState().instAddr(), amo_op);

    // translate to physical address
    Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                               BaseTLB::Write);

    // Now do the access.
    if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
        // We treat AMO accesses as write accesses with a SwapReq command;
        // data will hold the return data of the AMO access.
        Packet pkt(req, Packet::makeWriteCmd(req));
        pkt.dataStatic(data);

        if (req->isMmappedIpr()) {
            dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
        } else {
            dcache_latency += sendPacket(dcachePort, &pkt);
        }

        dcache_access = true;

        assert(!pkt.isError());
        assert(!req->isLLSC());
    }

    if (fault != NoFault && req->isPrefetch()) {
        return NoFault;
    }

    // If there's a fault and we're not doing a prefetch, return it
    return fault;
}
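
// tick() is the main loop of the model: each invocation executes up to
// 'width' instructions (continuing past that while a locked RMW sequence
// is in flight), doing fetch, decode, and execute back to back. Latencies
// reported by the atomic accesses are accumulated as stall ticks, rounded
// up to whole clock periods, and folded into the time at which the next
// tick event is scheduled.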
void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    // Change thread if multi-threaded
    swapActiveThread();

    // Set memory request IDs to the current thread
    if (numThreads > 1) {
        ContextID cid = threadContexts[curThread]->contextId();

        ifetch_req->setContext(cid);
        data_read_req->setContext(cid);
        data_write_req->setContext(cid);
        data_amo_req->setContext(cid);
    }

    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;
        updateCycleCounters(BaseCPU::CPU_STATE_ON);

        if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
            checkForInterrupts();
            checkPcEventQueue();
        }

        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req->taskId(taskId());
            setupFetchRequest(ifetch_req);
            fault = thread->itb->translateAtomic(ifetch_req, thread->getTC(),
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false;  // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that
                // works this code should be uncommented.
                // Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                icache_access = true;
                Packet ifetch_pkt = Packet(ifetch_req, MemCmd::ReadReq);
                ifetch_pkt.dataStatic(&inst);

                icache_latency = sendPacket(icachePort, &ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction
                // directly into the CPU object's inst field.
                //}
            }

            preExecute();

            Tick stall_ticks = 0;
            if (curStaticInst) {
                fault = curStaticInst->execute(&t_info, traceData);

                // keep an instruction count
                if (fault == NoFault) {
                    countInst();
                    ppCommit->notify(std::make_pair(thread, curStaticInst));
                } else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                if (fault != NoFault &&
                    dynamic_pointer_cast<SyscallRetryFault>(fault)) {
                    // Retry execution of system calls after a delay.
                    // Prevents immediate re-execution, since the conditions
                    // which caused the retry are unlikely to change every
                    // tick.
                    stall_ticks += clockEdge(syscallRetryLatency) - curTick();
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                                  curStaticInst->isFirstMicroop()))
                instCnt++;

            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                           clockPeriod();
            }

        }
        if (fault != NoFault || !t_info.stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        reschedule(tickEvent, curTick() + latency, true);
}

void
AtomicSimpleCPU::regProbePoints()
{
    BaseCPU::regProbePoints();

    ppCommit = new ProbePointArg<pair<SimpleThread*, const StaticInstPtr>>
        (getProbeManager(), "Commit");
}

void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    return new AtomicSimpleCPU(this);
}