/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2012-2013,2015,2017-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
282665SN/A * 292665SN/A * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 302665SN/A * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 31803SN/A * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 32768SN/A * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 331730SN/A * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 34773SN/A * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 35768SN/A * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 36768SN/A * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 37773SN/A * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 38773SN/A * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 39768SN/A * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 40768SN/A * 41768SN/A * Authors: Steve Reinhardt 42768SN/A */ 43768SN/A 442542SN/A#include "cpu/simple/atomic.hh" 452542SN/A 463540Sgblack@eecs.umich.edu#include "arch/locked_mem.hh" 473540Sgblack@eecs.umich.edu#include "arch/mmapped_ipr.hh" 483540Sgblack@eecs.umich.edu#include "arch/utility.hh" 493540Sgblack@eecs.umich.edu#include "base/output.hh" 503348SN/A#include "config/the_isa.hh" 513348SN/A#include "cpu/exetrace.hh" 522542SN/A#include "debug/Drain.hh" 53768SN/A#include "debug/ExecFaulting.hh" 542542SN/A#include "debug/SimpleCPU.hh" 55768SN/A#include "mem/packet.hh" 56768SN/A#include "mem/packet_access.hh" 572107SN/A#include "mem/physical.hh" 582107SN/A#include "params/AtomicSimpleCPU.hh" 59773SN/A#include "sim/faults.hh" 601854SN/A#include "sim/full_system.hh" 611854SN/A#include "sim/system.hh" 621817SN/A 631817SN/Ausing namespace std; 641817SN/Ausing namespace TheISA; 651817SN/A 661817SN/Avoid 67773SN/AAtomicSimpleCPU::init() 681817SN/A{ 691817SN/A BaseSimpleCPU::init(); 701817SN/A 711817SN/A int cid = threadContexts[0]->contextId(); 721817SN/A 
ifetch_req->setContext(cid); 731817SN/A data_read_req->setContext(cid); 741817SN/A data_write_req->setContext(cid); 751817SN/A} 761817SN/A 771817SN/AAtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p) 781817SN/A : BaseSimpleCPU(p), 791817SN/A tickEvent([this]{ tick(); }, "AtomicSimpleCPU tick", 801817SN/A false, Event::CPU_Tick_Pri), 811817SN/A width(p->width), locked(false), 821817SN/A simulate_data_stalls(p->simulate_data_stalls), 831817SN/A simulate_inst_stalls(p->simulate_inst_stalls), 841817SN/A icachePort(name() + ".icache_port", this), 851817SN/A dcachePort(name() + ".dcache_port", this), 862539SN/A dcache_access(false), dcache_latency(0), 871817SN/A ppCommit(nullptr) 882542SN/A{ 892539SN/A _status = Idle; 901817SN/A ifetch_req = std::make_shared<Request>(); 911817SN/A data_read_req = std::make_shared<Request>(); 921817SN/A data_write_req = std::make_shared<Request>(); 931817SN/A} 941817SN/A 952539SN/A 961817SN/AAtomicSimpleCPU::~AtomicSimpleCPU() 971817SN/A{ 982539SN/A if (tickEvent.scheduled()) { 991817SN/A deschedule(tickEvent); 1001817SN/A } 1011817SN/A} 1022539SN/A 1031817SN/ADrainState 1042542SN/AAtomicSimpleCPU::drain() 1051817SN/A{ 1061817SN/A // Deschedule any power gating event (if any) 1072539SN/A deschedulePowerGatingEvent(); 1081817SN/A 1091817SN/A if (switchedOut()) 1102542SN/A return DrainState::Drained; 1111817SN/A 1121817SN/A if (!isDrained()) { 1131817SN/A DPRINTF(Drain, "Requesting drain.\n"); 1141817SN/A return DrainState::Draining; 1151817SN/A } else { 1161817SN/A if (tickEvent.scheduled()) 1172539SN/A deschedule(tickEvent); 1181817SN/A 1191817SN/A activeThreads.clear(); 1201817SN/A DPRINTF(Drain, "Not executing microcode, no need to drain.\n"); 1211817SN/A return DrainState::Drained; 1221817SN/A } 1231817SN/A} 1241817SN/A 1251817SN/Avoid 1261817SN/AAtomicSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender) 1272648SN/A{ 1282648SN/A DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(), 1291817SN/A 
pkt->cmdString()); 1301817SN/A 1312648SN/A for (ThreadID tid = 0; tid < numThreads; tid++) { 1321817SN/A if (tid != sender) { 1331817SN/A if (getCpuAddrMonitor(tid)->doMonitor(pkt)) { 1341817SN/A wakeup(tid); 1351817SN/A } 1361817SN/A 1372648SN/A TheISA::handleLockedSnoop(threadInfo[tid]->thread, 1381817SN/A pkt, dcachePort.cacheBlockMask); 1391817SN/A } 1402648SN/A } 1411817SN/A} 1421817SN/A 1431817SN/Avoid 1442648SN/AAtomicSimpleCPU::drainResume() 1451817SN/A{ 1462648SN/A assert(!tickEvent.scheduled()); 1472648SN/A if (switchedOut()) 1481817SN/A return; 1491817SN/A 1501817SN/A DPRINTF(SimpleCPU, "Resume\n"); 1511817SN/A verifyMemoryMode(); 1521817SN/A 1531854SN/A assert(!threadContexts.empty()); 1541817SN/A 1551854SN/A _status = BaseSimpleCPU::Idle; 1561854SN/A 1571854SN/A for (ThreadID tid = 0; tid < numThreads; tid++) { 1581854SN/A if (threadInfo[tid]->thread->status() == ThreadContext::Active) { 1591817SN/A threadInfo[tid]->notIdleFraction = 1; 1601817SN/A activeThreads.push_back(tid); 1611817SN/A _status = BaseSimpleCPU::Running; 1621854SN/A 1631854SN/A // Tick if any threads active 1641817SN/A if (!tickEvent.scheduled()) { 1651854SN/A schedule(tickEvent, nextCycle()); 1661854SN/A } 1671854SN/A } else { 1681854SN/A threadInfo[tid]->notIdleFraction = 0; 1691854SN/A } 1701817SN/A } 1711854SN/A 1721854SN/A // Reschedule any power gating event (if any) 1731854SN/A schedulePowerGatingEvent(); 1741854SN/A} 1751817SN/A 1761817SN/Abool 1771817SN/AAtomicSimpleCPU::tryCompleteDrain() 1781634SN/A{ 179772SN/A if (drainState() != DrainState::Draining) 180773SN/A return false; 1811634SN/A 182772SN/A DPRINTF(Drain, "tryCompleteDrain.\n"); 183772SN/A if (!isDrained()) 184772SN/A return false; 1851817SN/A 1861817SN/A DPRINTF(Drain, "CPU done draining, processing drain event\n"); 1871817SN/A signalDrainDone(); 1881817SN/A 1891817SN/A return true; 1901817SN/A} 1911817SN/A 192772SN/A 193776SN/Avoid 1941634SN/AAtomicSimpleCPU::switchOut() 195773SN/A{ 196831SN/A 
BaseSimpleCPU::switchOut(); 197772SN/A 198772SN/A assert(!tickEvent.scheduled()); 199772SN/A assert(_status == BaseSimpleCPU::Running || _status == Idle); 2001817SN/A assert(isDrained()); 201772SN/A} 2021634SN/A 203772SN/A 204772SN/Avoid 2051854SN/AAtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU) 2061854SN/A{ 2071854SN/A BaseSimpleCPU::takeOverFrom(oldCPU); 208918SN/A 2091854SN/A // The tick event should have been descheduled by drain() 2101854SN/A assert(!tickEvent.scheduled()); 2111854SN/A} 212771SN/A 213771SN/Avoid 214771SN/AAtomicSimpleCPU::verifyMemoryMode() const 2152539SN/A{ 216771SN/A if (!system->isAtomicMode()) { 2171817SN/A fatal("The atomic CPU requires the memory system to be in " 2181817SN/A "'atomic' mode.\n"); 2191817SN/A } 2202539SN/A} 2211817SN/A 2221817SN/Avoid 2231817SN/AAtomicSimpleCPU::activateContext(ThreadID thread_num) 2241817SN/A{ 2252542SN/A DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num); 2261817SN/A 2271817SN/A assert(thread_num < numThreads); 2281854SN/A 2291817SN/A threadInfo[thread_num]->notIdleFraction = 1; 2301854SN/A Cycles delta = ticksToCycles(threadInfo[thread_num]->thread->lastActivate - 2312539SN/A threadInfo[thread_num]->thread->lastSuspend); 2322539SN/A numCycles += delta; 2331817SN/A 234771SN/A if (!tickEvent.scheduled()) { 235771SN/A //Make sure ticks are still on multiples of cycles 236771SN/A schedule(tickEvent, clockEdge(Cycles(0))); 2371854SN/A } 238771SN/A _status = BaseSimpleCPU::Running; 2391817SN/A if (std::find(activeThreads.begin(), activeThreads.end(), thread_num) 2401854SN/A == activeThreads.end()) { 2411854SN/A activeThreads.push_back(thread_num); 2421854SN/A } 2431817SN/A 2441817SN/A BaseCPU::activateContext(thread_num); 2451817SN/A} 2461854SN/A 2471854SN/A 2481817SN/Avoid 2491817SN/AAtomicSimpleCPU::suspendContext(ThreadID thread_num) 2501854SN/A{ 2511854SN/A DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num); 2521854SN/A 2531817SN/A assert(thread_num < numThreads); 2541817SN/A 
activeThreads.remove(thread_num); 2551854SN/A 2561854SN/A if (_status == Idle) 2571817SN/A return; 2581817SN/A 2591817SN/A assert(_status == BaseSimpleCPU::Running); 2601817SN/A 2611817SN/A threadInfo[thread_num]->notIdleFraction = 0; 2621817SN/A 2631817SN/A if (activeThreads.empty()) { 2641817SN/A _status = Idle; 2651817SN/A 2661817SN/A if (tickEvent.scheduled()) { 2671817SN/A deschedule(tickEvent); 2681817SN/A } 2691817SN/A } 2701817SN/A 2711817SN/A BaseCPU::suspendContext(thread_num); 2721817SN/A} 2731817SN/A 2742648SN/ATick 2752648SN/AAtomicSimpleCPU::sendPacket(MasterPort &port, const PacketPtr &pkt) 2761817SN/A{ 2771817SN/A return port.sendAtomic(pkt); 2781817SN/A} 2791817SN/A 2801817SN/ATick 2812648SN/AAtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt) 2821817SN/A{ 2831817SN/A DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(), 2841817SN/A pkt->cmdString()); 2851817SN/A 2862648SN/A // X86 ISA: Snooping an invalidation for monitor/mwait 2871817SN/A AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner); 2882648SN/A 2892648SN/A for (ThreadID tid = 0; tid < cpu->numThreads; tid++) { 2901817SN/A if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) { 2911817SN/A cpu->wakeup(tid); 2921817SN/A } 2931817SN/A } 2941817SN/A 2952648SN/A // if snoop invalidates, release any associated locks 2961817SN/A // When run without caches, Invalidation packets will not be received 2971817SN/A // hence we must check if the incoming packets are writes and wakeup 2981817SN/A // the processor accordingly 2992648SN/A if (pkt->isInvalidate() || pkt->isWrite()) { 3001817SN/A DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n", 3012648SN/A pkt->getAddr()); 3022648SN/A for (auto &t_info : cpu->threadInfo) { 3031817SN/A TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask); 3041817SN/A } 3051817SN/A } 3061817SN/A 3071817SN/A return 0; 3082539SN/A} 3091817SN/A 3101817SN/Avoid 3111817SN/AAtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr 
pkt) 3122539SN/A{ 3131817SN/A DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(), 3141817SN/A pkt->cmdString()); 3151817SN/A 3161817SN/A // X86 ISA: Snooping an invalidation for monitor/mwait 3171817SN/A AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner); 3181817SN/A for (ThreadID tid = 0; tid < cpu->numThreads; tid++) { 3191817SN/A if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) { 3201817SN/A cpu->wakeup(tid); 3212539SN/A } 3221817SN/A } 3231817SN/A 3241817SN/A // if snoop invalidates, release any associated locks 3251854SN/A if (pkt->isInvalidate()) { 3261854SN/A DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n", 3271817SN/A pkt->getAddr()); 3281817SN/A for (auto &t_info : cpu->threadInfo) { 3291817SN/A TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask); 3301817SN/A } 3311817SN/A } 3321817SN/A} 3331817SN/A 3341817SN/AFault 3351817SN/AAtomicSimpleCPU::readMem(Addr addr, uint8_t * data, unsigned size, 3361817SN/A Request::Flags flags) 3371817SN/A{ 3381817SN/A SimpleExecContext& t_info = *threadInfo[curThread]; 3391817SN/A SimpleThread* thread = t_info.thread; 3401817SN/A 3411817SN/A // use the CPU's statically allocated read request and packet objects 3421817SN/A const RequestPtr &req = data_read_req; 3431817SN/A 3441817SN/A if (traceData) 3451817SN/A traceData->setMem(addr, size, flags); 3461817SN/A 3471817SN/A //The size of the data we're trying to read. 3481817SN/A int fullSize = size; 3491817SN/A 3501817SN/A //The address of the second part of this access if it needs to be split 3511817SN/A //across a cache line boundary. 
3521817SN/A Addr secondAddr = roundDown(addr + size - 1, cacheLineSize()); 3531817SN/A 3541817SN/A if (secondAddr > addr) 3551817SN/A size = secondAddr - addr; 3561817SN/A 3571817SN/A dcache_latency = 0; 3581817SN/A 3591817SN/A req->taskId(taskId()); 3601817SN/A while (1) { 3611817SN/A req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr()); 3621817SN/A 3631817SN/A // translate to physical address 3641817SN/A Fault fault = thread->dtb->translateAtomic(req, thread->getTC(), 3651854SN/A BaseTLB::Read); 3661817SN/A 3671854SN/A // Now do the access. 3681854SN/A if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) { 3691854SN/A Packet pkt(req, Packet::makeReadCmd(req)); 3701854SN/A pkt.dataStatic(data); 3711854SN/A 3721854SN/A if (req->isMmappedIpr()) { 3731854SN/A dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt); 3741854SN/A } else { 3751817SN/A dcache_latency += sendPacket(dcachePort, &pkt); 3761854SN/A } 3771854SN/A dcache_access = true; 3781854SN/A 3791854SN/A assert(!pkt.isError()); 3801817SN/A 3811817SN/A if (req->isLLSC()) { 3821817SN/A TheISA::handleLockedRead(thread, req); 3831854SN/A } 3841854SN/A } 3851817SN/A 3861854SN/A //If there's a fault, return it 3871854SN/A if (fault != NoFault) { 3881854SN/A if (req->isPrefetch()) { 3891854SN/A return NoFault; 3901854SN/A } else { 3911854SN/A return fault; 3921854SN/A } 3931854SN/A } 3941817SN/A 3951854SN/A //If we don't need to access a second cache line, stop now. 3961854SN/A if (secondAddr <= addr) 3971854SN/A { 3981854SN/A if (req->isLockedRMW() && fault == NoFault) { 3991817SN/A assert(!locked); 4001817SN/A locked = true; 4011817SN/A } 4021817SN/A 4031817SN/A return fault; 4041817SN/A } 4051817SN/A 4061817SN/A /* 4071817SN/A * Set up for accessing the second cache line. 4081817SN/A */ 4091817SN/A 4101817SN/A //Move the pointer we're reading into to the correct location. 4111817SN/A data += size; 4121817SN/A //Adjust the size to get the remaining bytes. 
4131817SN/A size = addr + fullSize - secondAddr; 4141817SN/A //And access the right address. 4151817SN/A addr = secondAddr; 4161817SN/A } 4171817SN/A} 4181817SN/A 4191817SN/AFault 4201817SN/AAtomicSimpleCPU::initiateMemRead(Addr addr, unsigned size, 421771SN/A Request::Flags flags) 422771SN/A{ 423771SN/A panic("initiateMemRead() is for timing accesses, and should " 4241817SN/A "never be called on AtomicSimpleCPU.\n"); 425771SN/A} 426771SN/A 427771SN/AFault 428771SN/AAtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, Addr addr, 4292539SN/A Request::Flags flags, uint64_t *res) 4302539SN/A{ 4312542SN/A SimpleExecContext& t_info = *threadInfo[curThread]; 432768SN/A SimpleThread* thread = t_info.thread; 4333846Shsul@eecs.umich.edu static uint8_t zero_array[64] = {}; 434909SN/A 435803SN/A if (data == NULL) { 436803SN/A assert(size <= 64); 437803SN/A assert(flags & Request::STORE_NO_DATA); 438771SN/A // This must be a cache block cleaning request 4392542SN/A data = zero_array; 440777SN/A } 441777SN/A 442773SN/A // use the CPU's statically allocated write request and packet objects 443773SN/A const RequestPtr &req = data_write_req; 4441634SN/A 4451634SN/A if (traceData) 4461634SN/A traceData->setMem(addr, size, flags); 4472539SN/A 4481634SN/A //The size of the data we're trying to read. 4491634SN/A int fullSize = size; 4502542SN/A 4513349SN/A //The address of the second part of this access if it needs to be split 452768SN/A //across a cache line boundary. 
4532641SN/A Addr secondAddr = roundDown(addr + size - 1, cacheLineSize()); 4542641SN/A 455768SN/A if (secondAddr > addr) 4562641SN/A size = secondAddr - addr; 457865SN/A 4582641SN/A dcache_latency = 0; 4592641SN/A 460771SN/A req->taskId(taskId()); 4612630SN/A while (1) { 4622539SN/A req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr()); 4632641SN/A 464803SN/A // translate to physical address 4651817SN/A Fault fault = thread->dtb->translateAtomic(req, thread->getTC(), BaseTLB::Write); 4661817SN/A 4672630SN/A // Now do the access. 4682539SN/A if (fault == NoFault) { 4691817SN/A bool do_access = true; // flag to suppress cache access 4702630SN/A 4712539SN/A if (req->isLLSC()) { 472865SN/A do_access = TheISA::handleLockedWrite(thread, req, dcachePort.cacheBlockMask); 473865SN/A } else if (req->isSwap()) { 474865SN/A if (req->isCondSwap()) { 475865SN/A assert(res); 4762630SN/A req->setExtraData(*res); 4772539SN/A } 478865SN/A } 479865SN/A 4802630SN/A if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) { 4812539SN/A Packet pkt(req, Packet::makeWriteCmd(req)); 4821817SN/A pkt.dataStatic(data); 4832648SN/A 4842542SN/A if (req->isMmappedIpr()) { 4851817SN/A dcache_latency += 4862648SN/A TheISA::handleIprWrite(thread->getTC(), &pkt); 4872542SN/A } else { 4881817SN/A dcache_latency += sendPacket(dcachePort, &pkt); 4892648SN/A 4902539SN/A // Notify other threads on this CPU of write 491803SN/A threadSnoop(&pkt, curThread); 4922648SN/A } 4932539SN/A dcache_access = true; 4941817SN/A assert(!pkt.isError()); 4951817SN/A 4962630SN/A if (req->isSwap()) { 4971817SN/A assert(res); 4982630SN/A memcpy(res, pkt.getConstPtr<uint8_t>(), fullSize); 4992539SN/A } 500803SN/A } 5012641SN/A 502803SN/A if (res && !req->isSwap()) { 5032641SN/A *res = req->getExtraData(); 5042539SN/A } 5052630SN/A } 5062539SN/A 5072539SN/A //If there's a fault or we don't need to access a second cache line, 5082641SN/A //stop now. 
5092539SN/A if (fault != NoFault || secondAddr <= addr) 5102641SN/A { 511771SN/A if (req->isLockedRMW() && fault == NoFault) { 5122641SN/A assert(locked); 5132539SN/A locked = false; 514768SN/A } 515768SN/A 5162539SN/A 5173349SN/A if (fault != NoFault && req->isPrefetch()) { 518768SN/A return NoFault; 5192641SN/A } else { 5202641SN/A return fault; 5212641SN/A } 522779SN/A } 523779SN/A 5242641SN/A /* 525768SN/A * Set up for accessing the second cache line. 5262641SN/A */ 527769SN/A 5282539SN/A //Move the pointer we're reading into to the correct location. 5292539SN/A data += size; 5302630SN/A //Adjust the size to get the remaining bytes. 5312539SN/A size = addr + fullSize - secondAddr; 5322539SN/A //And access the right address. 5332539SN/A addr = secondAddr; 5342539SN/A } 535803SN/A} 5362539SN/A 5372539SN/A 5382539SN/Avoid 5392539SN/AAtomicSimpleCPU::tick() 5402539SN/A{ 5412539SN/A DPRINTF(SimpleCPU, "Tick\n"); 5422539SN/A 5432630SN/A // Change thread if multi-threaded 5442539SN/A swapActiveThread(); 5452539SN/A 5462539SN/A // Set memroy request ids to current thread 5472539SN/A if (numThreads > 1) { 5482630SN/A ContextID cid = threadContexts[curThread]->contextId(); 5492539SN/A 5502539SN/A ifetch_req->setContext(cid); 5512539SN/A data_read_req->setContext(cid); 5522539SN/A data_write_req->setContext(cid); 5532630SN/A } 5542539SN/A 5552539SN/A SimpleExecContext& t_info = *threadInfo[curThread]; 5562630SN/A SimpleThread* thread = t_info.thread; 5572539SN/A 5582539SN/A Tick latency = 0; 5592630SN/A 5602539SN/A for (int i = 0; i < width || locked; ++i) { 5612539SN/A numCycles++; 5622630SN/A updateCycleCounters(BaseCPU::CPU_STATE_ON); 5632539SN/A 5642539SN/A if (!curStaticInst || !curStaticInst->isDelayedCommit()) { 5652630SN/A checkForInterrupts(); 5662539SN/A checkPcEventQueue(); 5672539SN/A } 5682630SN/A 5692539SN/A // We must have just got suspended by a PC event 5702539SN/A if (_status == Idle) { 5712630SN/A tryCompleteDrain(); 5722539SN/A return; 5732539SN/A } 
5742630SN/A 5752539SN/A Fault fault = NoFault; 5762539SN/A 5772539SN/A TheISA::PCState pcState = thread->pcState(); 5782539SN/A 5792539SN/A bool needToFetch = !isRomMicroPC(pcState.microPC()) && 5802539SN/A !curMacroStaticInst; 5812539SN/A if (needToFetch) { 5822539SN/A ifetch_req->taskId(taskId()); 5832539SN/A setupFetchRequest(ifetch_req); 5842539SN/A fault = thread->itb->translateAtomic(ifetch_req, thread->getTC(), 5852539SN/A BaseTLB::Execute); 5862539SN/A } 5872539SN/A 588803SN/A if (fault == NoFault) { 5892641SN/A Tick icache_latency = 0; 590769SN/A bool icache_access = false; 591769SN/A dcache_access = false; // assume no dcache access 5922641SN/A 5932539SN/A if (needToFetch) { 594768SN/A // This is commented out because the decoder would act like 595768SN/A // a tiny cache otherwise. It wouldn't be flushed when needed 596768SN/A // like the I cache. It should be flushed, and when that works 597777SN/A // this code should be uncommented. 598777SN/A //Fetch more instruction memory if necessary 599777SN/A //if (decoder.needMoreBytes()) 600777SN/A //{ 601865SN/A icache_access = true; 602817SN/A Packet ifetch_pkt = Packet(ifetch_req, MemCmd::ReadReq); 603777SN/A ifetch_pkt.dataStatic(&inst); 604777SN/A 605777SN/A icache_latency = sendPacket(icachePort, &ifetch_pkt); 606777SN/A 607777SN/A assert(!ifetch_pkt.isError()); 608777SN/A 609777SN/A // ifetch_req is initialized to read the instruction directly 610777SN/A // into the CPU object's inst field. 
611777SN/A //} 612777SN/A } 613817SN/A 614777SN/A preExecute(); 615777SN/A 616777SN/A Tick stall_ticks = 0; 617777SN/A if (curStaticInst) { 618777SN/A fault = curStaticInst->execute(&t_info, traceData); 6191854SN/A 620768SN/A // keep an instruction count 621811SN/A if (fault == NoFault) { 622899SN/A countInst(); 623899SN/A ppCommit->notify(std::make_pair(thread, curStaticInst)); 624899SN/A } 625899SN/A else if (traceData && !DTRACE(ExecFaulting)) { 626811SN/A delete traceData; 627811SN/A traceData = NULL; 628811SN/A } 629919SN/A 6301854SN/A if (fault != NoFault && 6311854SN/A dynamic_pointer_cast<SyscallRetryFault>(fault)) { 632768SN/A // Retry execution of system calls after a delay. 633768SN/A // Prevents immediate re-execution since conditions which 634768SN/A // caused the retry are unlikely to change every tick. 6351854SN/A stall_ticks += clockEdge(syscallRetryLatency) - curTick(); 636768SN/A } 637811SN/A 638899SN/A postExecute(); 639899SN/A } 640899SN/A 641899SN/A // @todo remove me after debugging with legion done 642811SN/A if (curStaticInst && (!curStaticInst->isMicroop() || 643811SN/A curStaticInst->isFirstMicroop())) 644919SN/A instCnt++; 645919SN/A 6461854SN/A if (simulate_inst_stalls && icache_access) 6471854SN/A stall_ticks += icache_latency; 648768SN/A 649768SN/A if (simulate_data_stalls && dcache_access) 650770SN/A stall_ticks += dcache_latency; 651768SN/A 6522539SN/A if (stall_ticks) { 6532539SN/A // the atomic cpu does its accounting in ticks, so 6542539SN/A // keep counting in ticks but round to the clock 6552539SN/A // period 6562539SN/A latency += divCeil(stall_ticks, clockPeriod()) * 6572539SN/A clockPeriod(); 658775SN/A } 659768SN/A 660770SN/A } 661768SN/A if (fault != NoFault || !t_info.stayAtPC) 662770SN/A advancePC(fault); 663768SN/A } 6642539SN/A 6652539SN/A if (tryCompleteDrain()) 6662542SN/A return; 6672539SN/A 6682539SN/A // instruction takes at least one cycle 6691634SN/A if (latency < clockPeriod()) 6702539SN/A latency = 
clockPeriod(); 671768SN/A 672770SN/A if (_status != Idle) 673768SN/A reschedule(tickEvent, curTick() + latency, true); 674770SN/A} 675768SN/A 6762539SN/Avoid 6772539SN/AAtomicSimpleCPU::regProbePoints() 6782539SN/A{ 6792539SN/A BaseCPU::regProbePoints(); 6802539SN/A 6812539SN/A ppCommit = new ProbePointArg<pair<SimpleThread*, const StaticInstPtr>> 6822539SN/A (getProbeManager(), "Commit"); 6832539SN/A} 6842539SN/A 6852539SN/Avoid 686768SN/AAtomicSimpleCPU::printAddr(Addr a) 687768SN/A{ 688770SN/A dcachePort.printAddr(a); 689} 690 691//////////////////////////////////////////////////////////////////////// 692// 693// AtomicSimpleCPU Simulation Object 694// 695AtomicSimpleCPU * 696AtomicSimpleCPUParams::create() 697{ 698 return new AtomicSimpleCPU(this); 699} 700