// atomic.cc revision 10563
/*
 * Copyright (c) 2012-2013 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/simple/atomic.hh"
#include "cpu/exetrace.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"
#include "sim/full_system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->switched_out) {
        ThreadID size = threadContexts.size();
        for (ThreadID i = 0; i < size; ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, tc->contextId());
        }
    }

    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      drain_manager(NULL),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem), dcache_access(false), dcache_latency(0),
      ppCommit(nullptr)
{
    _status = Idle;
}

AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

unsigned int
AtomicSimpleCPU::drain(DrainManager *dm)
{
    assert(!drain_manager);
    if (switchedOut())
        return 0;

    if (!isDrained()) {
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());
        drain_manager = dm;
        return 1;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return 0;
    }
}

void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    assert(!drain_manager);
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());
    if (threadContexts.size() > 1)
        fatal("The atomic CPU only supports one thread.\n");

    if (thread->status() == ThreadContext::Active) {
        schedule(tickEvent, nextCycle());
        _status = BaseSimpleCPU::Running;
        notIdleFraction = 1;
    } else {
        _status = BaseSimpleCPU::Idle;
        notIdleFraction = 0;
    }

    system->totalNumInsts = 0;
}

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (!drain_manager)
        return false;

    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    drain_manager->signalDrainDone();
    drain_manager = NULL;

    return true;
}


void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

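    // Switching out requires the CPU to be fully drained: the tick event
    // must be descheduled and no instruction may be partially executed
    // (see isDrained()), which the assertions below verify.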
    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());

    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}

void
AtomicSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction = 1;
    Cycles delta = ticksToCycles(thread->lastActivate - thread->lastSuspend);
    numCycles += delta;
    ppCycles->notify(delta);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, clockEdge(Cycles(0)));
    _status = BaseSimpleCPU::Running;
}


void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction = 0;
    _status = Idle;
}


Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    if (cpu->getAddrMonitor()->doMonitor(pkt)) {
        cpu->wakeup();
    }

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
    }

    return 0;
}

void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    if (cpu->getAddrMonitor()->doMonitor(pkt)) {
        cpu->wakeup();
    }

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
    }
}

Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data,
                         unsigned size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt(req, MemCmd::ReadReq);
            pkt.refineCommand();
            pkt.dataStatic(data);

            if (req->isMmappedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (fastmem && system->isMemAddr(pkt.getAddr()))
                    system->getPhysMem().access(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
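        // (Example, assuming a 64-byte cache line: an 8-byte read at
        // address 0x3c has secondAddr == 0x40, so the first pass covered
        // 0x3c-0x3f and this second pass reads the remaining bytes at
        // 0x40-0x43.)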
        addr = secondAddr;
    }
}


Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{

    static uint8_t zero_array[64] = {};

    if (data == NULL) {
        assert(size <= 64);
        assert(flags & Request::CACHE_BLOCK_ZERO);
        // This must be a cache block cleaning request
        data = zero_array;
    }

    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The size of the data we're trying to write.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
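        // An LL/SC store is issued as a StoreCondReq and a swap as a
        // SwapReq (a conditional swap also carries the compare value in
        // the request's extra data); all other stores are plain WriteReqs.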
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req,
                                                      dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (fastmem && system->isMemAddr(pkt.getAddr()))
                        system->getPhysMem().access(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getConstPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;
        ppCycles->notify(1);

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();
        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req.taskId(taskId());
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if(decoder.needMoreBytes())
                //{
                    icache_access = true;
                    Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
                    ifetch_pkt.dataStatic(&inst);

                    if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
                        system->getPhysMem().access(&ifetch_pkt);
                    else
                        icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                    assert(!ifetch_pkt.isError());

                    // ifetch_req is initialized to read the instruction directly
                    // into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault) {
                    countInst();
                    if (!curStaticInst->isMicroop() ||
                        curStaticInst->isLastMicroop()) {
                        ppCommit->notify(std::make_pair(thread, curStaticInst));
                    }
                }
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                           clockPeriod();
            }

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        schedule(tickEvent, curTick() + latency);
}

void
AtomicSimpleCPU::regProbePoints()
{
    BaseCPU::regProbePoints();

    ppCommit = new ProbePointArg<pair<SimpleThread*, const StaticInstPtr>>
        (getProbeManager(), "Commit");
}

void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new AtomicSimpleCPU(this);
}
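// Note: AtomicSimpleCPUParams::create() is invoked when the Python
// configuration instantiates an AtomicSimpleCPU SimObject; the params
// object carries the values chosen in the config script (width, fastmem,
// simulate_data_stalls, simulate_inst_stalls, etc.).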