// atomic.cc — gem5 AtomicSimpleCPU implementation (revision 9647)
12623SN/A/* 28926Sandreas.hansson@arm.com * Copyright (c) 2012 ARM Limited 38926Sandreas.hansson@arm.com * All rights reserved. 48926Sandreas.hansson@arm.com * 58926Sandreas.hansson@arm.com * The license below extends only to copyright in the software and shall 68926Sandreas.hansson@arm.com * not be construed as granting a license to any other intellectual 78926Sandreas.hansson@arm.com * property including but not limited to intellectual property relating 88926Sandreas.hansson@arm.com * to a hardware implementation of the functionality of the software 98926Sandreas.hansson@arm.com * licensed hereunder. You may use the software subject to the license 108926Sandreas.hansson@arm.com * terms below provided that you ensure that this notice is replicated 118926Sandreas.hansson@arm.com * unmodified and in its entirety in all distributions of the software, 128926Sandreas.hansson@arm.com * modified or unmodified, in source code or in binary form. 138926Sandreas.hansson@arm.com * 142623SN/A * Copyright (c) 2002-2005 The Regents of The University of Michigan 152623SN/A * All rights reserved. 162623SN/A * 172623SN/A * Redistribution and use in source and binary forms, with or without 182623SN/A * modification, are permitted provided that the following conditions are 192623SN/A * met: redistributions of source code must retain the above copyright 202623SN/A * notice, this list of conditions and the following disclaimer; 212623SN/A * redistributions in binary form must reproduce the above copyright 222623SN/A * notice, this list of conditions and the following disclaimer in the 232623SN/A * documentation and/or other materials provided with the distribution; 242623SN/A * neither the name of the copyright holders nor the names of its 252623SN/A * contributors may be used to endorse or promote products derived from 262623SN/A * this software without specific prior written permission. 
272623SN/A * 282623SN/A * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 292623SN/A * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 302623SN/A * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 312623SN/A * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 322623SN/A * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 332623SN/A * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 342623SN/A * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 352623SN/A * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 362623SN/A * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 372623SN/A * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 382623SN/A * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 392665Ssaidi@eecs.umich.edu * 402665Ssaidi@eecs.umich.edu * Authors: Steve Reinhardt 412623SN/A */ 422623SN/A 433170Sstever@eecs.umich.edu#include "arch/locked_mem.hh" 448105Sgblack@eecs.umich.edu#include "arch/mmapped_ipr.hh" 452623SN/A#include "arch/utility.hh" 464040Ssaidi@eecs.umich.edu#include "base/bigint.hh" 479647Sdam.sunwoo@arm.com#include "base/output.hh" 486658Snate@binkert.org#include "config/the_isa.hh" 498229Snate@binkert.org#include "cpu/simple/atomic.hh" 502623SN/A#include "cpu/exetrace.hh" 519443SAndreas.Sandberg@ARM.com#include "debug/Drain.hh" 528232Snate@binkert.org#include "debug/ExecFaulting.hh" 538232Snate@binkert.org#include "debug/SimpleCPU.hh" 543348Sbinkertn@umich.edu#include "mem/packet.hh" 553348Sbinkertn@umich.edu#include "mem/packet_access.hh" 568926Sandreas.hansson@arm.com#include "mem/physical.hh" 574762Snate@binkert.org#include "params/AtomicSimpleCPU.hh" 587678Sgblack@eecs.umich.edu#include "sim/faults.hh" 592901Ssaidi@eecs.umich.edu#include "sim/system.hh" 608779Sgblack@eecs.umich.edu#include "sim/full_system.hh" 612623SN/A 
using namespace std;
using namespace TheISA;

// Tick event: fires once per scheduled CPU cycle and simply forwards to
// AtomicSimpleCPU::tick(), which does all the real work.
AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

// Post-construction initialization: set up memory proxies, initialize the
// architectural state of each thread context (full-system only), and bind
// the statically allocated fetch/read/write requests to this CPU's context.
void
AtomicSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    // Only initialize architectural CPU state (incl. PC) when this CPU
    // will actually run; a switched-out CPU is initialized on take-over.
    if (FullSystem && !params()->switched_out) {
        ThreadID size = threadContexts.size();
        for (ThreadID i = 0; i < size; ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, tc->contextId());
        }
    }

    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      drain_manager(NULL),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem),
      simpoint(p->simpoint_profile),
      intervalSize(p->simpoint_interval),
      intervalCount(0),
      intervalDrift(0),
      simpointStream(NULL),
      currentBBV(0, 0),
      currentBBVInstCount(0)
{
    _status = Idle;

    // Open the SimPoint basic-block-vector output file only when profiling
    // was requested; simpointStream stays NULL otherwise.
    if (simpoint) {
        simpointStream = simout.create(p->simpoint_profile_file, false);
    }
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
    if (simpointStream) {
        simout.close(simpointStream);
    }
}

// Drain request handler. Returns the number of outstanding drain
// operations: 0 if the CPU is already drained (or switched out),
// 1 if it must finish in-flight microcode first, in which case the
// DrainManager is remembered and signalled later by tryCompleteDrain().
unsigned int
AtomicSimpleCPU::drain(DrainManager *dm)
{
    assert(!drain_manager);
    if (switchedOut())
        return 0;

    if (!isDrained()) {
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());
        drain_manager = dm;
        return 1;
    } else {
        // Already at an instruction boundary; just stop ticking.
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return 0;
    }
}

// Resume after a drain: re-verify the memory mode and restart the tick
// event if the (single) thread is active.
void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    assert(!drain_manager);
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());
    if (threadContexts.size() > 1)
        fatal("The atomic CPU only supports one thread.\n");

    if (thread->status() == ThreadContext::Active) {
        schedule(tickEvent, nextCycle());
        _status = BaseSimpleCPU::Running;
    } else {
        _status = BaseSimpleCPU::Idle;
    }

    system->totalNumInsts = 0;
}

// Signal the pending DrainManager if a drain was requested and the CPU
// has now reached a drained (instruction-boundary) state. Returns true
// iff the drain completed here, i.e. the caller should stop ticking.
bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (!drain_manager)
        return false;

    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    drain_manager->signalDrainDone();
    drain_manager = NULL;

    return true;
}


void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    // Switching out is only legal from a quiesced, drained state.
    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}


// Take over execution state from another CPU model (e.g. after a
// detailed->atomic switch); re-binds the static requests to this CPU.
void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());

    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}

// Wake the (single) thread context and schedule the first tick after
// 'delay' cycles, aligned to a clock edge.
void
AtomicSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    // Credit the cycles spent suspended so numCycles tracks wall time.
    numCycles += ticksToCycles(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, clockEdge(delay));
    _status = BaseSimpleCPU::Running;
}


// Suspend the thread context: stop ticking and mark the CPU idle.
// A no-op if the CPU is already idle.
void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction--;
    _status = Idle;
}


// Atomic-mode data read. Translates, accesses memory (possibly split
// across a cache-line boundary into two sub-accesses), accumulates the
// access latency in dcache_latency, and handles LL/SC bookkeeping.
// Prefetch faults are swallowed and reported as NoFault.
Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data,
                         unsigned size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    unsigned blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, blockSize);

    // If the access crosses a block boundary, shrink the first piece to
    // end exactly at the boundary; the loop below handles the remainder.
    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt = Packet(req,
                                req->isLLSC() ? MemCmd::LoadLockedReq :
                                MemCmd::ReadReq);
            pkt.dataStatic(data);

            if (req->isMmappedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                // fastmem bypasses the port and accesses physical memory
                // directly when the address is backed by it.
                if (fastmem && system->isMemAddr(pkt.getAddr()))
                    system->getPhysMem().access(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


// Atomic-mode data write; mirrors readMem() including the split-access
// loop. Additionally handles store-conditional (LL/SC), swap and
// conditional-swap commands, returning the old value through 'res'.
Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    unsigned blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, blockSize);

    if(secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    while(1) {
        req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                // A failed store-conditional suppresses the actual access.
                do_access = TheISA::handleLockedWrite(thread, req);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (fastmem && system->isMemAddr(pkt.getAddr()))
                        system->getPhysMem().access(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    // Return the pre-swap memory contents to the caller.
                    memcpy(res, pkt.getPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                // e.g. store-conditional success/failure result.
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


// Main execution loop: fetch, decode, and execute up to 'width'
// instructions per tick (continuing while an LL/SC 'locked' sequence is
// in progress), then reschedule the tick event after the accumulated
// latency (at least one clock period).
void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();
        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        // Only fetch from memory when not executing out of the microcode
        // ROM and not in the middle of a macro-op.
        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if(decoder.needMoreBytes())
                //{
                icache_access = true;
                Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
                ifetch_pkt.dataStatic(&inst);

                if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
                    system->getPhysMem().access(&ifetch_pkt);
                else
                    icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction directly
                // into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData && !DTRACE(ExecFaulting)) {
                    // Faulting instructions aren't traced unless the
                    // ExecFaulting debug flag is enabled.
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
                instCnt++;

            // profile for SimPoints if enabled and macro inst is finished
            if (simpoint && curStaticInst && (fault == NoFault) &&
                    (!curStaticInst->isMicroop() ||
                     curStaticInst->isLastMicroop())) {
                profileSimPoint();
            }

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                    clockPeriod();
            }

        }
        if(fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        schedule(tickEvent, curTick() + latency);
}


void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

// SimPoint basic-block-vector (BBV) profiling, called once per completed
// macro instruction. Basic blocks are keyed by (start PC, end PC); at the
// end of each interval the per-block execution counts are written out in
// the standard "T:id:count ..." BBV line format.
void
AtomicSimpleCPU::profileSimPoint()
{
    if (!currentBBVInstCount)
        currentBBV.first = thread->pcState().instAddr();

    ++intervalCount;
    ++currentBBVInstCount;

    // If inst is control inst, assume end of basic block.
    if (curStaticInst->isControl()) {
        currentBBV.second = thread->pcState().instAddr();

        auto map_itr = bbMap.find(currentBBV);
        if (map_itr == bbMap.end()){
            // If a new (previously unseen) basic block is found,
            // add a new unique id, record num of insts and insert into bbMap.
            BBInfo info;
            info.id = bbMap.size() + 1;
            info.insts = currentBBVInstCount;
            info.count = currentBBVInstCount;
            bbMap.insert(std::make_pair(currentBBV, info));
        } else {
            // If basic block is seen before, just increment the count by the
            // number of insts in basic block.
            BBInfo& info = map_itr->second;
            assert(info.insts == currentBBVInstCount);
            info.count += currentBBVInstCount;
        }
        currentBBVInstCount = 0;

        // Reached end of interval if the sum of the current inst count
        // (intervalCount) and the excessive inst count from the previous
        // interval (intervalDrift) is greater than/equal to the interval size.
        if (intervalCount + intervalDrift >= intervalSize) {
            // summarize interval and display BBV info
            std::vector<pair<uint64_t, uint64_t> > counts;
            for (auto map_itr = bbMap.begin(); map_itr != bbMap.end();
                    ++map_itr) {
                BBInfo& info = map_itr->second;
                if (info.count != 0) {
                    counts.push_back(std::make_pair(info.id, info.count));
                    info.count = 0;
                }
            }
            std::sort(counts.begin(), counts.end());

            // Print output BBV info
            *simpointStream << "T";
            for (auto cnt_itr = counts.begin(); cnt_itr != counts.end();
                    ++cnt_itr) {
                *simpointStream << ":" << cnt_itr->first
                                << ":" << cnt_itr->second << " ";
            }
            *simpointStream << "\n";

            intervalDrift = (intervalCount + intervalDrift) - intervalSize;
            intervalCount = 0;
        }
    }
}

////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new AtomicSimpleCPU(this);
}