// atomic.cc, gem5 revision 8706
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
272665Ssaidi@eecs.umich.edu * 282665Ssaidi@eecs.umich.edu * Authors: Steve Reinhardt 292623SN/A */ 302623SN/A 313170Sstever@eecs.umich.edu#include "arch/locked_mem.hh" 328105Sgblack@eecs.umich.edu#include "arch/mmapped_ipr.hh" 332623SN/A#include "arch/utility.hh" 344040Ssaidi@eecs.umich.edu#include "base/bigint.hh" 356658Snate@binkert.org#include "config/the_isa.hh" 368229Snate@binkert.org#include "cpu/simple/atomic.hh" 372623SN/A#include "cpu/exetrace.hh" 388232Snate@binkert.org#include "debug/ExecFaulting.hh" 398232Snate@binkert.org#include "debug/SimpleCPU.hh" 403348Sbinkertn@umich.edu#include "mem/packet.hh" 413348Sbinkertn@umich.edu#include "mem/packet_access.hh" 424762Snate@binkert.org#include "params/AtomicSimpleCPU.hh" 437678Sgblack@eecs.umich.edu#include "sim/faults.hh" 442901Ssaidi@eecs.umich.edu#include "sim/system.hh" 452623SN/A 462623SN/Ausing namespace std; 472623SN/Ausing namespace TheISA; 482623SN/A 492623SN/AAtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c) 505606Snate@binkert.org : Event(CPU_Tick_Pri), cpu(c) 512623SN/A{ 522623SN/A} 532623SN/A 542623SN/A 552623SN/Avoid 562623SN/AAtomicSimpleCPU::TickEvent::process() 572623SN/A{ 582623SN/A cpu->tick(); 592623SN/A} 602623SN/A 612623SN/Aconst char * 625336Shines@cs.fsu.eduAtomicSimpleCPU::TickEvent::description() const 632623SN/A{ 644873Sstever@eecs.umich.edu return "AtomicSimpleCPU tick"; 652623SN/A} 662623SN/A 672856Srdreslin@umich.eduPort * 686227Snate@binkert.orgAtomicSimpleCPU::getPort(const string &if_name, int idx) 692856Srdreslin@umich.edu{ 702856Srdreslin@umich.edu if (if_name == "dcache_port") 712856Srdreslin@umich.edu return &dcachePort; 722856Srdreslin@umich.edu else if (if_name == "icache_port") 732856Srdreslin@umich.edu return &icachePort; 744968Sacolyte@umich.edu else if (if_name == "physmem_port") { 754968Sacolyte@umich.edu hasPhysMemPort = true; 764968Sacolyte@umich.edu return &physmemPort; 774968Sacolyte@umich.edu } 782856Srdreslin@umich.edu else 792856Srdreslin@umich.edu 
panic("No Such Port\n"); 802856Srdreslin@umich.edu} 812623SN/A 822623SN/Avoid 832623SN/AAtomicSimpleCPU::init() 842623SN/A{ 852623SN/A BaseCPU::init(); 862623SN/A#if FULL_SYSTEM 876221Snate@binkert.org ThreadID size = threadContexts.size(); 886221Snate@binkert.org for (ThreadID i = 0; i < size; ++i) { 892680Sktlim@umich.edu ThreadContext *tc = threadContexts[i]; 902623SN/A 912623SN/A // initialize CPU, including PC 925714Shsul@eecs.umich.edu TheISA::initCPU(tc, tc->contextId()); 932623SN/A } 948706Sandreas.hansson@arm.com 958706Sandreas.hansson@arm.com // Initialise the ThreadContext's memory proxies 968706Sandreas.hansson@arm.com tcBase()->initMemProxies(tcBase()); 972623SN/A#endif 984968Sacolyte@umich.edu if (hasPhysMemPort) { 994968Sacolyte@umich.edu bool snoop = false; 1004968Sacolyte@umich.edu AddrRangeList pmAddrList; 1014968Sacolyte@umich.edu physmemPort.getPeerAddressRanges(pmAddrList, snoop); 1024968Sacolyte@umich.edu physMemAddr = *pmAddrList.begin(); 1034968Sacolyte@umich.edu } 1045714Shsul@eecs.umich.edu // Atomic doesn't do MT right now, so contextId == threadId 1055712Shsul@eecs.umich.edu ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT 1065712Shsul@eecs.umich.edu data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too 1075712Shsul@eecs.umich.edu data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too 1082623SN/A} 1092623SN/A 1102623SN/Abool 1113349Sbinkertn@umich.eduAtomicSimpleCPU::CpuPort::recvTiming(PacketPtr pkt) 1122623SN/A{ 1133184Srdreslin@umich.edu panic("AtomicSimpleCPU doesn't expect recvTiming callback!"); 1142623SN/A return true; 1152623SN/A} 1162623SN/A 1172623SN/ATick 1183349Sbinkertn@umich.eduAtomicSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt) 1192623SN/A{ 1203310Srdreslin@umich.edu //Snooping a coherence request, just return 1213649Srdreslin@umich.edu return 0; 1222623SN/A} 1232623SN/A 1242623SN/Avoid 1253349Sbinkertn@umich.eduAtomicSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt) 
1262623SN/A{ 1273184Srdreslin@umich.edu //No internal storage to update, just return 1283184Srdreslin@umich.edu return; 1292623SN/A} 1302623SN/A 1312623SN/Avoid 1322623SN/AAtomicSimpleCPU::CpuPort::recvStatusChange(Status status) 1332623SN/A{ 1343647Srdreslin@umich.edu if (status == RangeChange) { 1353647Srdreslin@umich.edu if (!snoopRangeSent) { 1363647Srdreslin@umich.edu snoopRangeSent = true; 1373647Srdreslin@umich.edu sendStatusChange(Port::RangeChange); 1383647Srdreslin@umich.edu } 1392626SN/A return; 1403647Srdreslin@umich.edu } 1412626SN/A 1422623SN/A panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!"); 1432623SN/A} 1442623SN/A 1452657Ssaidi@eecs.umich.eduvoid 1462623SN/AAtomicSimpleCPU::CpuPort::recvRetry() 1472623SN/A{ 1482623SN/A panic("AtomicSimpleCPU doesn't expect recvRetry callback!"); 1492623SN/A} 1502623SN/A 1515529Snate@binkert.orgAtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p) 1526078Sgblack@eecs.umich.edu : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false), 1535487Snate@binkert.org simulate_data_stalls(p->simulate_data_stalls), 1545487Snate@binkert.org simulate_inst_stalls(p->simulate_inst_stalls), 1554968Sacolyte@umich.edu icachePort(name() + "-iport", this), dcachePort(name() + "-iport", this), 1564968Sacolyte@umich.edu physmemPort(name() + "-iport", this), hasPhysMemPort(false) 1572623SN/A{ 1582623SN/A _status = Idle; 1592623SN/A 1603647Srdreslin@umich.edu icachePort.snoopRangeSent = false; 1613647Srdreslin@umich.edu dcachePort.snoopRangeSent = false; 1623647Srdreslin@umich.edu 1632623SN/A} 1642623SN/A 1652623SN/A 1662623SN/AAtomicSimpleCPU::~AtomicSimpleCPU() 1672623SN/A{ 1686775SBrad.Beckmann@amd.com if (tickEvent.scheduled()) { 1696775SBrad.Beckmann@amd.com deschedule(tickEvent); 1706775SBrad.Beckmann@amd.com } 1712623SN/A} 1722623SN/A 1732623SN/Avoid 1742623SN/AAtomicSimpleCPU::serialize(ostream &os) 1752623SN/A{ 1762915Sktlim@umich.edu SimObject::State so_state = SimObject::getState(); 
1772915Sktlim@umich.edu SERIALIZE_ENUM(so_state); 1786078Sgblack@eecs.umich.edu SERIALIZE_SCALAR(locked); 1793145Shsul@eecs.umich.edu BaseSimpleCPU::serialize(os); 1802623SN/A nameOut(os, csprintf("%s.tickEvent", name())); 1812623SN/A tickEvent.serialize(os); 1822623SN/A} 1832623SN/A 1842623SN/Avoid 1852623SN/AAtomicSimpleCPU::unserialize(Checkpoint *cp, const string §ion) 1862623SN/A{ 1872915Sktlim@umich.edu SimObject::State so_state; 1882915Sktlim@umich.edu UNSERIALIZE_ENUM(so_state); 1896078Sgblack@eecs.umich.edu UNSERIALIZE_SCALAR(locked); 1903145Shsul@eecs.umich.edu BaseSimpleCPU::unserialize(cp, section); 1912915Sktlim@umich.edu tickEvent.unserialize(cp, csprintf("%s.tickEvent", section)); 1922915Sktlim@umich.edu} 1932915Sktlim@umich.edu 1942915Sktlim@umich.eduvoid 1952915Sktlim@umich.eduAtomicSimpleCPU::resume() 1962915Sktlim@umich.edu{ 1975220Ssaidi@eecs.umich.edu if (_status == Idle || _status == SwitchedOut) 1985220Ssaidi@eecs.umich.edu return; 1995220Ssaidi@eecs.umich.edu 2004940Snate@binkert.org DPRINTF(SimpleCPU, "Resume\n"); 2015220Ssaidi@eecs.umich.edu assert(system->getMemoryMode() == Enums::atomic); 2023324Shsul@eecs.umich.edu 2035220Ssaidi@eecs.umich.edu changeState(SimObject::Running); 2045220Ssaidi@eecs.umich.edu if (thread->status() == ThreadContext::Active) { 2055606Snate@binkert.org if (!tickEvent.scheduled()) 2065606Snate@binkert.org schedule(tickEvent, nextCycle()); 2072915Sktlim@umich.edu } 2087897Shestness@cs.utexas.edu system->totalNumInsts = 0; 2092623SN/A} 2102623SN/A 2112623SN/Avoid 2122798Sktlim@umich.eduAtomicSimpleCPU::switchOut() 2132623SN/A{ 2145496Ssaidi@eecs.umich.edu assert(_status == Running || _status == Idle); 2152798Sktlim@umich.edu _status = SwitchedOut; 2162623SN/A 2172798Sktlim@umich.edu tickEvent.squash(); 2182623SN/A} 2192623SN/A 2202623SN/A 2212623SN/Avoid 2222623SN/AAtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU) 2232623SN/A{ 2244192Sktlim@umich.edu BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort); 2252623SN/A 
2262623SN/A assert(!tickEvent.scheduled()); 2272623SN/A 2282680Sktlim@umich.edu // if any of this CPU's ThreadContexts are active, mark the CPU as 2292623SN/A // running and schedule its tick event. 2306221Snate@binkert.org ThreadID size = threadContexts.size(); 2316221Snate@binkert.org for (ThreadID i = 0; i < size; ++i) { 2322680Sktlim@umich.edu ThreadContext *tc = threadContexts[i]; 2332680Sktlim@umich.edu if (tc->status() == ThreadContext::Active && _status != Running) { 2342623SN/A _status = Running; 2355606Snate@binkert.org schedule(tickEvent, nextCycle()); 2362623SN/A break; 2372623SN/A } 2382623SN/A } 2393512Sktlim@umich.edu if (_status != Running) { 2403512Sktlim@umich.edu _status = Idle; 2413512Sktlim@umich.edu } 2425169Ssaidi@eecs.umich.edu assert(threadContexts.size() == 1); 2435712Shsul@eecs.umich.edu ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT 2445712Shsul@eecs.umich.edu data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too 2455712Shsul@eecs.umich.edu data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too 2462623SN/A} 2472623SN/A 2482623SN/A 2492623SN/Avoid 2502623SN/AAtomicSimpleCPU::activateContext(int thread_num, int delay) 2512623SN/A{ 2524940Snate@binkert.org DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay); 2534940Snate@binkert.org 2542623SN/A assert(thread_num == 0); 2552683Sktlim@umich.edu assert(thread); 2562623SN/A 2572623SN/A assert(_status == Idle); 2582623SN/A assert(!tickEvent.scheduled()); 2592623SN/A 2602623SN/A notIdleFraction++; 2615101Ssaidi@eecs.umich.edu numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend); 2623686Sktlim@umich.edu 2633430Sgblack@eecs.umich.edu //Make sure ticks are still on multiples of cycles 2647823Ssteve.reinhardt@amd.com schedule(tickEvent, nextCycle(curTick() + ticks(delay))); 2652623SN/A _status = Running; 2662623SN/A} 2672623SN/A 2682623SN/A 2692623SN/Avoid 2702623SN/AAtomicSimpleCPU::suspendContext(int 
thread_num) 2712623SN/A{ 2724940Snate@binkert.org DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num); 2734940Snate@binkert.org 2742623SN/A assert(thread_num == 0); 2752683Sktlim@umich.edu assert(thread); 2762623SN/A 2776043Sgblack@eecs.umich.edu if (_status == Idle) 2786043Sgblack@eecs.umich.edu return; 2796043Sgblack@eecs.umich.edu 2802623SN/A assert(_status == Running); 2812626SN/A 2822626SN/A // tick event may not be scheduled if this gets called from inside 2832626SN/A // an instruction's execution, e.g. "quiesce" 2842626SN/A if (tickEvent.scheduled()) 2855606Snate@binkert.org deschedule(tickEvent); 2862623SN/A 2872623SN/A notIdleFraction--; 2882623SN/A _status = Idle; 2892623SN/A} 2902623SN/A 2912623SN/A 2922623SN/AFault 2938444Sgblack@eecs.umich.eduAtomicSimpleCPU::readMem(Addr addr, uint8_t * data, 2948444Sgblack@eecs.umich.edu unsigned size, unsigned flags) 2952623SN/A{ 2963169Sstever@eecs.umich.edu // use the CPU's statically allocated read request and packet objects 2974870Sstever@eecs.umich.edu Request *req = &data_read_req; 2982623SN/A 2992623SN/A if (traceData) { 3002623SN/A traceData->setAddr(addr); 3012623SN/A } 3022623SN/A 3034999Sgblack@eecs.umich.edu //The block size of our peer. 3046227Snate@binkert.org unsigned blockSize = dcachePort.peerBlockSize(); 3054999Sgblack@eecs.umich.edu //The size of the data we're trying to read. 3067520Sgblack@eecs.umich.edu int fullSize = size; 3072623SN/A 3084999Sgblack@eecs.umich.edu //The address of the second part of this access if it needs to be split 3094999Sgblack@eecs.umich.edu //across a cache line boundary. 
3107520Sgblack@eecs.umich.edu Addr secondAddr = roundDown(addr + size - 1, blockSize); 3114999Sgblack@eecs.umich.edu 3127520Sgblack@eecs.umich.edu if (secondAddr > addr) 3137520Sgblack@eecs.umich.edu size = secondAddr - addr; 3144999Sgblack@eecs.umich.edu 3154999Sgblack@eecs.umich.edu dcache_latency = 0; 3164999Sgblack@eecs.umich.edu 3177520Sgblack@eecs.umich.edu while (1) { 3187720Sgblack@eecs.umich.edu req->setVirt(0, addr, size, flags, thread->pcState().instAddr()); 3194999Sgblack@eecs.umich.edu 3204999Sgblack@eecs.umich.edu // translate to physical address 3216023Snate@binkert.org Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read); 3224999Sgblack@eecs.umich.edu 3234999Sgblack@eecs.umich.edu // Now do the access. 3246623Sgblack@eecs.umich.edu if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) { 3254999Sgblack@eecs.umich.edu Packet pkt = Packet(req, 3266102Sgblack@eecs.umich.edu req->isLLSC() ? MemCmd::LoadLockedReq : MemCmd::ReadReq, 3274999Sgblack@eecs.umich.edu Packet::Broadcast); 3287520Sgblack@eecs.umich.edu pkt.dataStatic(data); 3294999Sgblack@eecs.umich.edu 3308105Sgblack@eecs.umich.edu if (req->isMmappedIpr()) 3314999Sgblack@eecs.umich.edu dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt); 3324999Sgblack@eecs.umich.edu else { 3334999Sgblack@eecs.umich.edu if (hasPhysMemPort && pkt.getAddr() == physMemAddr) 3344999Sgblack@eecs.umich.edu dcache_latency += physmemPort.sendAtomic(&pkt); 3354999Sgblack@eecs.umich.edu else 3364999Sgblack@eecs.umich.edu dcache_latency += dcachePort.sendAtomic(&pkt); 3374999Sgblack@eecs.umich.edu } 3384999Sgblack@eecs.umich.edu dcache_access = true; 3395012Sgblack@eecs.umich.edu 3404999Sgblack@eecs.umich.edu assert(!pkt.isError()); 3414999Sgblack@eecs.umich.edu 3426102Sgblack@eecs.umich.edu if (req->isLLSC()) { 3434999Sgblack@eecs.umich.edu TheISA::handleLockedRead(thread, req); 3444999Sgblack@eecs.umich.edu } 3454968Sacolyte@umich.edu } 3464986Ssaidi@eecs.umich.edu 
3474999Sgblack@eecs.umich.edu //If there's a fault, return it 3486739Sgblack@eecs.umich.edu if (fault != NoFault) { 3496739Sgblack@eecs.umich.edu if (req->isPrefetch()) { 3506739Sgblack@eecs.umich.edu return NoFault; 3516739Sgblack@eecs.umich.edu } else { 3526739Sgblack@eecs.umich.edu return fault; 3536739Sgblack@eecs.umich.edu } 3546739Sgblack@eecs.umich.edu } 3556739Sgblack@eecs.umich.edu 3564999Sgblack@eecs.umich.edu //If we don't need to access a second cache line, stop now. 3574999Sgblack@eecs.umich.edu if (secondAddr <= addr) 3584999Sgblack@eecs.umich.edu { 3596078Sgblack@eecs.umich.edu if (req->isLocked() && fault == NoFault) { 3606078Sgblack@eecs.umich.edu assert(!locked); 3616078Sgblack@eecs.umich.edu locked = true; 3626078Sgblack@eecs.umich.edu } 3634999Sgblack@eecs.umich.edu return fault; 3644968Sacolyte@umich.edu } 3653170Sstever@eecs.umich.edu 3664999Sgblack@eecs.umich.edu /* 3674999Sgblack@eecs.umich.edu * Set up for accessing the second cache line. 3684999Sgblack@eecs.umich.edu */ 3694999Sgblack@eecs.umich.edu 3704999Sgblack@eecs.umich.edu //Move the pointer we're reading into to the correct location. 3717520Sgblack@eecs.umich.edu data += size; 3724999Sgblack@eecs.umich.edu //Adjust the size to get the remaining bytes. 3737520Sgblack@eecs.umich.edu size = addr + fullSize - secondAddr; 3744999Sgblack@eecs.umich.edu //And access the right address. 
3754999Sgblack@eecs.umich.edu addr = secondAddr; 3762623SN/A } 3772623SN/A} 3782623SN/A 3797520Sgblack@eecs.umich.edu 3802623SN/AFault 3818444Sgblack@eecs.umich.eduAtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, 3828444Sgblack@eecs.umich.edu Addr addr, unsigned flags, uint64_t *res) 3832623SN/A{ 3843169Sstever@eecs.umich.edu // use the CPU's statically allocated write request and packet objects 3854870Sstever@eecs.umich.edu Request *req = &data_write_req; 3862623SN/A 3872623SN/A if (traceData) { 3882623SN/A traceData->setAddr(addr); 3892623SN/A } 3902623SN/A 3914999Sgblack@eecs.umich.edu //The block size of our peer. 3926227Snate@binkert.org unsigned blockSize = dcachePort.peerBlockSize(); 3934999Sgblack@eecs.umich.edu //The size of the data we're trying to read. 3947520Sgblack@eecs.umich.edu int fullSize = size; 3952623SN/A 3964999Sgblack@eecs.umich.edu //The address of the second part of this access if it needs to be split 3974999Sgblack@eecs.umich.edu //across a cache line boundary. 3987520Sgblack@eecs.umich.edu Addr secondAddr = roundDown(addr + size - 1, blockSize); 3994999Sgblack@eecs.umich.edu 4004999Sgblack@eecs.umich.edu if(secondAddr > addr) 4017520Sgblack@eecs.umich.edu size = secondAddr - addr; 4024999Sgblack@eecs.umich.edu 4034999Sgblack@eecs.umich.edu dcache_latency = 0; 4044999Sgblack@eecs.umich.edu 4054999Sgblack@eecs.umich.edu while(1) { 4067720Sgblack@eecs.umich.edu req->setVirt(0, addr, size, flags, thread->pcState().instAddr()); 4074999Sgblack@eecs.umich.edu 4084999Sgblack@eecs.umich.edu // translate to physical address 4096023Snate@binkert.org Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write); 4104999Sgblack@eecs.umich.edu 4114999Sgblack@eecs.umich.edu // Now do the access. 
4124999Sgblack@eecs.umich.edu if (fault == NoFault) { 4134999Sgblack@eecs.umich.edu MemCmd cmd = MemCmd::WriteReq; // default 4144999Sgblack@eecs.umich.edu bool do_access = true; // flag to suppress cache access 4154999Sgblack@eecs.umich.edu 4166102Sgblack@eecs.umich.edu if (req->isLLSC()) { 4174999Sgblack@eecs.umich.edu cmd = MemCmd::StoreCondReq; 4184999Sgblack@eecs.umich.edu do_access = TheISA::handleLockedWrite(thread, req); 4194999Sgblack@eecs.umich.edu } else if (req->isSwap()) { 4204999Sgblack@eecs.umich.edu cmd = MemCmd::SwapReq; 4214999Sgblack@eecs.umich.edu if (req->isCondSwap()) { 4224999Sgblack@eecs.umich.edu assert(res); 4234999Sgblack@eecs.umich.edu req->setExtraData(*res); 4244999Sgblack@eecs.umich.edu } 4254999Sgblack@eecs.umich.edu } 4264999Sgblack@eecs.umich.edu 4276623Sgblack@eecs.umich.edu if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) { 4284999Sgblack@eecs.umich.edu Packet pkt = Packet(req, cmd, Packet::Broadcast); 4297520Sgblack@eecs.umich.edu pkt.dataStatic(data); 4304999Sgblack@eecs.umich.edu 4318105Sgblack@eecs.umich.edu if (req->isMmappedIpr()) { 4324999Sgblack@eecs.umich.edu dcache_latency += 4334999Sgblack@eecs.umich.edu TheISA::handleIprWrite(thread->getTC(), &pkt); 4344999Sgblack@eecs.umich.edu } else { 4354999Sgblack@eecs.umich.edu if (hasPhysMemPort && pkt.getAddr() == physMemAddr) 4364999Sgblack@eecs.umich.edu dcache_latency += physmemPort.sendAtomic(&pkt); 4374999Sgblack@eecs.umich.edu else 4384999Sgblack@eecs.umich.edu dcache_latency += dcachePort.sendAtomic(&pkt); 4394999Sgblack@eecs.umich.edu } 4404999Sgblack@eecs.umich.edu dcache_access = true; 4414999Sgblack@eecs.umich.edu assert(!pkt.isError()); 4424999Sgblack@eecs.umich.edu 4434999Sgblack@eecs.umich.edu if (req->isSwap()) { 4444999Sgblack@eecs.umich.edu assert(res); 4457520Sgblack@eecs.umich.edu memcpy(res, pkt.getPtr<uint8_t>(), fullSize); 4464999Sgblack@eecs.umich.edu } 4474999Sgblack@eecs.umich.edu } 4484999Sgblack@eecs.umich.edu 
4494999Sgblack@eecs.umich.edu if (res && !req->isSwap()) { 4504999Sgblack@eecs.umich.edu *res = req->getExtraData(); 4514878Sstever@eecs.umich.edu } 4524040Ssaidi@eecs.umich.edu } 4534040Ssaidi@eecs.umich.edu 4544999Sgblack@eecs.umich.edu //If there's a fault or we don't need to access a second cache line, 4554999Sgblack@eecs.umich.edu //stop now. 4564999Sgblack@eecs.umich.edu if (fault != NoFault || secondAddr <= addr) 4574999Sgblack@eecs.umich.edu { 4586078Sgblack@eecs.umich.edu if (req->isLocked() && fault == NoFault) { 4596078Sgblack@eecs.umich.edu assert(locked); 4606078Sgblack@eecs.umich.edu locked = false; 4616078Sgblack@eecs.umich.edu } 4626739Sgblack@eecs.umich.edu if (fault != NoFault && req->isPrefetch()) { 4636739Sgblack@eecs.umich.edu return NoFault; 4646739Sgblack@eecs.umich.edu } else { 4656739Sgblack@eecs.umich.edu return fault; 4666739Sgblack@eecs.umich.edu } 4673170Sstever@eecs.umich.edu } 4683170Sstever@eecs.umich.edu 4694999Sgblack@eecs.umich.edu /* 4704999Sgblack@eecs.umich.edu * Set up for accessing the second cache line. 4714999Sgblack@eecs.umich.edu */ 4724999Sgblack@eecs.umich.edu 4734999Sgblack@eecs.umich.edu //Move the pointer we're reading into to the correct location. 4747520Sgblack@eecs.umich.edu data += size; 4754999Sgblack@eecs.umich.edu //Adjust the size to get the remaining bytes. 4767520Sgblack@eecs.umich.edu size = addr + fullSize - secondAddr; 4774999Sgblack@eecs.umich.edu //And access the right address. 
4784999Sgblack@eecs.umich.edu addr = secondAddr; 4792623SN/A } 4802623SN/A} 4812623SN/A 4822623SN/A 4832623SN/Avoid 4842623SN/AAtomicSimpleCPU::tick() 4852623SN/A{ 4864940Snate@binkert.org DPRINTF(SimpleCPU, "Tick\n"); 4874940Snate@binkert.org 4885487Snate@binkert.org Tick latency = 0; 4892623SN/A 4906078Sgblack@eecs.umich.edu for (int i = 0; i < width || locked; ++i) { 4912623SN/A numCycles++; 4922623SN/A 4933387Sgblack@eecs.umich.edu if (!curStaticInst || !curStaticInst->isDelayedCommit()) 4943387Sgblack@eecs.umich.edu checkForInterrupts(); 4952626SN/A 4965348Ssaidi@eecs.umich.edu checkPcEventQueue(); 4978143SAli.Saidi@ARM.com // We must have just got suspended by a PC event 4988143SAli.Saidi@ARM.com if (_status == Idle) 4998143SAli.Saidi@ARM.com return; 5005348Ssaidi@eecs.umich.edu 5015669Sgblack@eecs.umich.edu Fault fault = NoFault; 5025669Sgblack@eecs.umich.edu 5037720Sgblack@eecs.umich.edu TheISA::PCState pcState = thread->pcState(); 5047720Sgblack@eecs.umich.edu 5057720Sgblack@eecs.umich.edu bool needToFetch = !isRomMicroPC(pcState.microPC()) && 5067720Sgblack@eecs.umich.edu !curMacroStaticInst; 5077720Sgblack@eecs.umich.edu if (needToFetch) { 5085894Sgblack@eecs.umich.edu setupFetchRequest(&ifetch_req); 5096023Snate@binkert.org fault = thread->itb->translateAtomic(&ifetch_req, tc, 5106023Snate@binkert.org BaseTLB::Execute); 5115894Sgblack@eecs.umich.edu } 5122623SN/A 5132623SN/A if (fault == NoFault) { 5144182Sgblack@eecs.umich.edu Tick icache_latency = 0; 5154182Sgblack@eecs.umich.edu bool icache_access = false; 5164182Sgblack@eecs.umich.edu dcache_access = false; // assume no dcache access 5172662Sstever@eecs.umich.edu 5187720Sgblack@eecs.umich.edu if (needToFetch) { 5195694Sgblack@eecs.umich.edu // This is commented out because the predecoder would act like 5205694Sgblack@eecs.umich.edu // a tiny cache otherwise. It wouldn't be flushed when needed 5215694Sgblack@eecs.umich.edu // like the I cache. 
It should be flushed, and when that works 5225694Sgblack@eecs.umich.edu // this code should be uncommented. 5235669Sgblack@eecs.umich.edu //Fetch more instruction memory if necessary 5245669Sgblack@eecs.umich.edu //if(predecoder.needMoreBytes()) 5255669Sgblack@eecs.umich.edu //{ 5265669Sgblack@eecs.umich.edu icache_access = true; 5275669Sgblack@eecs.umich.edu Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq, 5285669Sgblack@eecs.umich.edu Packet::Broadcast); 5295669Sgblack@eecs.umich.edu ifetch_pkt.dataStatic(&inst); 5302623SN/A 5315669Sgblack@eecs.umich.edu if (hasPhysMemPort && ifetch_pkt.getAddr() == physMemAddr) 5325669Sgblack@eecs.umich.edu icache_latency = physmemPort.sendAtomic(&ifetch_pkt); 5335669Sgblack@eecs.umich.edu else 5345669Sgblack@eecs.umich.edu icache_latency = icachePort.sendAtomic(&ifetch_pkt); 5354968Sacolyte@umich.edu 5365669Sgblack@eecs.umich.edu assert(!ifetch_pkt.isError()); 5374968Sacolyte@umich.edu 5385669Sgblack@eecs.umich.edu // ifetch_req is initialized to read the instruction directly 5395669Sgblack@eecs.umich.edu // into the CPU object's inst field. 
5405669Sgblack@eecs.umich.edu //} 5415669Sgblack@eecs.umich.edu } 5424182Sgblack@eecs.umich.edu 5432623SN/A preExecute(); 5443814Ssaidi@eecs.umich.edu 5455001Sgblack@eecs.umich.edu if (curStaticInst) { 5464182Sgblack@eecs.umich.edu fault = curStaticInst->execute(this, traceData); 5474998Sgblack@eecs.umich.edu 5484998Sgblack@eecs.umich.edu // keep an instruction count 5494998Sgblack@eecs.umich.edu if (fault == NoFault) 5504998Sgblack@eecs.umich.edu countInst(); 5517655Sali.saidi@arm.com else if (traceData && !DTRACE(ExecFaulting)) { 5525001Sgblack@eecs.umich.edu delete traceData; 5535001Sgblack@eecs.umich.edu traceData = NULL; 5545001Sgblack@eecs.umich.edu } 5554998Sgblack@eecs.umich.edu 5564182Sgblack@eecs.umich.edu postExecute(); 5574182Sgblack@eecs.umich.edu } 5582623SN/A 5593814Ssaidi@eecs.umich.edu // @todo remove me after debugging with legion done 5604539Sgblack@eecs.umich.edu if (curStaticInst && (!curStaticInst->isMicroop() || 5614539Sgblack@eecs.umich.edu curStaticInst->isFirstMicroop())) 5623814Ssaidi@eecs.umich.edu instCnt++; 5633814Ssaidi@eecs.umich.edu 5645487Snate@binkert.org Tick stall_ticks = 0; 5655487Snate@binkert.org if (simulate_inst_stalls && icache_access) 5665487Snate@binkert.org stall_ticks += icache_latency; 5675487Snate@binkert.org 5685487Snate@binkert.org if (simulate_data_stalls && dcache_access) 5695487Snate@binkert.org stall_ticks += dcache_latency; 5705487Snate@binkert.org 5715487Snate@binkert.org if (stall_ticks) { 5725487Snate@binkert.org Tick stall_cycles = stall_ticks / ticks(1); 5735487Snate@binkert.org Tick aligned_stall_ticks = ticks(stall_cycles); 5745487Snate@binkert.org 5755487Snate@binkert.org if (aligned_stall_ticks < stall_ticks) 5765487Snate@binkert.org aligned_stall_ticks += 1; 5775487Snate@binkert.org 5785487Snate@binkert.org latency += aligned_stall_ticks; 5792623SN/A } 5802623SN/A 5812623SN/A } 5824377Sgblack@eecs.umich.edu if(fault != NoFault || !stayAtPC) 5834182Sgblack@eecs.umich.edu advancePC(fault); 5842623SN/A 
} 5852623SN/A 5865487Snate@binkert.org // instruction takes at least one cycle 5875487Snate@binkert.org if (latency < ticks(1)) 5885487Snate@binkert.org latency = ticks(1); 5895487Snate@binkert.org 5902626SN/A if (_status != Idle) 5917823Ssteve.reinhardt@amd.com schedule(tickEvent, curTick() + latency); 5922623SN/A} 5932623SN/A 5942623SN/A 5955315Sstever@gmail.comvoid 5965315Sstever@gmail.comAtomicSimpleCPU::printAddr(Addr a) 5975315Sstever@gmail.com{ 5985315Sstever@gmail.com dcachePort.printAddr(a); 5995315Sstever@gmail.com} 6005315Sstever@gmail.com 6015315Sstever@gmail.com 6022623SN/A//////////////////////////////////////////////////////////////////////// 6032623SN/A// 6042623SN/A// AtomicSimpleCPU Simulation Object 6052623SN/A// 6064762Snate@binkert.orgAtomicSimpleCPU * 6074762Snate@binkert.orgAtomicSimpleCPUParams::create() 6082623SN/A{ 6095529Snate@binkert.org numThreads = 1; 6105529Snate@binkert.org#if !FULL_SYSTEM 6114762Snate@binkert.org if (workload.size() != 1) 6124762Snate@binkert.org panic("only one workload allowed"); 6132623SN/A#endif 6145529Snate@binkert.org return new AtomicSimpleCPU(this); 6152623SN/A} 616