// atomic.cc revision 7897
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
272665Ssaidi@eecs.umich.edu * 282665Ssaidi@eecs.umich.edu * Authors: Steve Reinhardt 292623SN/A */ 302623SN/A 313170Sstever@eecs.umich.edu#include "arch/locked_mem.hh" 323806Ssaidi@eecs.umich.edu#include "arch/mmaped_ipr.hh" 332623SN/A#include "arch/utility.hh" 344040Ssaidi@eecs.umich.edu#include "base/bigint.hh" 352623SN/A#include "config/the_isa.hh" 362623SN/A#include "cpu/exetrace.hh" 373348Sbinkertn@umich.edu#include "cpu/simple/atomic.hh" 383348Sbinkertn@umich.edu#include "mem/packet.hh" 394762Snate@binkert.org#include "mem/packet_access.hh" 402901Ssaidi@eecs.umich.edu#include "params/AtomicSimpleCPU.hh" 412623SN/A#include "sim/faults.hh" 422623SN/A#include "sim/system.hh" 432623SN/A 442623SN/Ausing namespace std; 452623SN/Ausing namespace TheISA; 462623SN/A 472623SN/AAtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c) 482623SN/A : Event(CPU_Tick_Pri), cpu(c) 492623SN/A{ 502623SN/A} 512623SN/A 522623SN/A 532623SN/Avoid 542623SN/AAtomicSimpleCPU::TickEvent::process() 552623SN/A{ 562623SN/A cpu->tick(); 572623SN/A} 582623SN/A 592623SN/Aconst char * 604873Sstever@eecs.umich.eduAtomicSimpleCPU::TickEvent::description() const 612623SN/A{ 622623SN/A return "AtomicSimpleCPU tick"; 632856Srdreslin@umich.edu} 642856Srdreslin@umich.edu 652856Srdreslin@umich.eduPort * 662856Srdreslin@umich.eduAtomicSimpleCPU::getPort(const string &if_name, int idx) 672856Srdreslin@umich.edu{ 682856Srdreslin@umich.edu if (if_name == "dcache_port") 692856Srdreslin@umich.edu return &dcachePort; 704968Sacolyte@umich.edu else if (if_name == "icache_port") 714968Sacolyte@umich.edu return &icachePort; 724968Sacolyte@umich.edu else if (if_name == "physmem_port") { 734968Sacolyte@umich.edu hasPhysMemPort = true; 742856Srdreslin@umich.edu return &physmemPort; 752856Srdreslin@umich.edu } 762856Srdreslin@umich.edu else 772623SN/A panic("No Such Port\n"); 782623SN/A} 792623SN/A 802623SN/Avoid 812623SN/AAtomicSimpleCPU::init() 822623SN/A{ 832680Sktlim@umich.edu BaseCPU::init(); 
842680Sktlim@umich.edu#if FULL_SYSTEM 852623SN/A ThreadID size = threadContexts.size(); 862623SN/A for (ThreadID i = 0; i < size; ++i) { 872680Sktlim@umich.edu ThreadContext *tc = threadContexts[i]; 882623SN/A 892623SN/A // initialize CPU, including PC 904968Sacolyte@umich.edu TheISA::initCPU(tc, tc->contextId()); 914968Sacolyte@umich.edu } 924968Sacolyte@umich.edu#endif 934968Sacolyte@umich.edu if (hasPhysMemPort) { 944968Sacolyte@umich.edu bool snoop = false; 954968Sacolyte@umich.edu AddrRangeList pmAddrList; 962623SN/A physmemPort.getPeerAddressRanges(pmAddrList, snoop); 972623SN/A physMemAddr = *pmAddrList.begin(); 982623SN/A } 993349Sbinkertn@umich.edu // Atomic doesn't do MT right now, so contextId == threadId 1002623SN/A ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT 1013184Srdreslin@umich.edu data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too 1022623SN/A data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too 1032623SN/A} 1042623SN/A 1052623SN/Abool 1063349Sbinkertn@umich.eduAtomicSimpleCPU::CpuPort::recvTiming(PacketPtr pkt) 1072623SN/A{ 1083310Srdreslin@umich.edu panic("AtomicSimpleCPU doesn't expect recvTiming callback!"); 1093649Srdreslin@umich.edu return true; 1102623SN/A} 1112623SN/A 1122623SN/ATick 1133349Sbinkertn@umich.eduAtomicSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt) 1142623SN/A{ 1153184Srdreslin@umich.edu //Snooping a coherence request, just return 1163184Srdreslin@umich.edu return 0; 1172623SN/A} 1182623SN/A 1192623SN/Avoid 1202623SN/AAtomicSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt) 1212623SN/A{ 1223647Srdreslin@umich.edu //No internal storage to update, just return 1233647Srdreslin@umich.edu return; 1243647Srdreslin@umich.edu} 1253647Srdreslin@umich.edu 1263647Srdreslin@umich.eduvoid 1272626SN/AAtomicSimpleCPU::CpuPort::recvStatusChange(Status status) 1283647Srdreslin@umich.edu{ 1292626SN/A if (status == RangeChange) { 1302623SN/A if (!snoopRangeSent) { 1312623SN/A 
snoopRangeSent = true; 1322623SN/A sendStatusChange(Port::RangeChange); 1332657Ssaidi@eecs.umich.edu } 1342623SN/A return; 1352623SN/A } 1362623SN/A 1372623SN/A panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!"); 1382623SN/A} 1394192Sktlim@umich.edu 1404192Sktlim@umich.eduvoid 1414192Sktlim@umich.eduAtomicSimpleCPU::CpuPort::recvRetry() 1424192Sktlim@umich.edu{ 1434192Sktlim@umich.edu panic("AtomicSimpleCPU doesn't expect recvRetry callback!"); 1444192Sktlim@umich.edu} 1454192Sktlim@umich.edu 1464192Sktlim@umich.eduvoid 1474192Sktlim@umich.eduAtomicSimpleCPU::DcachePort::setPeer(Port *port) 1484192Sktlim@umich.edu{ 1494192Sktlim@umich.edu Port::setPeer(port); 1502623SN/A 1512623SN/A#if FULL_SYSTEM 1522623SN/A // Update the ThreadContext's memory ports (Functional/Virtual 1532623SN/A // Ports) 1544968Sacolyte@umich.edu cpu->tcBase()->connectMemPorts(cpu->tcBase()); 1554968Sacolyte@umich.edu#endif 1562623SN/A} 1572623SN/A 1582623SN/AAtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p) 1593647Srdreslin@umich.edu : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false), 1603647Srdreslin@umich.edu simulate_data_stalls(p->simulate_data_stalls), 1613647Srdreslin@umich.edu simulate_inst_stalls(p->simulate_inst_stalls), 1624870Sstever@eecs.umich.edu icachePort(name() + "-iport", this), dcachePort(name() + "-iport", this), 1634870Sstever@eecs.umich.edu physmemPort(name() + "-iport", this), hasPhysMemPort(false) 1644870Sstever@eecs.umich.edu{ 1652623SN/A _status = Idle; 1662623SN/A 1672623SN/A icachePort.snoopRangeSent = false; 1682623SN/A dcachePort.snoopRangeSent = false; 1692623SN/A 1702623SN/A} 1712623SN/A 1722623SN/A 1732623SN/AAtomicSimpleCPU::~AtomicSimpleCPU() 1742623SN/A{ 1752915Sktlim@umich.edu if (tickEvent.scheduled()) { 1762915Sktlim@umich.edu deschedule(tickEvent); 1773177Shsul@eecs.umich.edu } 1783177Shsul@eecs.umich.edu} 1793145Shsul@eecs.umich.edu 1802623SN/Avoid 1812623SN/AAtomicSimpleCPU::serialize(ostream &os) 1822623SN/A{ 
1832623SN/A SimObject::State so_state = SimObject::getState(); 1842623SN/A SERIALIZE_ENUM(so_state); 1852623SN/A SERIALIZE_SCALAR(locked); 1862623SN/A BaseSimpleCPU::serialize(os); 1872915Sktlim@umich.edu nameOut(os, csprintf("%s.tickEvent", name())); 1882915Sktlim@umich.edu tickEvent.serialize(os); 1893177Shsul@eecs.umich.edu} 1903145Shsul@eecs.umich.edu 1912915Sktlim@umich.eduvoid 1922915Sktlim@umich.eduAtomicSimpleCPU::unserialize(Checkpoint *cp, const string §ion) 1932915Sktlim@umich.edu{ 1942915Sktlim@umich.edu SimObject::State so_state; 1952915Sktlim@umich.edu UNSERIALIZE_ENUM(so_state); 1962915Sktlim@umich.edu UNSERIALIZE_SCALAR(locked); 1974940Snate@binkert.org BaseSimpleCPU::unserialize(cp, section); 1983324Shsul@eecs.umich.edu tickEvent.unserialize(cp, csprintf("%s.tickEvent", section)); 1994762Snate@binkert.org} 2003324Shsul@eecs.umich.edu 2013324Shsul@eecs.umich.eduvoid 2023324Shsul@eecs.umich.eduAtomicSimpleCPU::resume() 2033431Sgblack@eecs.umich.edu{ 2043495Sktlim@umich.edu if (_status == Idle || _status == SwitchedOut) 2053431Sgblack@eecs.umich.edu return; 2063324Shsul@eecs.umich.edu 2072915Sktlim@umich.edu DPRINTF(SimpleCPU, "Resume\n"); 2082623SN/A assert(system->getMemoryMode() == Enums::atomic); 2092623SN/A 2102623SN/A changeState(SimObject::Running); 2112798Sktlim@umich.edu if (thread->status() == ThreadContext::Active) { 2122623SN/A if (!tickEvent.scheduled()) 2132798Sktlim@umich.edu schedule(tickEvent, nextCycle()); 2142798Sktlim@umich.edu } 2152623SN/A system->totalNumInsts = 0; 2162798Sktlim@umich.edu} 2172623SN/A 2182623SN/Avoid 2192623SN/AAtomicSimpleCPU::switchOut() 2202623SN/A{ 2212623SN/A assert(_status == Running || _status == Idle); 2222623SN/A _status = SwitchedOut; 2234192Sktlim@umich.edu 2242623SN/A tickEvent.squash(); 2252623SN/A} 2262623SN/A 2272680Sktlim@umich.edu 2282623SN/Avoid 2292680Sktlim@umich.eduAtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU) 2302680Sktlim@umich.edu{ 2312680Sktlim@umich.edu BaseCPU::takeOverFrom(oldCPU, 
&icachePort, &dcachePort); 2322623SN/A 2333495Sktlim@umich.edu assert(!tickEvent.scheduled()); 2342623SN/A 2352623SN/A // if any of this CPU's ThreadContexts are active, mark the CPU as 2362623SN/A // running and schedule its tick event. 2373512Sktlim@umich.edu ThreadID size = threadContexts.size(); 2383512Sktlim@umich.edu for (ThreadID i = 0; i < size; ++i) { 2393512Sktlim@umich.edu ThreadContext *tc = threadContexts[i]; 2402623SN/A if (tc->status() == ThreadContext::Active && _status != Running) { 2412623SN/A _status = Running; 2422623SN/A schedule(tickEvent, nextCycle()); 2432623SN/A break; 2442623SN/A } 2452623SN/A } 2464940Snate@binkert.org if (_status != Running) { 2474940Snate@binkert.org _status = Idle; 2482623SN/A } 2492683Sktlim@umich.edu assert(threadContexts.size() == 1); 2502623SN/A ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT 2512623SN/A data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too 2522623SN/A data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too 2532623SN/A} 2542623SN/A 2555101Ssaidi@eecs.umich.edu 2563686Sktlim@umich.eduvoid 2573430Sgblack@eecs.umich.eduAtomicSimpleCPU::activateContext(int thread_num, int delay) 2585100Ssaidi@eecs.umich.edu{ 2592623SN/A DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay); 2602623SN/A 2612623SN/A assert(thread_num == 0); 2622623SN/A assert(thread); 2632623SN/A 2642623SN/A assert(_status == Idle); 2652623SN/A assert(!tickEvent.scheduled()); 2664940Snate@binkert.org 2674940Snate@binkert.org notIdleFraction++; 2682623SN/A numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend); 2692683Sktlim@umich.edu 2702623SN/A //Make sure ticks are still on multiples of cycles 2712623SN/A schedule(tickEvent, nextCycle(curTick() + ticks(delay))); 2722626SN/A _status = Running; 2732626SN/A} 2742626SN/A 2752626SN/A 2762626SN/Avoid 2772623SN/AAtomicSimpleCPU::suspendContext(int thread_num) 2782623SN/A{ 2792623SN/A DPRINTF(SimpleCPU, 
"SuspendContext %d\n", thread_num); 2802623SN/A 2812623SN/A assert(thread_num == 0); 2822623SN/A assert(thread); 2832623SN/A 2842623SN/A if (_status == Idle) 2852623SN/A return; 2862623SN/A 2873169Sstever@eecs.umich.edu assert(_status == Running); 2884870Sstever@eecs.umich.edu 2892623SN/A // tick event may not be scheduled if this gets called from inside 2902623SN/A // an instruction's execution, e.g. "quiesce" 2912623SN/A if (tickEvent.scheduled()) 2922623SN/A deschedule(tickEvent); 2932623SN/A 2944999Sgblack@eecs.umich.edu notIdleFraction--; 2954999Sgblack@eecs.umich.edu _status = Idle; 2964999Sgblack@eecs.umich.edu} 2974999Sgblack@eecs.umich.edu 2982623SN/A 2994999Sgblack@eecs.umich.eduFault 3002623SN/AAtomicSimpleCPU::readBytes(Addr addr, uint8_t * data, 3014999Sgblack@eecs.umich.edu unsigned size, unsigned flags) 3024999Sgblack@eecs.umich.edu{ 3034999Sgblack@eecs.umich.edu // use the CPU's statically allocated read request and packet objects 3044999Sgblack@eecs.umich.edu Request *req = &data_read_req; 3054999Sgblack@eecs.umich.edu 3064999Sgblack@eecs.umich.edu if (traceData) { 3074999Sgblack@eecs.umich.edu traceData->setAddr(addr); 3084999Sgblack@eecs.umich.edu } 3094999Sgblack@eecs.umich.edu 3104999Sgblack@eecs.umich.edu //The block size of our peer. 3114999Sgblack@eecs.umich.edu unsigned blockSize = dcachePort.peerBlockSize(); 3124999Sgblack@eecs.umich.edu //The size of the data we're trying to read. 3134999Sgblack@eecs.umich.edu int fullSize = size; 3144999Sgblack@eecs.umich.edu 3154999Sgblack@eecs.umich.edu //The address of the second part of this access if it needs to be split 3164999Sgblack@eecs.umich.edu //across a cache line boundary. 
3174999Sgblack@eecs.umich.edu Addr secondAddr = roundDown(addr + size - 1, blockSize); 3184999Sgblack@eecs.umich.edu 3194999Sgblack@eecs.umich.edu if (secondAddr > addr) 3204999Sgblack@eecs.umich.edu size = secondAddr - addr; 3214999Sgblack@eecs.umich.edu 3224999Sgblack@eecs.umich.edu dcache_latency = 0; 3234999Sgblack@eecs.umich.edu 3244999Sgblack@eecs.umich.edu while (1) { 3254999Sgblack@eecs.umich.edu req->setVirt(0, addr, size, flags, thread->pcState().instAddr()); 3264999Sgblack@eecs.umich.edu 3274999Sgblack@eecs.umich.edu // translate to physical address 3284999Sgblack@eecs.umich.edu Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read); 3294999Sgblack@eecs.umich.edu 3304999Sgblack@eecs.umich.edu // Now do the access. 3314999Sgblack@eecs.umich.edu if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) { 3325012Sgblack@eecs.umich.edu Packet pkt = Packet(req, 3334999Sgblack@eecs.umich.edu req->isLLSC() ? MemCmd::LoadLockedReq : MemCmd::ReadReq, 3344999Sgblack@eecs.umich.edu Packet::Broadcast); 3354999Sgblack@eecs.umich.edu pkt.dataStatic(data); 3364999Sgblack@eecs.umich.edu 3374999Sgblack@eecs.umich.edu if (req->isMmapedIpr()) 3384968Sacolyte@umich.edu dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt); 3394986Ssaidi@eecs.umich.edu else { 3404999Sgblack@eecs.umich.edu if (hasPhysMemPort && pkt.getAddr() == physMemAddr) 3414999Sgblack@eecs.umich.edu dcache_latency += physmemPort.sendAtomic(&pkt); 3424999Sgblack@eecs.umich.edu else 3434762Snate@binkert.org dcache_latency += dcachePort.sendAtomic(&pkt); 3444999Sgblack@eecs.umich.edu } 3454999Sgblack@eecs.umich.edu dcache_access = true; 3464999Sgblack@eecs.umich.edu 3474999Sgblack@eecs.umich.edu assert(!pkt.isError()); 3484999Sgblack@eecs.umich.edu 3494999Sgblack@eecs.umich.edu if (req->isLLSC()) { 3504999Sgblack@eecs.umich.edu TheISA::handleLockedRead(thread, req); 3514999Sgblack@eecs.umich.edu } 3524968Sacolyte@umich.edu } 3533170Sstever@eecs.umich.edu 
3544999Sgblack@eecs.umich.edu //If there's a fault, return it 3554999Sgblack@eecs.umich.edu if (fault != NoFault) { 3564999Sgblack@eecs.umich.edu if (req->isPrefetch()) { 3574999Sgblack@eecs.umich.edu return NoFault; 3584999Sgblack@eecs.umich.edu } else { 3594999Sgblack@eecs.umich.edu return fault; 3604999Sgblack@eecs.umich.edu } 3614999Sgblack@eecs.umich.edu } 3624999Sgblack@eecs.umich.edu 3634999Sgblack@eecs.umich.edu //If we don't need to access a second cache line, stop now. 3642623SN/A if (secondAddr <= addr) 3652623SN/A { 3662623SN/A if (req->isLocked() && fault == NoFault) { 3672623SN/A assert(!locked); 3682623SN/A locked = true; 3692623SN/A } 3702623SN/A return fault; 3714115Ssaidi@eecs.umich.edu } 3724115Ssaidi@eecs.umich.edu 3734115Ssaidi@eecs.umich.edu /* 3744115Ssaidi@eecs.umich.edu * Set up for accessing the second cache line. 3754040Ssaidi@eecs.umich.edu */ 3764040Ssaidi@eecs.umich.edu 3774040Ssaidi@eecs.umich.edu //Move the pointer we're reading into to the correct location. 3784040Ssaidi@eecs.umich.edu data += size; 3792623SN/A //Adjust the size to get the remaining bytes. 3802623SN/A size = addr + fullSize - secondAddr; 3812623SN/A //And access the right address. 
3822623SN/A addr = secondAddr; 3832623SN/A } 3842623SN/A} 3852623SN/A 3862623SN/A 3872623SN/Atemplate <class T> 3882623SN/AFault 3892623SN/AAtomicSimpleCPU::read(Addr addr, T &data, unsigned flags) 3902623SN/A{ 3912623SN/A uint8_t *dataPtr = (uint8_t *)&data; 3922623SN/A memset(dataPtr, 0, sizeof(data)); 3932623SN/A Fault fault = readBytes(addr, dataPtr, sizeof(data), flags); 3942623SN/A if (fault == NoFault) { 3952623SN/A data = gtoh(data); 3962623SN/A if (traceData) 3972623SN/A traceData->setData(data); 3982623SN/A } 3992623SN/A return fault; 4002623SN/A} 4012623SN/A 4022623SN/A#ifndef DOXYGEN_SHOULD_SKIP_THIS 4032623SN/A 4042623SN/Atemplate 4052623SN/AFault 4062623SN/AAtomicSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags); 4072623SN/A 4082623SN/Atemplate 4092623SN/AFault 4102623SN/AAtomicSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags); 4112623SN/A 4122623SN/Atemplate 4132623SN/AFault 4142623SN/AAtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags); 4152623SN/A 4162623SN/Atemplate 4172623SN/AFault 4182623SN/AAtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags); 4192623SN/A 4202623SN/Atemplate 4212623SN/AFault 4223169Sstever@eecs.umich.eduAtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags); 4234870Sstever@eecs.umich.edu 4242623SN/Atemplate 4252623SN/AFault 4262623SN/AAtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags); 4272623SN/A 4282623SN/A#endif //DOXYGEN_SHOULD_SKIP_THIS 4294999Sgblack@eecs.umich.edu 4304999Sgblack@eecs.umich.edutemplate<> 4314999Sgblack@eecs.umich.eduFault 4324999Sgblack@eecs.umich.eduAtomicSimpleCPU::read(Addr addr, double &data, unsigned flags) 4332623SN/A{ 4344999Sgblack@eecs.umich.edu return read(addr, *(uint64_t*)&data, flags); 4352623SN/A} 4364999Sgblack@eecs.umich.edu 4374999Sgblack@eecs.umich.edutemplate<> 4384999Sgblack@eecs.umich.eduFault 4394999Sgblack@eecs.umich.eduAtomicSimpleCPU::read(Addr addr, float &data, unsigned flags) 4404999Sgblack@eecs.umich.edu{ 
4414999Sgblack@eecs.umich.edu return read(addr, *(uint32_t*)&data, flags); 4424999Sgblack@eecs.umich.edu} 4434999Sgblack@eecs.umich.edu 4444999Sgblack@eecs.umich.edu 4454999Sgblack@eecs.umich.edutemplate<> 4464999Sgblack@eecs.umich.eduFault 4474999Sgblack@eecs.umich.eduAtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags) 4484999Sgblack@eecs.umich.edu{ 4494999Sgblack@eecs.umich.edu return read(addr, (uint32_t&)data, flags); 4504999Sgblack@eecs.umich.edu} 4514999Sgblack@eecs.umich.edu 4524999Sgblack@eecs.umich.edu 4534999Sgblack@eecs.umich.eduFault 4544999Sgblack@eecs.umich.eduAtomicSimpleCPU::writeBytes(uint8_t *data, unsigned size, 4554999Sgblack@eecs.umich.edu Addr addr, unsigned flags, uint64_t *res) 4564999Sgblack@eecs.umich.edu{ 4574999Sgblack@eecs.umich.edu // use the CPU's statically allocated write request and packet objects 4584999Sgblack@eecs.umich.edu Request *req = &data_write_req; 4594999Sgblack@eecs.umich.edu 4604999Sgblack@eecs.umich.edu if (traceData) { 4614999Sgblack@eecs.umich.edu traceData->setAddr(addr); 4624999Sgblack@eecs.umich.edu } 4634999Sgblack@eecs.umich.edu 4644999Sgblack@eecs.umich.edu //The block size of our peer. 4654999Sgblack@eecs.umich.edu unsigned blockSize = dcachePort.peerBlockSize(); 4664999Sgblack@eecs.umich.edu //The size of the data we're trying to read. 4674999Sgblack@eecs.umich.edu int fullSize = size; 4684999Sgblack@eecs.umich.edu 4694999Sgblack@eecs.umich.edu //The address of the second part of this access if it needs to be split 4704999Sgblack@eecs.umich.edu //across a cache line boundary. 
4714999Sgblack@eecs.umich.edu Addr secondAddr = roundDown(addr + size - 1, blockSize); 4724999Sgblack@eecs.umich.edu 4734999Sgblack@eecs.umich.edu if(secondAddr > addr) 4744999Sgblack@eecs.umich.edu size = secondAddr - addr; 4754999Sgblack@eecs.umich.edu 4764999Sgblack@eecs.umich.edu dcache_latency = 0; 4774999Sgblack@eecs.umich.edu 4784999Sgblack@eecs.umich.edu while(1) { 4794999Sgblack@eecs.umich.edu req->setVirt(0, addr, size, flags, thread->pcState().instAddr()); 4804999Sgblack@eecs.umich.edu 4814999Sgblack@eecs.umich.edu // translate to physical address 4824999Sgblack@eecs.umich.edu Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write); 4834999Sgblack@eecs.umich.edu 4844999Sgblack@eecs.umich.edu // Now do the access. 4854999Sgblack@eecs.umich.edu if (fault == NoFault) { 4864999Sgblack@eecs.umich.edu MemCmd cmd = MemCmd::WriteReq; // default 4874999Sgblack@eecs.umich.edu bool do_access = true; // flag to suppress cache access 4884999Sgblack@eecs.umich.edu 4894999Sgblack@eecs.umich.edu if (req->isLLSC()) { 4904999Sgblack@eecs.umich.edu cmd = MemCmd::StoreCondReq; 4914999Sgblack@eecs.umich.edu do_access = TheISA::handleLockedWrite(thread, req); 4924999Sgblack@eecs.umich.edu } else if (req->isSwap()) { 4934999Sgblack@eecs.umich.edu cmd = MemCmd::SwapReq; 4944999Sgblack@eecs.umich.edu if (req->isCondSwap()) { 4954878Sstever@eecs.umich.edu assert(res); 4964040Ssaidi@eecs.umich.edu req->setExtraData(*res); 4974040Ssaidi@eecs.umich.edu } 4984999Sgblack@eecs.umich.edu } 4994999Sgblack@eecs.umich.edu 5004999Sgblack@eecs.umich.edu if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) { 5012631SN/A Packet pkt = Packet(req, cmd, Packet::Broadcast); 5024999Sgblack@eecs.umich.edu pkt.dataStatic(data); 5034999Sgblack@eecs.umich.edu 5044999Sgblack@eecs.umich.edu if (req->isMmapedIpr()) { 5054999Sgblack@eecs.umich.edu dcache_latency += 5064999Sgblack@eecs.umich.edu TheISA::handleIprWrite(thread->getTC(), &pkt); 5074999Sgblack@eecs.umich.edu } else { 
5084999Sgblack@eecs.umich.edu if (hasPhysMemPort && pkt.getAddr() == physMemAddr) 5094999Sgblack@eecs.umich.edu dcache_latency += physmemPort.sendAtomic(&pkt); 5103170Sstever@eecs.umich.edu else 5113170Sstever@eecs.umich.edu dcache_latency += dcachePort.sendAtomic(&pkt); 5124999Sgblack@eecs.umich.edu } 5134999Sgblack@eecs.umich.edu dcache_access = true; 5144999Sgblack@eecs.umich.edu assert(!pkt.isError()); 5154999Sgblack@eecs.umich.edu 5164999Sgblack@eecs.umich.edu if (req->isSwap()) { 5174999Sgblack@eecs.umich.edu assert(res); 5184999Sgblack@eecs.umich.edu memcpy(res, pkt.getPtr<uint8_t>(), fullSize); 5194999Sgblack@eecs.umich.edu } 5204999Sgblack@eecs.umich.edu } 5214999Sgblack@eecs.umich.edu 5222623SN/A if (res && !req->isSwap()) { 5232623SN/A *res = req->getExtraData(); 5242623SN/A } 5252623SN/A } 5262623SN/A 5274224Sgblack@eecs.umich.edu //If there's a fault or we don't need to access a second cache line, 5284224Sgblack@eecs.umich.edu //stop now. 5294224Sgblack@eecs.umich.edu if (fault != NoFault || secondAddr <= addr) 5304224Sgblack@eecs.umich.edu { 5314224Sgblack@eecs.umich.edu if (req->isLocked() && fault == NoFault) { 5324224Sgblack@eecs.umich.edu assert(locked); 5334224Sgblack@eecs.umich.edu locked = false; 5344224Sgblack@eecs.umich.edu } 5354224Sgblack@eecs.umich.edu if (fault != NoFault && req->isPrefetch()) { 5364224Sgblack@eecs.umich.edu return NoFault; 5374224Sgblack@eecs.umich.edu } else { 5382623SN/A return fault; 5392623SN/A } 5402623SN/A } 5412623SN/A 5422623SN/A /* 5432623SN/A * Set up for accessing the second cache line. 5442623SN/A */ 5452623SN/A 5462623SN/A //Move the pointer we're reading into to the correct location. 5472623SN/A data += size; 5482623SN/A //Adjust the size to get the remaining bytes. 5492623SN/A size = addr + fullSize - secondAddr; 5502623SN/A //And access the right address. 
5512623SN/A addr = secondAddr; 5522623SN/A } 5532623SN/A} 5542623SN/A 5552623SN/A 5562623SN/Atemplate <class T> 5572623SN/AFault 5582623SN/AAtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res) 5592623SN/A{ 5602623SN/A uint8_t *dataPtr = (uint8_t *)&data; 5612623SN/A if (traceData) 5622623SN/A traceData->setData(data); 5632623SN/A data = htog(data); 5642623SN/A 5652623SN/A Fault fault = writeBytes(dataPtr, sizeof(data), addr, flags, res); 5662623SN/A if (fault == NoFault && data_write_req.isSwap()) { 5672623SN/A *res = gtoh((T)*res); 5682623SN/A } 5692623SN/A return fault; 5702623SN/A} 5712623SN/A 5722623SN/A 5732623SN/A#ifndef DOXYGEN_SHOULD_SKIP_THIS 5742623SN/A 5752623SN/Atemplate 5762623SN/AFault 5772623SN/AAtomicSimpleCPU::write(Twin32_t data, Addr addr, 5782623SN/A unsigned flags, uint64_t *res); 5792623SN/A 5802623SN/Atemplate 5812623SN/AFault 5822623SN/AAtomicSimpleCPU::write(Twin64_t data, Addr addr, 5832623SN/A unsigned flags, uint64_t *res); 5842623SN/A 5852623SN/Atemplate 5864940Snate@binkert.orgFault 5874940Snate@binkert.orgAtomicSimpleCPU::write(uint64_t data, Addr addr, 5885100Ssaidi@eecs.umich.edu unsigned flags, uint64_t *res); 5892623SN/A 5902623SN/Atemplate 5912623SN/AFault 5922623SN/AAtomicSimpleCPU::write(uint32_t data, Addr addr, 5933387Sgblack@eecs.umich.edu unsigned flags, uint64_t *res); 5943387Sgblack@eecs.umich.edu 5952626SN/Atemplate 5964870Sstever@eecs.umich.eduFault 5972623SN/AAtomicSimpleCPU::write(uint16_t data, Addr addr, 5982623SN/A unsigned flags, uint64_t *res); 5994182Sgblack@eecs.umich.edu 6004182Sgblack@eecs.umich.edutemplate 6014182Sgblack@eecs.umich.eduFault 6022662Sstever@eecs.umich.eduAtomicSimpleCPU::write(uint8_t data, Addr addr, 6034182Sgblack@eecs.umich.edu unsigned flags, uint64_t *res); 6044593Sgblack@eecs.umich.edu 6054593Sgblack@eecs.umich.edu#endif //DOXYGEN_SHOULD_SKIP_THIS 6064182Sgblack@eecs.umich.edu 6074870Sstever@eecs.umich.edutemplate<> 6084870Sstever@eecs.umich.eduFault 
6094870Sstever@eecs.umich.eduAtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res) 6102623SN/A{ 6114968Sacolyte@umich.edu return write(*(uint64_t*)&data, addr, flags, res); 6124968Sacolyte@umich.edu} 6134968Sacolyte@umich.edu 6144968Sacolyte@umich.edutemplate<> 6154968Sacolyte@umich.eduFault 6164986Ssaidi@eecs.umich.eduAtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res) 6174968Sacolyte@umich.edu{ 6184182Sgblack@eecs.umich.edu return write(*(uint32_t*)&data, addr, flags, res); 6194182Sgblack@eecs.umich.edu} 6204593Sgblack@eecs.umich.edu 6214182Sgblack@eecs.umich.edu 6222623SN/Atemplate<> 6233814Ssaidi@eecs.umich.eduFault 6245001Sgblack@eecs.umich.eduAtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res) 6254182Sgblack@eecs.umich.edu{ 6264998Sgblack@eecs.umich.edu return write((uint32_t)data, addr, flags, res); 6274998Sgblack@eecs.umich.edu} 6284998Sgblack@eecs.umich.edu 6294998Sgblack@eecs.umich.edu 6305001Sgblack@eecs.umich.eduvoid 6315001Sgblack@eecs.umich.eduAtomicSimpleCPU::tick() 6325001Sgblack@eecs.umich.edu{ 6335001Sgblack@eecs.umich.edu DPRINTF(SimpleCPU, "Tick\n"); 6345001Sgblack@eecs.umich.edu 6354998Sgblack@eecs.umich.edu Tick latency = 0; 6364182Sgblack@eecs.umich.edu 6374182Sgblack@eecs.umich.edu for (int i = 0; i < width || locked; ++i) { 6382623SN/A numCycles++; 6393814Ssaidi@eecs.umich.edu 6404539Sgblack@eecs.umich.edu if (!curStaticInst || !curStaticInst->isDelayedCommit()) 6414539Sgblack@eecs.umich.edu checkForInterrupts(); 6423814Ssaidi@eecs.umich.edu 6433814Ssaidi@eecs.umich.edu checkPcEventQueue(); 6442623SN/A 6454182Sgblack@eecs.umich.edu Fault fault = NoFault; 6465100Ssaidi@eecs.umich.edu 6472623SN/A TheISA::PCState pcState = thread->pcState(); 6485100Ssaidi@eecs.umich.edu 6495100Ssaidi@eecs.umich.edu bool needToFetch = !isRomMicroPC(pcState.microPC()) && 6505100Ssaidi@eecs.umich.edu !curMacroStaticInst; 6515100Ssaidi@eecs.umich.edu if (needToFetch) { 
6522803Ssaidi@eecs.umich.edu setupFetchRequest(&ifetch_req); 6535100Ssaidi@eecs.umich.edu fault = thread->itb->translateAtomic(&ifetch_req, tc, 6542623SN/A BaseTLB::Execute); 6552623SN/A } 6562623SN/A 6574377Sgblack@eecs.umich.edu if (fault == NoFault) { 6584182Sgblack@eecs.umich.edu Tick icache_latency = 0; 6592623SN/A bool icache_access = false; 6602623SN/A dcache_access = false; // assume no dcache access 6612626SN/A 6622626SN/A if (needToFetch) { 6632623SN/A // This is commented out because the predecoder would act like 6642623SN/A // a tiny cache otherwise. It wouldn't be flushed when needed 6652623SN/A // like the I cache. It should be flushed, and when that works 6662623SN/A // this code should be uncommented. 6672623SN/A //Fetch more instruction memory if necessary 6682623SN/A //if(predecoder.needMoreBytes()) 6692623SN/A //{ 6704762Snate@binkert.org icache_access = true; 6714762Snate@binkert.org Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq, 6722623SN/A Packet::Broadcast); 6732623SN/A ifetch_pkt.dataStatic(&inst); 6744762Snate@binkert.org 6752623SN/A if (hasPhysMemPort && ifetch_pkt.getAddr() == physMemAddr) 6762623SN/A icache_latency = physmemPort.sendAtomic(&ifetch_pkt); 6772623SN/A else 6782623SN/A icache_latency = icachePort.sendAtomic(&ifetch_pkt); 6792623SN/A 6803119Sktlim@umich.edu assert(!ifetch_pkt.isError()); 6812623SN/A 6823661Srdreslin@umich.edu // ifetch_req is initialized to read the instruction directly 6832623SN/A // into the CPU object's inst field. 
6842623SN/A //} 6852623SN/A } 6862623SN/A 6872623SN/A preExecute(); 6882901Ssaidi@eecs.umich.edu 6893170Sstever@eecs.umich.edu if (curStaticInst) { 6904776Sgblack@eecs.umich.edu fault = curStaticInst->execute(this, traceData); 6912623SN/A 6922623SN/A // keep an instruction count 6932623SN/A if (fault == NoFault) 6944997Sgblack@eecs.umich.edu countInst(); 6952623SN/A else if (traceData && !DTRACE(ExecFaulting)) { 6963617Sbinkertn@umich.edu delete traceData; 6973617Sbinkertn@umich.edu traceData = NULL; 6983617Sbinkertn@umich.edu } 6992623SN/A 7004762Snate@binkert.org postExecute(); 7014762Snate@binkert.org } 7024762Snate@binkert.org 7032623SN/A // @todo remove me after debugging with legion done 7042623SN/A if (curStaticInst && (!curStaticInst->isMicroop() || 7052623SN/A curStaticInst->isFirstMicroop())) 7062623SN/A instCnt++; 7072623SN/A 708 Tick stall_ticks = 0; 709 if (simulate_inst_stalls && icache_access) 710 stall_ticks += icache_latency; 711 712 if (simulate_data_stalls && dcache_access) 713 stall_ticks += dcache_latency; 714 715 if (stall_ticks) { 716 Tick stall_cycles = stall_ticks / ticks(1); 717 Tick aligned_stall_ticks = ticks(stall_cycles); 718 719 if (aligned_stall_ticks < stall_ticks) 720 aligned_stall_ticks += 1; 721 722 latency += aligned_stall_ticks; 723 } 724 725 } 726 if(fault != NoFault || !stayAtPC) 727 advancePC(fault); 728 } 729 730 // instruction takes at least one cycle 731 if (latency < ticks(1)) 732 latency = ticks(1); 733 734 if (_status != Idle) 735 schedule(tickEvent, curTick() + latency); 736} 737 738 739void 740AtomicSimpleCPU::printAddr(Addr a) 741{ 742 dcachePort.printAddr(a); 743} 744 745 746//////////////////////////////////////////////////////////////////////// 747// 748// AtomicSimpleCPU Simulation Object 749// 750AtomicSimpleCPU * 751AtomicSimpleCPUParams::create() 752{ 753 numThreads = 1; 754#if !FULL_SYSTEM 755 if (workload.size() != 1) 756 panic("only one workload allowed"); 757#endif 758 return new AtomicSimpleCPU(this); 
759} 760