/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
272665Ssaidi@eecs.umich.edu * 282665Ssaidi@eecs.umich.edu * Authors: Steve Reinhardt 292623SN/A */ 302623SN/A 313170Sstever@eecs.umich.edu#include "arch/locked_mem.hh" 323806Ssaidi@eecs.umich.edu#include "arch/mmaped_ipr.hh" 332623SN/A#include "arch/utility.hh" 344040Ssaidi@eecs.umich.edu#include "base/bigint.hh" 356658Snate@binkert.org#include "config/the_isa.hh" 362623SN/A#include "cpu/exetrace.hh" 372623SN/A#include "cpu/simple/atomic.hh" 383348Sbinkertn@umich.edu#include "mem/packet.hh" 393348Sbinkertn@umich.edu#include "mem/packet_access.hh" 404762Snate@binkert.org#include "params/AtomicSimpleCPU.hh" 412901Ssaidi@eecs.umich.edu#include "sim/system.hh" 422623SN/A 432623SN/Ausing namespace std; 442623SN/Ausing namespace TheISA; 452623SN/A 462623SN/AAtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c) 475606Snate@binkert.org : Event(CPU_Tick_Pri), cpu(c) 482623SN/A{ 492623SN/A} 502623SN/A 512623SN/A 522623SN/Avoid 532623SN/AAtomicSimpleCPU::TickEvent::process() 542623SN/A{ 552623SN/A cpu->tick(); 562623SN/A} 572623SN/A 582623SN/Aconst char * 595336Shines@cs.fsu.eduAtomicSimpleCPU::TickEvent::description() const 602623SN/A{ 614873Sstever@eecs.umich.edu return "AtomicSimpleCPU tick"; 622623SN/A} 632623SN/A 642856Srdreslin@umich.eduPort * 656227Snate@binkert.orgAtomicSimpleCPU::getPort(const string &if_name, int idx) 662856Srdreslin@umich.edu{ 672856Srdreslin@umich.edu if (if_name == "dcache_port") 682856Srdreslin@umich.edu return &dcachePort; 692856Srdreslin@umich.edu else if (if_name == "icache_port") 702856Srdreslin@umich.edu return &icachePort; 714968Sacolyte@umich.edu else if (if_name == "physmem_port") { 724968Sacolyte@umich.edu hasPhysMemPort = true; 734968Sacolyte@umich.edu return &physmemPort; 744968Sacolyte@umich.edu } 752856Srdreslin@umich.edu else 762856Srdreslin@umich.edu panic("No Such Port\n"); 772856Srdreslin@umich.edu} 782623SN/A 792623SN/Avoid 802623SN/AAtomicSimpleCPU::init() 812623SN/A{ 822623SN/A BaseCPU::init(); 832623SN/A#if FULL_SYSTEM 
846221Snate@binkert.org ThreadID size = threadContexts.size(); 856221Snate@binkert.org for (ThreadID i = 0; i < size; ++i) { 862680Sktlim@umich.edu ThreadContext *tc = threadContexts[i]; 872623SN/A 882623SN/A // initialize CPU, including PC 895714Shsul@eecs.umich.edu TheISA::initCPU(tc, tc->contextId()); 902623SN/A } 912623SN/A#endif 924968Sacolyte@umich.edu if (hasPhysMemPort) { 934968Sacolyte@umich.edu bool snoop = false; 944968Sacolyte@umich.edu AddrRangeList pmAddrList; 954968Sacolyte@umich.edu physmemPort.getPeerAddressRanges(pmAddrList, snoop); 964968Sacolyte@umich.edu physMemAddr = *pmAddrList.begin(); 974968Sacolyte@umich.edu } 985714Shsul@eecs.umich.edu // Atomic doesn't do MT right now, so contextId == threadId 995712Shsul@eecs.umich.edu ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT 1005712Shsul@eecs.umich.edu data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too 1015712Shsul@eecs.umich.edu data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too 1022623SN/A} 1032623SN/A 1042623SN/Abool 1053349Sbinkertn@umich.eduAtomicSimpleCPU::CpuPort::recvTiming(PacketPtr pkt) 1062623SN/A{ 1073184Srdreslin@umich.edu panic("AtomicSimpleCPU doesn't expect recvTiming callback!"); 1082623SN/A return true; 1092623SN/A} 1102623SN/A 1112623SN/ATick 1123349Sbinkertn@umich.eduAtomicSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt) 1132623SN/A{ 1143310Srdreslin@umich.edu //Snooping a coherence request, just return 1153649Srdreslin@umich.edu return 0; 1162623SN/A} 1172623SN/A 1182623SN/Avoid 1193349Sbinkertn@umich.eduAtomicSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt) 1202623SN/A{ 1213184Srdreslin@umich.edu //No internal storage to update, just return 1223184Srdreslin@umich.edu return; 1232623SN/A} 1242623SN/A 1252623SN/Avoid 1262623SN/AAtomicSimpleCPU::CpuPort::recvStatusChange(Status status) 1272623SN/A{ 1283647Srdreslin@umich.edu if (status == RangeChange) { 1293647Srdreslin@umich.edu if (!snoopRangeSent) { 
1303647Srdreslin@umich.edu snoopRangeSent = true; 1313647Srdreslin@umich.edu sendStatusChange(Port::RangeChange); 1323647Srdreslin@umich.edu } 1332626SN/A return; 1343647Srdreslin@umich.edu } 1352626SN/A 1362623SN/A panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!"); 1372623SN/A} 1382623SN/A 1392657Ssaidi@eecs.umich.eduvoid 1402623SN/AAtomicSimpleCPU::CpuPort::recvRetry() 1412623SN/A{ 1422623SN/A panic("AtomicSimpleCPU doesn't expect recvRetry callback!"); 1432623SN/A} 1442623SN/A 1454192Sktlim@umich.eduvoid 1464192Sktlim@umich.eduAtomicSimpleCPU::DcachePort::setPeer(Port *port) 1474192Sktlim@umich.edu{ 1484192Sktlim@umich.edu Port::setPeer(port); 1494192Sktlim@umich.edu 1504192Sktlim@umich.edu#if FULL_SYSTEM 1514192Sktlim@umich.edu // Update the ThreadContext's memory ports (Functional/Virtual 1524192Sktlim@umich.edu // Ports) 1535497Ssaidi@eecs.umich.edu cpu->tcBase()->connectMemPorts(cpu->tcBase()); 1544192Sktlim@umich.edu#endif 1554192Sktlim@umich.edu} 1562623SN/A 1575529Snate@binkert.orgAtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p) 1586078Sgblack@eecs.umich.edu : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false), 1595487Snate@binkert.org simulate_data_stalls(p->simulate_data_stalls), 1605487Snate@binkert.org simulate_inst_stalls(p->simulate_inst_stalls), 1614968Sacolyte@umich.edu icachePort(name() + "-iport", this), dcachePort(name() + "-iport", this), 1624968Sacolyte@umich.edu physmemPort(name() + "-iport", this), hasPhysMemPort(false) 1632623SN/A{ 1642623SN/A _status = Idle; 1652623SN/A 1663647Srdreslin@umich.edu icachePort.snoopRangeSent = false; 1673647Srdreslin@umich.edu dcachePort.snoopRangeSent = false; 1683647Srdreslin@umich.edu 1692623SN/A} 1702623SN/A 1712623SN/A 1722623SN/AAtomicSimpleCPU::~AtomicSimpleCPU() 1732623SN/A{ 1746775SBrad.Beckmann@amd.com if (tickEvent.scheduled()) { 1756775SBrad.Beckmann@amd.com deschedule(tickEvent); 1766775SBrad.Beckmann@amd.com } 1772623SN/A} 1782623SN/A 1792623SN/Avoid 
1802623SN/AAtomicSimpleCPU::serialize(ostream &os) 1812623SN/A{ 1822915Sktlim@umich.edu SimObject::State so_state = SimObject::getState(); 1832915Sktlim@umich.edu SERIALIZE_ENUM(so_state); 1846078Sgblack@eecs.umich.edu SERIALIZE_SCALAR(locked); 1853145Shsul@eecs.umich.edu BaseSimpleCPU::serialize(os); 1862623SN/A nameOut(os, csprintf("%s.tickEvent", name())); 1872623SN/A tickEvent.serialize(os); 1882623SN/A} 1892623SN/A 1902623SN/Avoid 1912623SN/AAtomicSimpleCPU::unserialize(Checkpoint *cp, const string §ion) 1922623SN/A{ 1932915Sktlim@umich.edu SimObject::State so_state; 1942915Sktlim@umich.edu UNSERIALIZE_ENUM(so_state); 1956078Sgblack@eecs.umich.edu UNSERIALIZE_SCALAR(locked); 1963145Shsul@eecs.umich.edu BaseSimpleCPU::unserialize(cp, section); 1972915Sktlim@umich.edu tickEvent.unserialize(cp, csprintf("%s.tickEvent", section)); 1982915Sktlim@umich.edu} 1992915Sktlim@umich.edu 2002915Sktlim@umich.eduvoid 2012915Sktlim@umich.eduAtomicSimpleCPU::resume() 2022915Sktlim@umich.edu{ 2035220Ssaidi@eecs.umich.edu if (_status == Idle || _status == SwitchedOut) 2045220Ssaidi@eecs.umich.edu return; 2055220Ssaidi@eecs.umich.edu 2064940Snate@binkert.org DPRINTF(SimpleCPU, "Resume\n"); 2075220Ssaidi@eecs.umich.edu assert(system->getMemoryMode() == Enums::atomic); 2083324Shsul@eecs.umich.edu 2095220Ssaidi@eecs.umich.edu changeState(SimObject::Running); 2105220Ssaidi@eecs.umich.edu if (thread->status() == ThreadContext::Active) { 2115606Snate@binkert.org if (!tickEvent.scheduled()) 2125606Snate@binkert.org schedule(tickEvent, nextCycle()); 2132915Sktlim@umich.edu } 2142623SN/A} 2152623SN/A 2162623SN/Avoid 2172798Sktlim@umich.eduAtomicSimpleCPU::switchOut() 2182623SN/A{ 2195496Ssaidi@eecs.umich.edu assert(_status == Running || _status == Idle); 2202798Sktlim@umich.edu _status = SwitchedOut; 2212623SN/A 2222798Sktlim@umich.edu tickEvent.squash(); 2232623SN/A} 2242623SN/A 2252623SN/A 2262623SN/Avoid 2272623SN/AAtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU) 2282623SN/A{ 
2294192Sktlim@umich.edu BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort); 2302623SN/A 2312623SN/A assert(!tickEvent.scheduled()); 2322623SN/A 2332680Sktlim@umich.edu // if any of this CPU's ThreadContexts are active, mark the CPU as 2342623SN/A // running and schedule its tick event. 2356221Snate@binkert.org ThreadID size = threadContexts.size(); 2366221Snate@binkert.org for (ThreadID i = 0; i < size; ++i) { 2372680Sktlim@umich.edu ThreadContext *tc = threadContexts[i]; 2382680Sktlim@umich.edu if (tc->status() == ThreadContext::Active && _status != Running) { 2392623SN/A _status = Running; 2405606Snate@binkert.org schedule(tickEvent, nextCycle()); 2412623SN/A break; 2422623SN/A } 2432623SN/A } 2443512Sktlim@umich.edu if (_status != Running) { 2453512Sktlim@umich.edu _status = Idle; 2463512Sktlim@umich.edu } 2475169Ssaidi@eecs.umich.edu assert(threadContexts.size() == 1); 2485712Shsul@eecs.umich.edu ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT 2495712Shsul@eecs.umich.edu data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too 2505712Shsul@eecs.umich.edu data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too 2512623SN/A} 2522623SN/A 2532623SN/A 2542623SN/Avoid 2552623SN/AAtomicSimpleCPU::activateContext(int thread_num, int delay) 2562623SN/A{ 2574940Snate@binkert.org DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay); 2584940Snate@binkert.org 2592623SN/A assert(thread_num == 0); 2602683Sktlim@umich.edu assert(thread); 2612623SN/A 2622623SN/A assert(_status == Idle); 2632623SN/A assert(!tickEvent.scheduled()); 2642623SN/A 2652623SN/A notIdleFraction++; 2665101Ssaidi@eecs.umich.edu numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend); 2673686Sktlim@umich.edu 2683430Sgblack@eecs.umich.edu //Make sure ticks are still on multiples of cycles 2695606Snate@binkert.org schedule(tickEvent, nextCycle(curTick + ticks(delay))); 2702623SN/A _status = Running; 2712623SN/A} 
2722623SN/A 2732623SN/A 2742623SN/Avoid 2752623SN/AAtomicSimpleCPU::suspendContext(int thread_num) 2762623SN/A{ 2774940Snate@binkert.org DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num); 2784940Snate@binkert.org 2792623SN/A assert(thread_num == 0); 2802683Sktlim@umich.edu assert(thread); 2812623SN/A 2826043Sgblack@eecs.umich.edu if (_status == Idle) 2836043Sgblack@eecs.umich.edu return; 2846043Sgblack@eecs.umich.edu 2852623SN/A assert(_status == Running); 2862626SN/A 2872626SN/A // tick event may not be scheduled if this gets called from inside 2882626SN/A // an instruction's execution, e.g. "quiesce" 2892626SN/A if (tickEvent.scheduled()) 2905606Snate@binkert.org deschedule(tickEvent); 2912623SN/A 2922623SN/A notIdleFraction--; 2932623SN/A _status = Idle; 2942623SN/A} 2952623SN/A 2962623SN/A 2972623SN/Atemplate <class T> 2982623SN/AFault 2992623SN/AAtomicSimpleCPU::read(Addr addr, T &data, unsigned flags) 3002623SN/A{ 3013169Sstever@eecs.umich.edu // use the CPU's statically allocated read request and packet objects 3024870Sstever@eecs.umich.edu Request *req = &data_read_req; 3032623SN/A 3042623SN/A if (traceData) { 3052623SN/A traceData->setAddr(addr); 3062623SN/A } 3072623SN/A 3084999Sgblack@eecs.umich.edu //The block size of our peer. 3096227Snate@binkert.org unsigned blockSize = dcachePort.peerBlockSize(); 3104999Sgblack@eecs.umich.edu //The size of the data we're trying to read. 3114999Sgblack@eecs.umich.edu int dataSize = sizeof(T); 3122623SN/A 3134999Sgblack@eecs.umich.edu uint8_t * dataPtr = (uint8_t *)&data; 3142623SN/A 3154999Sgblack@eecs.umich.edu //The address of the second part of this access if it needs to be split 3164999Sgblack@eecs.umich.edu //across a cache line boundary. 
3174999Sgblack@eecs.umich.edu Addr secondAddr = roundDown(addr + dataSize - 1, blockSize); 3184999Sgblack@eecs.umich.edu 3194999Sgblack@eecs.umich.edu if(secondAddr > addr) 3204999Sgblack@eecs.umich.edu dataSize = secondAddr - addr; 3214999Sgblack@eecs.umich.edu 3224999Sgblack@eecs.umich.edu dcache_latency = 0; 3234999Sgblack@eecs.umich.edu 3244999Sgblack@eecs.umich.edu while(1) { 3254999Sgblack@eecs.umich.edu req->setVirt(0, addr, dataSize, flags, thread->readPC()); 3264999Sgblack@eecs.umich.edu 3274999Sgblack@eecs.umich.edu // translate to physical address 3286023Snate@binkert.org Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read); 3294999Sgblack@eecs.umich.edu 3304999Sgblack@eecs.umich.edu // Now do the access. 3316623Sgblack@eecs.umich.edu if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) { 3324999Sgblack@eecs.umich.edu Packet pkt = Packet(req, 3336102Sgblack@eecs.umich.edu req->isLLSC() ? MemCmd::LoadLockedReq : MemCmd::ReadReq, 3344999Sgblack@eecs.umich.edu Packet::Broadcast); 3354999Sgblack@eecs.umich.edu pkt.dataStatic(dataPtr); 3364999Sgblack@eecs.umich.edu 3374999Sgblack@eecs.umich.edu if (req->isMmapedIpr()) 3384999Sgblack@eecs.umich.edu dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt); 3394999Sgblack@eecs.umich.edu else { 3404999Sgblack@eecs.umich.edu if (hasPhysMemPort && pkt.getAddr() == physMemAddr) 3414999Sgblack@eecs.umich.edu dcache_latency += physmemPort.sendAtomic(&pkt); 3424999Sgblack@eecs.umich.edu else 3434999Sgblack@eecs.umich.edu dcache_latency += dcachePort.sendAtomic(&pkt); 3444999Sgblack@eecs.umich.edu } 3454999Sgblack@eecs.umich.edu dcache_access = true; 3465012Sgblack@eecs.umich.edu 3474999Sgblack@eecs.umich.edu assert(!pkt.isError()); 3484999Sgblack@eecs.umich.edu 3496102Sgblack@eecs.umich.edu if (req->isLLSC()) { 3504999Sgblack@eecs.umich.edu TheISA::handleLockedRead(thread, req); 3514999Sgblack@eecs.umich.edu } 3524968Sacolyte@umich.edu } 3534986Ssaidi@eecs.umich.edu 
3544999Sgblack@eecs.umich.edu // This will need a new way to tell if it has a dcache attached. 3554999Sgblack@eecs.umich.edu if (req->isUncacheable()) 3564999Sgblack@eecs.umich.edu recordEvent("Uncached Read"); 3574762Snate@binkert.org 3584999Sgblack@eecs.umich.edu //If there's a fault, return it 3596739Sgblack@eecs.umich.edu if (fault != NoFault) { 3606739Sgblack@eecs.umich.edu if (req->isPrefetch()) { 3616739Sgblack@eecs.umich.edu return NoFault; 3626739Sgblack@eecs.umich.edu } else { 3636739Sgblack@eecs.umich.edu return fault; 3646739Sgblack@eecs.umich.edu } 3656739Sgblack@eecs.umich.edu } 3666739Sgblack@eecs.umich.edu 3674999Sgblack@eecs.umich.edu //If we don't need to access a second cache line, stop now. 3684999Sgblack@eecs.umich.edu if (secondAddr <= addr) 3694999Sgblack@eecs.umich.edu { 3704999Sgblack@eecs.umich.edu data = gtoh(data); 3715408Sgblack@eecs.umich.edu if (traceData) { 3725408Sgblack@eecs.umich.edu traceData->setData(data); 3735408Sgblack@eecs.umich.edu } 3746078Sgblack@eecs.umich.edu if (req->isLocked() && fault == NoFault) { 3756078Sgblack@eecs.umich.edu assert(!locked); 3766078Sgblack@eecs.umich.edu locked = true; 3776078Sgblack@eecs.umich.edu } 3784999Sgblack@eecs.umich.edu return fault; 3794968Sacolyte@umich.edu } 3803170Sstever@eecs.umich.edu 3814999Sgblack@eecs.umich.edu /* 3824999Sgblack@eecs.umich.edu * Set up for accessing the second cache line. 3834999Sgblack@eecs.umich.edu */ 3844999Sgblack@eecs.umich.edu 3854999Sgblack@eecs.umich.edu //Move the pointer we're reading into to the correct location. 3864999Sgblack@eecs.umich.edu dataPtr += dataSize; 3874999Sgblack@eecs.umich.edu //Adjust the size to get the remaining bytes. 3884999Sgblack@eecs.umich.edu dataSize = addr + sizeof(T) - secondAddr; 3894999Sgblack@eecs.umich.edu //And access the right address. 
3904999Sgblack@eecs.umich.edu addr = secondAddr; 3912623SN/A } 3922623SN/A} 3932623SN/A 3942623SN/A#ifndef DOXYGEN_SHOULD_SKIP_THIS 3952623SN/A 3962623SN/Atemplate 3972623SN/AFault 3984115Ssaidi@eecs.umich.eduAtomicSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags); 3994115Ssaidi@eecs.umich.edu 4004115Ssaidi@eecs.umich.edutemplate 4014115Ssaidi@eecs.umich.eduFault 4024040Ssaidi@eecs.umich.eduAtomicSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags); 4034040Ssaidi@eecs.umich.edu 4044040Ssaidi@eecs.umich.edutemplate 4054040Ssaidi@eecs.umich.eduFault 4062623SN/AAtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags); 4072623SN/A 4082623SN/Atemplate 4092623SN/AFault 4102623SN/AAtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags); 4112623SN/A 4122623SN/Atemplate 4132623SN/AFault 4142623SN/AAtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags); 4152623SN/A 4162623SN/Atemplate 4172623SN/AFault 4182623SN/AAtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags); 4192623SN/A 4202623SN/A#endif //DOXYGEN_SHOULD_SKIP_THIS 4212623SN/A 4222623SN/Atemplate<> 4232623SN/AFault 4242623SN/AAtomicSimpleCPU::read(Addr addr, double &data, unsigned flags) 4252623SN/A{ 4262623SN/A return read(addr, *(uint64_t*)&data, flags); 4272623SN/A} 4282623SN/A 4292623SN/Atemplate<> 4302623SN/AFault 4312623SN/AAtomicSimpleCPU::read(Addr addr, float &data, unsigned flags) 4322623SN/A{ 4332623SN/A return read(addr, *(uint32_t*)&data, flags); 4342623SN/A} 4352623SN/A 4362623SN/A 4372623SN/Atemplate<> 4382623SN/AFault 4392623SN/AAtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags) 4402623SN/A{ 4412623SN/A return read(addr, (uint32_t&)data, flags); 4422623SN/A} 4432623SN/A 4442623SN/A 4452623SN/Atemplate <class T> 4462623SN/AFault 4472623SN/AAtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res) 4482623SN/A{ 4493169Sstever@eecs.umich.edu // use the CPU's statically allocated write request and packet objects 
4504870Sstever@eecs.umich.edu Request *req = &data_write_req; 4512623SN/A 4522623SN/A if (traceData) { 4532623SN/A traceData->setAddr(addr); 4542623SN/A } 4552623SN/A 4564999Sgblack@eecs.umich.edu //The block size of our peer. 4576227Snate@binkert.org unsigned blockSize = dcachePort.peerBlockSize(); 4584999Sgblack@eecs.umich.edu //The size of the data we're trying to read. 4594999Sgblack@eecs.umich.edu int dataSize = sizeof(T); 4602623SN/A 4614999Sgblack@eecs.umich.edu uint8_t * dataPtr = (uint8_t *)&data; 4622623SN/A 4634999Sgblack@eecs.umich.edu //The address of the second part of this access if it needs to be split 4644999Sgblack@eecs.umich.edu //across a cache line boundary. 4654999Sgblack@eecs.umich.edu Addr secondAddr = roundDown(addr + dataSize - 1, blockSize); 4664999Sgblack@eecs.umich.edu 4674999Sgblack@eecs.umich.edu if(secondAddr > addr) 4684999Sgblack@eecs.umich.edu dataSize = secondAddr - addr; 4694999Sgblack@eecs.umich.edu 4704999Sgblack@eecs.umich.edu dcache_latency = 0; 4714999Sgblack@eecs.umich.edu 4724999Sgblack@eecs.umich.edu while(1) { 4734999Sgblack@eecs.umich.edu req->setVirt(0, addr, dataSize, flags, thread->readPC()); 4744999Sgblack@eecs.umich.edu 4754999Sgblack@eecs.umich.edu // translate to physical address 4766023Snate@binkert.org Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write); 4774999Sgblack@eecs.umich.edu 4784999Sgblack@eecs.umich.edu // Now do the access. 
4794999Sgblack@eecs.umich.edu if (fault == NoFault) { 4804999Sgblack@eecs.umich.edu MemCmd cmd = MemCmd::WriteReq; // default 4814999Sgblack@eecs.umich.edu bool do_access = true; // flag to suppress cache access 4824999Sgblack@eecs.umich.edu 4836102Sgblack@eecs.umich.edu if (req->isLLSC()) { 4844999Sgblack@eecs.umich.edu cmd = MemCmd::StoreCondReq; 4854999Sgblack@eecs.umich.edu do_access = TheISA::handleLockedWrite(thread, req); 4864999Sgblack@eecs.umich.edu } else if (req->isSwap()) { 4874999Sgblack@eecs.umich.edu cmd = MemCmd::SwapReq; 4884999Sgblack@eecs.umich.edu if (req->isCondSwap()) { 4894999Sgblack@eecs.umich.edu assert(res); 4904999Sgblack@eecs.umich.edu req->setExtraData(*res); 4914999Sgblack@eecs.umich.edu } 4924999Sgblack@eecs.umich.edu } 4934999Sgblack@eecs.umich.edu 4946623Sgblack@eecs.umich.edu if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) { 4954999Sgblack@eecs.umich.edu Packet pkt = Packet(req, cmd, Packet::Broadcast); 4964999Sgblack@eecs.umich.edu pkt.dataStatic(dataPtr); 4974999Sgblack@eecs.umich.edu 4984999Sgblack@eecs.umich.edu if (req->isMmapedIpr()) { 4994999Sgblack@eecs.umich.edu dcache_latency += 5004999Sgblack@eecs.umich.edu TheISA::handleIprWrite(thread->getTC(), &pkt); 5014999Sgblack@eecs.umich.edu } else { 5024999Sgblack@eecs.umich.edu //XXX This needs to be outside of the loop in order to 5034999Sgblack@eecs.umich.edu //work properly for cache line boundary crossing 5044999Sgblack@eecs.umich.edu //accesses in transendian simulations. 
5054999Sgblack@eecs.umich.edu data = htog(data); 5064999Sgblack@eecs.umich.edu if (hasPhysMemPort && pkt.getAddr() == physMemAddr) 5074999Sgblack@eecs.umich.edu dcache_latency += physmemPort.sendAtomic(&pkt); 5084999Sgblack@eecs.umich.edu else 5094999Sgblack@eecs.umich.edu dcache_latency += dcachePort.sendAtomic(&pkt); 5104999Sgblack@eecs.umich.edu } 5114999Sgblack@eecs.umich.edu dcache_access = true; 5124999Sgblack@eecs.umich.edu assert(!pkt.isError()); 5134999Sgblack@eecs.umich.edu 5144999Sgblack@eecs.umich.edu if (req->isSwap()) { 5154999Sgblack@eecs.umich.edu assert(res); 5164999Sgblack@eecs.umich.edu *res = pkt.get<T>(); 5174999Sgblack@eecs.umich.edu } 5184999Sgblack@eecs.umich.edu } 5194999Sgblack@eecs.umich.edu 5204999Sgblack@eecs.umich.edu if (res && !req->isSwap()) { 5214999Sgblack@eecs.umich.edu *res = req->getExtraData(); 5224878Sstever@eecs.umich.edu } 5234040Ssaidi@eecs.umich.edu } 5244040Ssaidi@eecs.umich.edu 5254999Sgblack@eecs.umich.edu // This will need a new way to tell if it's hooked up to a cache or not. 5264999Sgblack@eecs.umich.edu if (req->isUncacheable()) 5274999Sgblack@eecs.umich.edu recordEvent("Uncached Write"); 5282631SN/A 5294999Sgblack@eecs.umich.edu //If there's a fault or we don't need to access a second cache line, 5304999Sgblack@eecs.umich.edu //stop now. 5314999Sgblack@eecs.umich.edu if (fault != NoFault || secondAddr <= addr) 5324999Sgblack@eecs.umich.edu { 5334999Sgblack@eecs.umich.edu // If the write needs to have a fault on the access, consider 5344999Sgblack@eecs.umich.edu // calling changeStatus() and changing it to "bad addr write" 5354999Sgblack@eecs.umich.edu // or something. 
5365408Sgblack@eecs.umich.edu if (traceData) { 5376012Ssteve.reinhardt@amd.com traceData->setData(gtoh(data)); 5385408Sgblack@eecs.umich.edu } 5396078Sgblack@eecs.umich.edu if (req->isLocked() && fault == NoFault) { 5406078Sgblack@eecs.umich.edu assert(locked); 5416078Sgblack@eecs.umich.edu locked = false; 5426078Sgblack@eecs.umich.edu } 5436739Sgblack@eecs.umich.edu if (fault != NoFault && req->isPrefetch()) { 5446739Sgblack@eecs.umich.edu return NoFault; 5456739Sgblack@eecs.umich.edu } else { 5466739Sgblack@eecs.umich.edu return fault; 5476739Sgblack@eecs.umich.edu } 5483170Sstever@eecs.umich.edu } 5493170Sstever@eecs.umich.edu 5504999Sgblack@eecs.umich.edu /* 5514999Sgblack@eecs.umich.edu * Set up for accessing the second cache line. 5524999Sgblack@eecs.umich.edu */ 5534999Sgblack@eecs.umich.edu 5544999Sgblack@eecs.umich.edu //Move the pointer we're reading into to the correct location. 5554999Sgblack@eecs.umich.edu dataPtr += dataSize; 5564999Sgblack@eecs.umich.edu //Adjust the size to get the remaining bytes. 5574999Sgblack@eecs.umich.edu dataSize = addr + sizeof(T) - secondAddr; 5584999Sgblack@eecs.umich.edu //And access the right address. 
5594999Sgblack@eecs.umich.edu addr = secondAddr; 5602623SN/A } 5612623SN/A} 5622623SN/A 5632623SN/A 5642623SN/A#ifndef DOXYGEN_SHOULD_SKIP_THIS 5654224Sgblack@eecs.umich.edu 5664224Sgblack@eecs.umich.edutemplate 5674224Sgblack@eecs.umich.eduFault 5684224Sgblack@eecs.umich.eduAtomicSimpleCPU::write(Twin32_t data, Addr addr, 5694224Sgblack@eecs.umich.edu unsigned flags, uint64_t *res); 5704224Sgblack@eecs.umich.edu 5714224Sgblack@eecs.umich.edutemplate 5724224Sgblack@eecs.umich.eduFault 5734224Sgblack@eecs.umich.eduAtomicSimpleCPU::write(Twin64_t data, Addr addr, 5744224Sgblack@eecs.umich.edu unsigned flags, uint64_t *res); 5754224Sgblack@eecs.umich.edu 5762623SN/Atemplate 5772623SN/AFault 5782623SN/AAtomicSimpleCPU::write(uint64_t data, Addr addr, 5792623SN/A unsigned flags, uint64_t *res); 5802623SN/A 5812623SN/Atemplate 5822623SN/AFault 5832623SN/AAtomicSimpleCPU::write(uint32_t data, Addr addr, 5842623SN/A unsigned flags, uint64_t *res); 5852623SN/A 5862623SN/Atemplate 5872623SN/AFault 5882623SN/AAtomicSimpleCPU::write(uint16_t data, Addr addr, 5892623SN/A unsigned flags, uint64_t *res); 5902623SN/A 5912623SN/Atemplate 5922623SN/AFault 5932623SN/AAtomicSimpleCPU::write(uint8_t data, Addr addr, 5942623SN/A unsigned flags, uint64_t *res); 5952623SN/A 5962623SN/A#endif //DOXYGEN_SHOULD_SKIP_THIS 5972623SN/A 5982623SN/Atemplate<> 5992623SN/AFault 6002623SN/AAtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res) 6012623SN/A{ 6022623SN/A return write(*(uint64_t*)&data, addr, flags, res); 6032623SN/A} 6042623SN/A 6052623SN/Atemplate<> 6062623SN/AFault 6072623SN/AAtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res) 6082623SN/A{ 6092623SN/A return write(*(uint32_t*)&data, addr, flags, res); 6102623SN/A} 6112623SN/A 6122623SN/A 6132623SN/Atemplate<> 6142623SN/AFault 6152623SN/AAtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res) 6162623SN/A{ 6172623SN/A return write((uint32_t)data, addr, flags, res); 
6182623SN/A} 6192623SN/A 6202623SN/A 6212623SN/Avoid 6222623SN/AAtomicSimpleCPU::tick() 6232623SN/A{ 6244940Snate@binkert.org DPRINTF(SimpleCPU, "Tick\n"); 6254940Snate@binkert.org 6265487Snate@binkert.org Tick latency = 0; 6272623SN/A 6286078Sgblack@eecs.umich.edu for (int i = 0; i < width || locked; ++i) { 6292623SN/A numCycles++; 6302623SN/A 6313387Sgblack@eecs.umich.edu if (!curStaticInst || !curStaticInst->isDelayedCommit()) 6323387Sgblack@eecs.umich.edu checkForInterrupts(); 6332626SN/A 6345348Ssaidi@eecs.umich.edu checkPcEventQueue(); 6355348Ssaidi@eecs.umich.edu 6365669Sgblack@eecs.umich.edu Fault fault = NoFault; 6375669Sgblack@eecs.umich.edu 6385669Sgblack@eecs.umich.edu bool fromRom = isRomMicroPC(thread->readMicroPC()); 6395914Sgblack@eecs.umich.edu if (!fromRom && !curMacroStaticInst) { 6405894Sgblack@eecs.umich.edu setupFetchRequest(&ifetch_req); 6416023Snate@binkert.org fault = thread->itb->translateAtomic(&ifetch_req, tc, 6426023Snate@binkert.org BaseTLB::Execute); 6435894Sgblack@eecs.umich.edu } 6442623SN/A 6452623SN/A if (fault == NoFault) { 6464182Sgblack@eecs.umich.edu Tick icache_latency = 0; 6474182Sgblack@eecs.umich.edu bool icache_access = false; 6484182Sgblack@eecs.umich.edu dcache_access = false; // assume no dcache access 6492662Sstever@eecs.umich.edu 6505914Sgblack@eecs.umich.edu if (!fromRom && !curMacroStaticInst) { 6515694Sgblack@eecs.umich.edu // This is commented out because the predecoder would act like 6525694Sgblack@eecs.umich.edu // a tiny cache otherwise. It wouldn't be flushed when needed 6535694Sgblack@eecs.umich.edu // like the I cache. It should be flushed, and when that works 6545694Sgblack@eecs.umich.edu // this code should be uncommented. 
6555669Sgblack@eecs.umich.edu //Fetch more instruction memory if necessary 6565669Sgblack@eecs.umich.edu //if(predecoder.needMoreBytes()) 6575669Sgblack@eecs.umich.edu //{ 6585669Sgblack@eecs.umich.edu icache_access = true; 6595669Sgblack@eecs.umich.edu Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq, 6605669Sgblack@eecs.umich.edu Packet::Broadcast); 6615669Sgblack@eecs.umich.edu ifetch_pkt.dataStatic(&inst); 6622623SN/A 6635669Sgblack@eecs.umich.edu if (hasPhysMemPort && ifetch_pkt.getAddr() == physMemAddr) 6645669Sgblack@eecs.umich.edu icache_latency = physmemPort.sendAtomic(&ifetch_pkt); 6655669Sgblack@eecs.umich.edu else 6665669Sgblack@eecs.umich.edu icache_latency = icachePort.sendAtomic(&ifetch_pkt); 6674968Sacolyte@umich.edu 6685669Sgblack@eecs.umich.edu assert(!ifetch_pkt.isError()); 6694968Sacolyte@umich.edu 6705669Sgblack@eecs.umich.edu // ifetch_req is initialized to read the instruction directly 6715669Sgblack@eecs.umich.edu // into the CPU object's inst field. 6725669Sgblack@eecs.umich.edu //} 6735669Sgblack@eecs.umich.edu } 6744182Sgblack@eecs.umich.edu 6752623SN/A preExecute(); 6763814Ssaidi@eecs.umich.edu 6775001Sgblack@eecs.umich.edu if (curStaticInst) { 6784182Sgblack@eecs.umich.edu fault = curStaticInst->execute(this, traceData); 6794998Sgblack@eecs.umich.edu 6804998Sgblack@eecs.umich.edu // keep an instruction count 6814998Sgblack@eecs.umich.edu if (fault == NoFault) 6824998Sgblack@eecs.umich.edu countInst(); 6835001Sgblack@eecs.umich.edu else if (traceData) { 6845001Sgblack@eecs.umich.edu // If there was a fault, we should trace this instruction. 
6855001Sgblack@eecs.umich.edu delete traceData; 6865001Sgblack@eecs.umich.edu traceData = NULL; 6875001Sgblack@eecs.umich.edu } 6884998Sgblack@eecs.umich.edu 6894182Sgblack@eecs.umich.edu postExecute(); 6904182Sgblack@eecs.umich.edu } 6912623SN/A 6923814Ssaidi@eecs.umich.edu // @todo remove me after debugging with legion done 6934539Sgblack@eecs.umich.edu if (curStaticInst && (!curStaticInst->isMicroop() || 6944539Sgblack@eecs.umich.edu curStaticInst->isFirstMicroop())) 6953814Ssaidi@eecs.umich.edu instCnt++; 6963814Ssaidi@eecs.umich.edu 6975487Snate@binkert.org Tick stall_ticks = 0; 6985487Snate@binkert.org if (simulate_inst_stalls && icache_access) 6995487Snate@binkert.org stall_ticks += icache_latency; 7005487Snate@binkert.org 7015487Snate@binkert.org if (simulate_data_stalls && dcache_access) 7025487Snate@binkert.org stall_ticks += dcache_latency; 7035487Snate@binkert.org 7045487Snate@binkert.org if (stall_ticks) { 7055487Snate@binkert.org Tick stall_cycles = stall_ticks / ticks(1); 7065487Snate@binkert.org Tick aligned_stall_ticks = ticks(stall_cycles); 7075487Snate@binkert.org 7085487Snate@binkert.org if (aligned_stall_ticks < stall_ticks) 7095487Snate@binkert.org aligned_stall_ticks += 1; 7105487Snate@binkert.org 7115487Snate@binkert.org latency += aligned_stall_ticks; 7122623SN/A } 7132623SN/A 7142623SN/A } 7154377Sgblack@eecs.umich.edu if(fault != NoFault || !stayAtPC) 7164182Sgblack@eecs.umich.edu advancePC(fault); 7172623SN/A } 7182623SN/A 7195487Snate@binkert.org // instruction takes at least one cycle 7205487Snate@binkert.org if (latency < ticks(1)) 7215487Snate@binkert.org latency = ticks(1); 7225487Snate@binkert.org 7232626SN/A if (_status != Idle) 7245606Snate@binkert.org schedule(tickEvent, curTick + latency); 7252623SN/A} 7262623SN/A 7272623SN/A 7285315Sstever@gmail.comvoid 7295315Sstever@gmail.comAtomicSimpleCPU::printAddr(Addr a) 7305315Sstever@gmail.com{ 7315315Sstever@gmail.com dcachePort.printAddr(a); 7325315Sstever@gmail.com} 
7335315Sstever@gmail.com 7345315Sstever@gmail.com 7352623SN/A//////////////////////////////////////////////////////////////////////// 7362623SN/A// 7372623SN/A// AtomicSimpleCPU Simulation Object 7382623SN/A// 7394762Snate@binkert.orgAtomicSimpleCPU * 7404762Snate@binkert.orgAtomicSimpleCPUParams::create() 7412623SN/A{ 7425529Snate@binkert.org numThreads = 1; 7435529Snate@binkert.org#if !FULL_SYSTEM 7444762Snate@binkert.org if (workload.size() != 1) 7454762Snate@binkert.org panic("only one workload allowed"); 7462623SN/A#endif 7475529Snate@binkert.org return new AtomicSimpleCPU(this); 7482623SN/A} 749