atomic.cc revision 6221
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmaped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/atomic.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

Port *
AtomicSimpleCPU::getPort(const std::string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else if (if_name == "physmem_port") {
        hasPhysMemPort = true;
        return &physmemPort;
    }
    else
        panic("No Such Port\n");
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, tc->contextId());
    }
#endif
    if (hasPhysMemPort) {
        bool snoop = false;
        AddrRangeList pmAddrList;
        physmemPort.getPeerAddressRanges(pmAddrList, snoop);
        physMemAddr = *pmAddrList.begin();
    }
    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

bool
AtomicSimpleCPU::CpuPort::recvTiming(PacketPtr pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvTiming callback!");
    return true;
}

Tick
AtomicSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    //Snooping a coherence request, just return
    return 0;
}

void
AtomicSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    //No internal storage to update, just return
    return;
}

void
AtomicSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!");
}

void
AtomicSimpleCPU::CpuPort::recvRetry()
{
    panic("AtomicSimpleCPU doesn't expect recvRetry callback!");
}

void
AtomicSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + "-iport", this), dcachePort(name() + "-iport", this),
      physmemPort(name() + "-iport", this), hasPhysMemPort(false)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;

}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
}

void
AtomicSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    SERIALIZE_SCALAR(locked);
    BaseSimpleCPU::serialize(os);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
}

void
AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    UNSERIALIZE_SCALAR(locked);
    BaseSimpleCPU::unserialize(cp, section);
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
}

void
AtomicSimpleCPU::resume()
{
    if (_status == Idle || _status == SwitchedOut)
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    assert(system->getMemoryMode() == Enums::atomic);

    changeState(SimObject::Running);
    if (thread->status() == ThreadContext::Active) {
        if (!tickEvent.scheduled())
            schedule(tickEvent, nextCycle());
    }
}

void
AtomicSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;

    tickEvent.squash();
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            schedule(tickEvent, nextCycle());
            break;
        }
    }
    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}


void
AtomicSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, nextCycle(curTick + ticks(delay)));
    _status = Running;
}


void
AtomicSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction--;
    _status = Idle;
}


template <class T>
Fault
AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to read.
    int dataSize = sizeof(T);

    uint8_t * dataPtr = (uint8_t *)&data;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);

    if(secondAddr > addr)
        dataSize = secondAddr - addr;

    dcache_latency = 0;

    while(1) {
        req->setVirt(0, addr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault) {
            Packet pkt = Packet(req,
                    req->isLLSC() ? MemCmd::LoadLockedReq : MemCmd::ReadReq,
                    Packet::Broadcast);
            pkt.dataStatic(dataPtr);

            if (req->isMmapedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                    dcache_latency += physmemPort.sendAtomic(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        // This will need a new way to tell if it has a dcache attached.
        if (req->isUncacheable())
            recordEvent("Uncached Read");

        //If there's a fault, return it
        if (fault != NoFault)
            return fault;
        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            data = gtoh(data);
            if (traceData) {
                traceData->setData(data);
            }
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        dataPtr += dataSize;
        //Adjust the size to get the remaining bytes.
        dataSize = addr + sizeof(T) - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
AtomicSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
AtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}


template <class T>
Fault
AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to write.
    int dataSize = sizeof(T);

    uint8_t * dataPtr = (uint8_t *)&data;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);

    if(secondAddr > addr)
        dataSize = secondAddr - addr;

    dcache_latency = 0;

    while(1) {
        req->setVirt(0, addr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access) {
                Packet pkt = Packet(req, cmd, Packet::Broadcast);
                pkt.dataStatic(dataPtr);

                if (req->isMmapedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    //XXX This needs to be outside of the loop in order to
                    //work properly for cache line boundary crossing
                    //accesses in transendian simulations.
                    data = htog(data);
                    if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                        dcache_latency += physmemPort.sendAtomic(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    *res = pkt.get<T>();
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        // This will need a new way to tell if it's hooked up to a cache or not.
        if (req->isUncacheable())
            recordEvent("Uncached Write");

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            // If the write needs to have a fault on the access, consider
            // calling changeStatus() and changing it to "bad addr write"
            // or something.
            if (traceData) {
                traceData->setData(gtoh(data));
            }
            if (req->isLocked() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        dataPtr += dataSize;
        //Adjust the size to get the remaining bytes.
        dataSize = addr + sizeof(T) - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
AtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
AtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();

        Fault fault = NoFault;

        bool fromRom = isRomMicroPC(thread->readMicroPC());
        if (!fromRom && !curMacroStaticInst) {
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (!fromRom && !curMacroStaticInst) {
                // This is commented out because the predecoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if(predecoder.needMoreBytes())
                //{
                    icache_access = true;
                    Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq,
                                               Packet::Broadcast);
                    ifetch_pkt.dataStatic(&inst);

                    if (hasPhysMemPort && ifetch_pkt.getAddr() == physMemAddr)
                        icache_latency = physmemPort.sendAtomic(&ifetch_pkt);
                    else
                        icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                    assert(!ifetch_pkt.isError());

                    // ifetch_req is initialized to read the instruction directly
                    // into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData) {
                    // If there was a fault, we should trace this instruction.
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                Tick stall_cycles = stall_ticks / ticks(1);
                Tick aligned_stall_ticks = ticks(stall_cycles);

                if (aligned_stall_ticks < stall_ticks)
                    aligned_stall_ticks += 1;

                latency += aligned_stall_ticks;
            }

        }
        if(fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    // instruction takes at least one cycle
    if (latency < ticks(1))
        latency = ticks(1);

    if (_status != Idle)
        schedule(tickEvent, curTick + latency);
}


void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new AtomicSimpleCPU(this);
}