// atomic.cc — gem5 AtomicSimpleCPU (repository revision 5712)
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
272665Ssaidi@eecs.umich.edu * 282665Ssaidi@eecs.umich.edu * Authors: Steve Reinhardt 292623SN/A */ 302623SN/A 313170Sstever@eecs.umich.edu#include "arch/locked_mem.hh" 323806Ssaidi@eecs.umich.edu#include "arch/mmaped_ipr.hh" 332623SN/A#include "arch/utility.hh" 344040Ssaidi@eecs.umich.edu#include "base/bigint.hh" 352623SN/A#include "cpu/exetrace.hh" 362623SN/A#include "cpu/simple/atomic.hh" 373348Sbinkertn@umich.edu#include "mem/packet.hh" 383348Sbinkertn@umich.edu#include "mem/packet_access.hh" 394762Snate@binkert.org#include "params/AtomicSimpleCPU.hh" 402901Ssaidi@eecs.umich.edu#include "sim/system.hh" 412623SN/A 422623SN/Ausing namespace std; 432623SN/Ausing namespace TheISA; 442623SN/A 452623SN/AAtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c) 465606Snate@binkert.org : Event(CPU_Tick_Pri), cpu(c) 472623SN/A{ 482623SN/A} 492623SN/A 502623SN/A 512623SN/Avoid 522623SN/AAtomicSimpleCPU::TickEvent::process() 532623SN/A{ 542623SN/A cpu->tick(); 552623SN/A} 562623SN/A 572623SN/Aconst char * 585336Shines@cs.fsu.eduAtomicSimpleCPU::TickEvent::description() const 592623SN/A{ 604873Sstever@eecs.umich.edu return "AtomicSimpleCPU tick"; 612623SN/A} 622623SN/A 632856Srdreslin@umich.eduPort * 642856Srdreslin@umich.eduAtomicSimpleCPU::getPort(const std::string &if_name, int idx) 652856Srdreslin@umich.edu{ 662856Srdreslin@umich.edu if (if_name == "dcache_port") 672856Srdreslin@umich.edu return &dcachePort; 682856Srdreslin@umich.edu else if (if_name == "icache_port") 692856Srdreslin@umich.edu return &icachePort; 704968Sacolyte@umich.edu else if (if_name == "physmem_port") { 714968Sacolyte@umich.edu hasPhysMemPort = true; 724968Sacolyte@umich.edu return &physmemPort; 734968Sacolyte@umich.edu } 742856Srdreslin@umich.edu else 752856Srdreslin@umich.edu panic("No Such Port\n"); 762856Srdreslin@umich.edu} 772623SN/A 782623SN/Avoid 792623SN/AAtomicSimpleCPU::init() 802623SN/A{ 812623SN/A BaseCPU::init(); 822623SN/A#if FULL_SYSTEM 832680Sktlim@umich.edu for (int i = 0; i < 
threadContexts.size(); ++i) { 842680Sktlim@umich.edu ThreadContext *tc = threadContexts[i]; 852623SN/A 862623SN/A // initialize CPU, including PC 875712Shsul@eecs.umich.edu TheISA::initCPU(tc, _cpuId); 882623SN/A } 892623SN/A#endif 904968Sacolyte@umich.edu if (hasPhysMemPort) { 914968Sacolyte@umich.edu bool snoop = false; 924968Sacolyte@umich.edu AddrRangeList pmAddrList; 934968Sacolyte@umich.edu physmemPort.getPeerAddressRanges(pmAddrList, snoop); 944968Sacolyte@umich.edu physMemAddr = *pmAddrList.begin(); 954968Sacolyte@umich.edu } 965712Shsul@eecs.umich.edu ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT 975712Shsul@eecs.umich.edu data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too 985712Shsul@eecs.umich.edu data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too 992623SN/A} 1002623SN/A 1012623SN/Abool 1023349Sbinkertn@umich.eduAtomicSimpleCPU::CpuPort::recvTiming(PacketPtr pkt) 1032623SN/A{ 1043184Srdreslin@umich.edu panic("AtomicSimpleCPU doesn't expect recvTiming callback!"); 1052623SN/A return true; 1062623SN/A} 1072623SN/A 1082623SN/ATick 1093349Sbinkertn@umich.eduAtomicSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt) 1102623SN/A{ 1113310Srdreslin@umich.edu //Snooping a coherence request, just return 1123649Srdreslin@umich.edu return 0; 1132623SN/A} 1142623SN/A 1152623SN/Avoid 1163349Sbinkertn@umich.eduAtomicSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt) 1172623SN/A{ 1183184Srdreslin@umich.edu //No internal storage to update, just return 1193184Srdreslin@umich.edu return; 1202623SN/A} 1212623SN/A 1222623SN/Avoid 1232623SN/AAtomicSimpleCPU::CpuPort::recvStatusChange(Status status) 1242623SN/A{ 1253647Srdreslin@umich.edu if (status == RangeChange) { 1263647Srdreslin@umich.edu if (!snoopRangeSent) { 1273647Srdreslin@umich.edu snoopRangeSent = true; 1283647Srdreslin@umich.edu sendStatusChange(Port::RangeChange); 1293647Srdreslin@umich.edu } 1302626SN/A return; 1313647Srdreslin@umich.edu } 1322626SN/A 
1332623SN/A panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!"); 1342623SN/A} 1352623SN/A 1362657Ssaidi@eecs.umich.eduvoid 1372623SN/AAtomicSimpleCPU::CpuPort::recvRetry() 1382623SN/A{ 1392623SN/A panic("AtomicSimpleCPU doesn't expect recvRetry callback!"); 1402623SN/A} 1412623SN/A 1424192Sktlim@umich.eduvoid 1434192Sktlim@umich.eduAtomicSimpleCPU::DcachePort::setPeer(Port *port) 1444192Sktlim@umich.edu{ 1454192Sktlim@umich.edu Port::setPeer(port); 1464192Sktlim@umich.edu 1474192Sktlim@umich.edu#if FULL_SYSTEM 1484192Sktlim@umich.edu // Update the ThreadContext's memory ports (Functional/Virtual 1494192Sktlim@umich.edu // Ports) 1505497Ssaidi@eecs.umich.edu cpu->tcBase()->connectMemPorts(cpu->tcBase()); 1514192Sktlim@umich.edu#endif 1524192Sktlim@umich.edu} 1532623SN/A 1545529Snate@binkert.orgAtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p) 1555487Snate@binkert.org : BaseSimpleCPU(p), tickEvent(this), width(p->width), 1565487Snate@binkert.org simulate_data_stalls(p->simulate_data_stalls), 1575487Snate@binkert.org simulate_inst_stalls(p->simulate_inst_stalls), 1584968Sacolyte@umich.edu icachePort(name() + "-iport", this), dcachePort(name() + "-iport", this), 1594968Sacolyte@umich.edu physmemPort(name() + "-iport", this), hasPhysMemPort(false) 1602623SN/A{ 1612623SN/A _status = Idle; 1622623SN/A 1633647Srdreslin@umich.edu icachePort.snoopRangeSent = false; 1643647Srdreslin@umich.edu dcachePort.snoopRangeSent = false; 1653647Srdreslin@umich.edu 1662623SN/A} 1672623SN/A 1682623SN/A 1692623SN/AAtomicSimpleCPU::~AtomicSimpleCPU() 1702623SN/A{ 1712623SN/A} 1722623SN/A 1732623SN/Avoid 1742623SN/AAtomicSimpleCPU::serialize(ostream &os) 1752623SN/A{ 1762915Sktlim@umich.edu SimObject::State so_state = SimObject::getState(); 1772915Sktlim@umich.edu SERIALIZE_ENUM(so_state); 1783145Shsul@eecs.umich.edu BaseSimpleCPU::serialize(os); 1792623SN/A nameOut(os, csprintf("%s.tickEvent", name())); 1802623SN/A tickEvent.serialize(os); 1812623SN/A} 1822623SN/A 
1832623SN/Avoid 1842623SN/AAtomicSimpleCPU::unserialize(Checkpoint *cp, const string §ion) 1852623SN/A{ 1862915Sktlim@umich.edu SimObject::State so_state; 1872915Sktlim@umich.edu UNSERIALIZE_ENUM(so_state); 1883145Shsul@eecs.umich.edu BaseSimpleCPU::unserialize(cp, section); 1892915Sktlim@umich.edu tickEvent.unserialize(cp, csprintf("%s.tickEvent", section)); 1902915Sktlim@umich.edu} 1912915Sktlim@umich.edu 1922915Sktlim@umich.eduvoid 1932915Sktlim@umich.eduAtomicSimpleCPU::resume() 1942915Sktlim@umich.edu{ 1955220Ssaidi@eecs.umich.edu if (_status == Idle || _status == SwitchedOut) 1965220Ssaidi@eecs.umich.edu return; 1975220Ssaidi@eecs.umich.edu 1984940Snate@binkert.org DPRINTF(SimpleCPU, "Resume\n"); 1995220Ssaidi@eecs.umich.edu assert(system->getMemoryMode() == Enums::atomic); 2003324Shsul@eecs.umich.edu 2015220Ssaidi@eecs.umich.edu changeState(SimObject::Running); 2025220Ssaidi@eecs.umich.edu if (thread->status() == ThreadContext::Active) { 2035606Snate@binkert.org if (!tickEvent.scheduled()) 2045606Snate@binkert.org schedule(tickEvent, nextCycle()); 2052915Sktlim@umich.edu } 2062623SN/A} 2072623SN/A 2082623SN/Avoid 2092798Sktlim@umich.eduAtomicSimpleCPU::switchOut() 2102623SN/A{ 2115496Ssaidi@eecs.umich.edu assert(_status == Running || _status == Idle); 2122798Sktlim@umich.edu _status = SwitchedOut; 2132623SN/A 2142798Sktlim@umich.edu tickEvent.squash(); 2152623SN/A} 2162623SN/A 2172623SN/A 2182623SN/Avoid 2192623SN/AAtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU) 2202623SN/A{ 2214192Sktlim@umich.edu BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort); 2222623SN/A 2232623SN/A assert(!tickEvent.scheduled()); 2242623SN/A 2252680Sktlim@umich.edu // if any of this CPU's ThreadContexts are active, mark the CPU as 2262623SN/A // running and schedule its tick event. 
2272680Sktlim@umich.edu for (int i = 0; i < threadContexts.size(); ++i) { 2282680Sktlim@umich.edu ThreadContext *tc = threadContexts[i]; 2292680Sktlim@umich.edu if (tc->status() == ThreadContext::Active && _status != Running) { 2302623SN/A _status = Running; 2315606Snate@binkert.org schedule(tickEvent, nextCycle()); 2322623SN/A break; 2332623SN/A } 2342623SN/A } 2353512Sktlim@umich.edu if (_status != Running) { 2363512Sktlim@umich.edu _status = Idle; 2373512Sktlim@umich.edu } 2385169Ssaidi@eecs.umich.edu assert(threadContexts.size() == 1); 2395712Shsul@eecs.umich.edu ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT 2405712Shsul@eecs.umich.edu data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too 2415712Shsul@eecs.umich.edu data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too 2422623SN/A} 2432623SN/A 2442623SN/A 2452623SN/Avoid 2462623SN/AAtomicSimpleCPU::activateContext(int thread_num, int delay) 2472623SN/A{ 2484940Snate@binkert.org DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay); 2494940Snate@binkert.org 2502623SN/A assert(thread_num == 0); 2512683Sktlim@umich.edu assert(thread); 2522623SN/A 2532623SN/A assert(_status == Idle); 2542623SN/A assert(!tickEvent.scheduled()); 2552623SN/A 2562623SN/A notIdleFraction++; 2575101Ssaidi@eecs.umich.edu numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend); 2583686Sktlim@umich.edu 2593430Sgblack@eecs.umich.edu //Make sure ticks are still on multiples of cycles 2605606Snate@binkert.org schedule(tickEvent, nextCycle(curTick + ticks(delay))); 2612623SN/A _status = Running; 2622623SN/A} 2632623SN/A 2642623SN/A 2652623SN/Avoid 2662623SN/AAtomicSimpleCPU::suspendContext(int thread_num) 2672623SN/A{ 2684940Snate@binkert.org DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num); 2694940Snate@binkert.org 2702623SN/A assert(thread_num == 0); 2712683Sktlim@umich.edu assert(thread); 2722623SN/A 2732623SN/A assert(_status == Running); 
2742626SN/A 2752626SN/A // tick event may not be scheduled if this gets called from inside 2762626SN/A // an instruction's execution, e.g. "quiesce" 2772626SN/A if (tickEvent.scheduled()) 2785606Snate@binkert.org deschedule(tickEvent); 2792623SN/A 2802623SN/A notIdleFraction--; 2812623SN/A _status = Idle; 2822623SN/A} 2832623SN/A 2842623SN/A 2852623SN/Atemplate <class T> 2862623SN/AFault 2872623SN/AAtomicSimpleCPU::read(Addr addr, T &data, unsigned flags) 2882623SN/A{ 2893169Sstever@eecs.umich.edu // use the CPU's statically allocated read request and packet objects 2904870Sstever@eecs.umich.edu Request *req = &data_read_req; 2912623SN/A 2922623SN/A if (traceData) { 2932623SN/A traceData->setAddr(addr); 2942623SN/A } 2952623SN/A 2964999Sgblack@eecs.umich.edu //The block size of our peer. 2974999Sgblack@eecs.umich.edu int blockSize = dcachePort.peerBlockSize(); 2984999Sgblack@eecs.umich.edu //The size of the data we're trying to read. 2994999Sgblack@eecs.umich.edu int dataSize = sizeof(T); 3002623SN/A 3014999Sgblack@eecs.umich.edu uint8_t * dataPtr = (uint8_t *)&data; 3022623SN/A 3034999Sgblack@eecs.umich.edu //The address of the second part of this access if it needs to be split 3044999Sgblack@eecs.umich.edu //across a cache line boundary. 3054999Sgblack@eecs.umich.edu Addr secondAddr = roundDown(addr + dataSize - 1, blockSize); 3064999Sgblack@eecs.umich.edu 3074999Sgblack@eecs.umich.edu if(secondAddr > addr) 3084999Sgblack@eecs.umich.edu dataSize = secondAddr - addr; 3094999Sgblack@eecs.umich.edu 3104999Sgblack@eecs.umich.edu dcache_latency = 0; 3114999Sgblack@eecs.umich.edu 3124999Sgblack@eecs.umich.edu while(1) { 3134999Sgblack@eecs.umich.edu req->setVirt(0, addr, dataSize, flags, thread->readPC()); 3144999Sgblack@eecs.umich.edu 3154999Sgblack@eecs.umich.edu // translate to physical address 3164999Sgblack@eecs.umich.edu Fault fault = thread->translateDataReadReq(req); 3174999Sgblack@eecs.umich.edu 3184999Sgblack@eecs.umich.edu // Now do the access. 
3194999Sgblack@eecs.umich.edu if (fault == NoFault) { 3204999Sgblack@eecs.umich.edu Packet pkt = Packet(req, 3214999Sgblack@eecs.umich.edu req->isLocked() ? MemCmd::LoadLockedReq : MemCmd::ReadReq, 3224999Sgblack@eecs.umich.edu Packet::Broadcast); 3234999Sgblack@eecs.umich.edu pkt.dataStatic(dataPtr); 3244999Sgblack@eecs.umich.edu 3254999Sgblack@eecs.umich.edu if (req->isMmapedIpr()) 3264999Sgblack@eecs.umich.edu dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt); 3274999Sgblack@eecs.umich.edu else { 3284999Sgblack@eecs.umich.edu if (hasPhysMemPort && pkt.getAddr() == physMemAddr) 3294999Sgblack@eecs.umich.edu dcache_latency += physmemPort.sendAtomic(&pkt); 3304999Sgblack@eecs.umich.edu else 3314999Sgblack@eecs.umich.edu dcache_latency += dcachePort.sendAtomic(&pkt); 3324999Sgblack@eecs.umich.edu } 3334999Sgblack@eecs.umich.edu dcache_access = true; 3345012Sgblack@eecs.umich.edu 3354999Sgblack@eecs.umich.edu assert(!pkt.isError()); 3364999Sgblack@eecs.umich.edu 3374999Sgblack@eecs.umich.edu if (req->isLocked()) { 3384999Sgblack@eecs.umich.edu TheISA::handleLockedRead(thread, req); 3394999Sgblack@eecs.umich.edu } 3404968Sacolyte@umich.edu } 3414986Ssaidi@eecs.umich.edu 3424999Sgblack@eecs.umich.edu // This will need a new way to tell if it has a dcache attached. 3434999Sgblack@eecs.umich.edu if (req->isUncacheable()) 3444999Sgblack@eecs.umich.edu recordEvent("Uncached Read"); 3454762Snate@binkert.org 3464999Sgblack@eecs.umich.edu //If there's a fault, return it 3474999Sgblack@eecs.umich.edu if (fault != NoFault) 3484999Sgblack@eecs.umich.edu return fault; 3494999Sgblack@eecs.umich.edu //If we don't need to access a second cache line, stop now. 
3504999Sgblack@eecs.umich.edu if (secondAddr <= addr) 3514999Sgblack@eecs.umich.edu { 3524999Sgblack@eecs.umich.edu data = gtoh(data); 3535408Sgblack@eecs.umich.edu if (traceData) { 3545408Sgblack@eecs.umich.edu traceData->setData(data); 3555408Sgblack@eecs.umich.edu } 3564999Sgblack@eecs.umich.edu return fault; 3574968Sacolyte@umich.edu } 3583170Sstever@eecs.umich.edu 3594999Sgblack@eecs.umich.edu /* 3604999Sgblack@eecs.umich.edu * Set up for accessing the second cache line. 3614999Sgblack@eecs.umich.edu */ 3624999Sgblack@eecs.umich.edu 3634999Sgblack@eecs.umich.edu //Move the pointer we're reading into to the correct location. 3644999Sgblack@eecs.umich.edu dataPtr += dataSize; 3654999Sgblack@eecs.umich.edu //Adjust the size to get the remaining bytes. 3664999Sgblack@eecs.umich.edu dataSize = addr + sizeof(T) - secondAddr; 3674999Sgblack@eecs.umich.edu //And access the right address. 3684999Sgblack@eecs.umich.edu addr = secondAddr; 3692623SN/A } 3702623SN/A} 3712623SN/A 3725177Sgblack@eecs.umich.eduFault 3735177Sgblack@eecs.umich.eduAtomicSimpleCPU::translateDataReadAddr(Addr vaddr, Addr & paddr, 3745177Sgblack@eecs.umich.edu int size, unsigned flags) 3755177Sgblack@eecs.umich.edu{ 3765177Sgblack@eecs.umich.edu // use the CPU's statically allocated read request and packet objects 3775177Sgblack@eecs.umich.edu Request *req = &data_read_req; 3785177Sgblack@eecs.umich.edu 3795177Sgblack@eecs.umich.edu if (traceData) { 3805177Sgblack@eecs.umich.edu traceData->setAddr(vaddr); 3815177Sgblack@eecs.umich.edu } 3825177Sgblack@eecs.umich.edu 3835177Sgblack@eecs.umich.edu //The block size of our peer. 3845177Sgblack@eecs.umich.edu int blockSize = dcachePort.peerBlockSize(); 3855177Sgblack@eecs.umich.edu //The size of the data we're trying to read. 
3865177Sgblack@eecs.umich.edu int dataSize = size; 3875177Sgblack@eecs.umich.edu 3885177Sgblack@eecs.umich.edu bool firstTimeThrough = true; 3895177Sgblack@eecs.umich.edu 3905177Sgblack@eecs.umich.edu //The address of the second part of this access if it needs to be split 3915177Sgblack@eecs.umich.edu //across a cache line boundary. 3925177Sgblack@eecs.umich.edu Addr secondAddr = roundDown(vaddr + dataSize - 1, blockSize); 3935177Sgblack@eecs.umich.edu 3945177Sgblack@eecs.umich.edu if(secondAddr > vaddr) 3955177Sgblack@eecs.umich.edu dataSize = secondAddr - vaddr; 3965177Sgblack@eecs.umich.edu 3975177Sgblack@eecs.umich.edu while(1) { 3985177Sgblack@eecs.umich.edu req->setVirt(0, vaddr, dataSize, flags, thread->readPC()); 3995177Sgblack@eecs.umich.edu 4005177Sgblack@eecs.umich.edu // translate to physical address 4015177Sgblack@eecs.umich.edu Fault fault = thread->translateDataReadReq(req); 4025177Sgblack@eecs.umich.edu 4035177Sgblack@eecs.umich.edu //If there's a fault, return it 4045177Sgblack@eecs.umich.edu if (fault != NoFault) 4055177Sgblack@eecs.umich.edu return fault; 4065177Sgblack@eecs.umich.edu 4075177Sgblack@eecs.umich.edu if (firstTimeThrough) { 4085177Sgblack@eecs.umich.edu paddr = req->getPaddr(); 4095177Sgblack@eecs.umich.edu firstTimeThrough = false; 4105177Sgblack@eecs.umich.edu } 4115177Sgblack@eecs.umich.edu 4125177Sgblack@eecs.umich.edu //If we don't need to access a second cache line, stop now. 4135177Sgblack@eecs.umich.edu if (secondAddr <= vaddr) 4145177Sgblack@eecs.umich.edu return fault; 4155177Sgblack@eecs.umich.edu 4165177Sgblack@eecs.umich.edu /* 4175177Sgblack@eecs.umich.edu * Set up for accessing the second cache line. 4185177Sgblack@eecs.umich.edu */ 4195177Sgblack@eecs.umich.edu 4205177Sgblack@eecs.umich.edu //Adjust the size to get the remaining bytes. 4215177Sgblack@eecs.umich.edu dataSize = vaddr + size - secondAddr; 4225177Sgblack@eecs.umich.edu //And access the right address. 
4235177Sgblack@eecs.umich.edu vaddr = secondAddr; 4245177Sgblack@eecs.umich.edu } 4255177Sgblack@eecs.umich.edu} 4265177Sgblack@eecs.umich.edu 4272623SN/A#ifndef DOXYGEN_SHOULD_SKIP_THIS 4282623SN/A 4292623SN/Atemplate 4302623SN/AFault 4314115Ssaidi@eecs.umich.eduAtomicSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags); 4324115Ssaidi@eecs.umich.edu 4334115Ssaidi@eecs.umich.edutemplate 4344115Ssaidi@eecs.umich.eduFault 4354040Ssaidi@eecs.umich.eduAtomicSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags); 4364040Ssaidi@eecs.umich.edu 4374040Ssaidi@eecs.umich.edutemplate 4384040Ssaidi@eecs.umich.eduFault 4392623SN/AAtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags); 4402623SN/A 4412623SN/Atemplate 4422623SN/AFault 4432623SN/AAtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags); 4442623SN/A 4452623SN/Atemplate 4462623SN/AFault 4472623SN/AAtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags); 4482623SN/A 4492623SN/Atemplate 4502623SN/AFault 4512623SN/AAtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags); 4522623SN/A 4532623SN/A#endif //DOXYGEN_SHOULD_SKIP_THIS 4542623SN/A 4552623SN/Atemplate<> 4562623SN/AFault 4572623SN/AAtomicSimpleCPU::read(Addr addr, double &data, unsigned flags) 4582623SN/A{ 4592623SN/A return read(addr, *(uint64_t*)&data, flags); 4602623SN/A} 4612623SN/A 4622623SN/Atemplate<> 4632623SN/AFault 4642623SN/AAtomicSimpleCPU::read(Addr addr, float &data, unsigned flags) 4652623SN/A{ 4662623SN/A return read(addr, *(uint32_t*)&data, flags); 4672623SN/A} 4682623SN/A 4692623SN/A 4702623SN/Atemplate<> 4712623SN/AFault 4722623SN/AAtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags) 4732623SN/A{ 4742623SN/A return read(addr, (uint32_t&)data, flags); 4752623SN/A} 4762623SN/A 4772623SN/A 4782623SN/Atemplate <class T> 4792623SN/AFault 4802623SN/AAtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res) 4812623SN/A{ 4823169Sstever@eecs.umich.edu // use the CPU's statically 
allocated write request and packet objects 4834870Sstever@eecs.umich.edu Request *req = &data_write_req; 4842623SN/A 4852623SN/A if (traceData) { 4862623SN/A traceData->setAddr(addr); 4872623SN/A } 4882623SN/A 4894999Sgblack@eecs.umich.edu //The block size of our peer. 4904999Sgblack@eecs.umich.edu int blockSize = dcachePort.peerBlockSize(); 4914999Sgblack@eecs.umich.edu //The size of the data we're trying to read. 4924999Sgblack@eecs.umich.edu int dataSize = sizeof(T); 4932623SN/A 4944999Sgblack@eecs.umich.edu uint8_t * dataPtr = (uint8_t *)&data; 4952623SN/A 4964999Sgblack@eecs.umich.edu //The address of the second part of this access if it needs to be split 4974999Sgblack@eecs.umich.edu //across a cache line boundary. 4984999Sgblack@eecs.umich.edu Addr secondAddr = roundDown(addr + dataSize - 1, blockSize); 4994999Sgblack@eecs.umich.edu 5004999Sgblack@eecs.umich.edu if(secondAddr > addr) 5014999Sgblack@eecs.umich.edu dataSize = secondAddr - addr; 5024999Sgblack@eecs.umich.edu 5034999Sgblack@eecs.umich.edu dcache_latency = 0; 5044999Sgblack@eecs.umich.edu 5054999Sgblack@eecs.umich.edu while(1) { 5064999Sgblack@eecs.umich.edu req->setVirt(0, addr, dataSize, flags, thread->readPC()); 5074999Sgblack@eecs.umich.edu 5084999Sgblack@eecs.umich.edu // translate to physical address 5094999Sgblack@eecs.umich.edu Fault fault = thread->translateDataWriteReq(req); 5104999Sgblack@eecs.umich.edu 5114999Sgblack@eecs.umich.edu // Now do the access. 
5124999Sgblack@eecs.umich.edu if (fault == NoFault) { 5134999Sgblack@eecs.umich.edu MemCmd cmd = MemCmd::WriteReq; // default 5144999Sgblack@eecs.umich.edu bool do_access = true; // flag to suppress cache access 5154999Sgblack@eecs.umich.edu 5164999Sgblack@eecs.umich.edu if (req->isLocked()) { 5174999Sgblack@eecs.umich.edu cmd = MemCmd::StoreCondReq; 5184999Sgblack@eecs.umich.edu do_access = TheISA::handleLockedWrite(thread, req); 5194999Sgblack@eecs.umich.edu } else if (req->isSwap()) { 5204999Sgblack@eecs.umich.edu cmd = MemCmd::SwapReq; 5214999Sgblack@eecs.umich.edu if (req->isCondSwap()) { 5224999Sgblack@eecs.umich.edu assert(res); 5234999Sgblack@eecs.umich.edu req->setExtraData(*res); 5244999Sgblack@eecs.umich.edu } 5254999Sgblack@eecs.umich.edu } 5264999Sgblack@eecs.umich.edu 5274999Sgblack@eecs.umich.edu if (do_access) { 5284999Sgblack@eecs.umich.edu Packet pkt = Packet(req, cmd, Packet::Broadcast); 5294999Sgblack@eecs.umich.edu pkt.dataStatic(dataPtr); 5304999Sgblack@eecs.umich.edu 5314999Sgblack@eecs.umich.edu if (req->isMmapedIpr()) { 5324999Sgblack@eecs.umich.edu dcache_latency += 5334999Sgblack@eecs.umich.edu TheISA::handleIprWrite(thread->getTC(), &pkt); 5344999Sgblack@eecs.umich.edu } else { 5354999Sgblack@eecs.umich.edu //XXX This needs to be outside of the loop in order to 5364999Sgblack@eecs.umich.edu //work properly for cache line boundary crossing 5374999Sgblack@eecs.umich.edu //accesses in transendian simulations. 
5384999Sgblack@eecs.umich.edu data = htog(data); 5394999Sgblack@eecs.umich.edu if (hasPhysMemPort && pkt.getAddr() == physMemAddr) 5404999Sgblack@eecs.umich.edu dcache_latency += physmemPort.sendAtomic(&pkt); 5414999Sgblack@eecs.umich.edu else 5424999Sgblack@eecs.umich.edu dcache_latency += dcachePort.sendAtomic(&pkt); 5434999Sgblack@eecs.umich.edu } 5444999Sgblack@eecs.umich.edu dcache_access = true; 5454999Sgblack@eecs.umich.edu assert(!pkt.isError()); 5464999Sgblack@eecs.umich.edu 5474999Sgblack@eecs.umich.edu if (req->isSwap()) { 5484999Sgblack@eecs.umich.edu assert(res); 5494999Sgblack@eecs.umich.edu *res = pkt.get<T>(); 5504999Sgblack@eecs.umich.edu } 5514999Sgblack@eecs.umich.edu } 5524999Sgblack@eecs.umich.edu 5534999Sgblack@eecs.umich.edu if (res && !req->isSwap()) { 5544999Sgblack@eecs.umich.edu *res = req->getExtraData(); 5554878Sstever@eecs.umich.edu } 5564040Ssaidi@eecs.umich.edu } 5574040Ssaidi@eecs.umich.edu 5584999Sgblack@eecs.umich.edu // This will need a new way to tell if it's hooked up to a cache or not. 5594999Sgblack@eecs.umich.edu if (req->isUncacheable()) 5604999Sgblack@eecs.umich.edu recordEvent("Uncached Write"); 5612631SN/A 5624999Sgblack@eecs.umich.edu //If there's a fault or we don't need to access a second cache line, 5634999Sgblack@eecs.umich.edu //stop now. 5644999Sgblack@eecs.umich.edu if (fault != NoFault || secondAddr <= addr) 5654999Sgblack@eecs.umich.edu { 5664999Sgblack@eecs.umich.edu // If the write needs to have a fault on the access, consider 5674999Sgblack@eecs.umich.edu // calling changeStatus() and changing it to "bad addr write" 5684999Sgblack@eecs.umich.edu // or something. 5695408Sgblack@eecs.umich.edu if (traceData) { 5705408Sgblack@eecs.umich.edu traceData->setData(data); 5715408Sgblack@eecs.umich.edu } 5724999Sgblack@eecs.umich.edu return fault; 5733170Sstever@eecs.umich.edu } 5743170Sstever@eecs.umich.edu 5754999Sgblack@eecs.umich.edu /* 5764999Sgblack@eecs.umich.edu * Set up for accessing the second cache line. 
5774999Sgblack@eecs.umich.edu */ 5784999Sgblack@eecs.umich.edu 5794999Sgblack@eecs.umich.edu //Move the pointer we're reading into to the correct location. 5804999Sgblack@eecs.umich.edu dataPtr += dataSize; 5814999Sgblack@eecs.umich.edu //Adjust the size to get the remaining bytes. 5824999Sgblack@eecs.umich.edu dataSize = addr + sizeof(T) - secondAddr; 5834999Sgblack@eecs.umich.edu //And access the right address. 5844999Sgblack@eecs.umich.edu addr = secondAddr; 5852623SN/A } 5862623SN/A} 5872623SN/A 5885177Sgblack@eecs.umich.eduFault 5895177Sgblack@eecs.umich.eduAtomicSimpleCPU::translateDataWriteAddr(Addr vaddr, Addr &paddr, 5905177Sgblack@eecs.umich.edu int size, unsigned flags) 5915177Sgblack@eecs.umich.edu{ 5925177Sgblack@eecs.umich.edu // use the CPU's statically allocated write request and packet objects 5935177Sgblack@eecs.umich.edu Request *req = &data_write_req; 5945177Sgblack@eecs.umich.edu 5955177Sgblack@eecs.umich.edu if (traceData) { 5965177Sgblack@eecs.umich.edu traceData->setAddr(vaddr); 5975177Sgblack@eecs.umich.edu } 5985177Sgblack@eecs.umich.edu 5995177Sgblack@eecs.umich.edu //The block size of our peer. 6005177Sgblack@eecs.umich.edu int blockSize = dcachePort.peerBlockSize(); 6015177Sgblack@eecs.umich.edu 6025177Sgblack@eecs.umich.edu //The address of the second part of this access if it needs to be split 6035177Sgblack@eecs.umich.edu //across a cache line boundary. 6045177Sgblack@eecs.umich.edu Addr secondAddr = roundDown(vaddr + size - 1, blockSize); 6055177Sgblack@eecs.umich.edu 6065177Sgblack@eecs.umich.edu //The size of the data we're trying to read. 
6075177Sgblack@eecs.umich.edu int dataSize = size; 6085177Sgblack@eecs.umich.edu 6095177Sgblack@eecs.umich.edu bool firstTimeThrough = true; 6105177Sgblack@eecs.umich.edu 6115177Sgblack@eecs.umich.edu if(secondAddr > vaddr) 6125177Sgblack@eecs.umich.edu dataSize = secondAddr - vaddr; 6135177Sgblack@eecs.umich.edu 6145177Sgblack@eecs.umich.edu dcache_latency = 0; 6155177Sgblack@eecs.umich.edu 6165177Sgblack@eecs.umich.edu while(1) { 6175278Sgblack@eecs.umich.edu req->setVirt(0, vaddr, dataSize, flags, thread->readPC()); 6185177Sgblack@eecs.umich.edu 6195177Sgblack@eecs.umich.edu // translate to physical address 6205177Sgblack@eecs.umich.edu Fault fault = thread->translateDataWriteReq(req); 6215177Sgblack@eecs.umich.edu 6225177Sgblack@eecs.umich.edu //If there's a fault or we don't need to access a second cache line, 6235177Sgblack@eecs.umich.edu //stop now. 6245177Sgblack@eecs.umich.edu if (fault != NoFault) 6255177Sgblack@eecs.umich.edu return fault; 6265177Sgblack@eecs.umich.edu 6275177Sgblack@eecs.umich.edu if (firstTimeThrough) { 6285177Sgblack@eecs.umich.edu paddr = req->getPaddr(); 6295177Sgblack@eecs.umich.edu firstTimeThrough = false; 6305177Sgblack@eecs.umich.edu } 6315177Sgblack@eecs.umich.edu 6325177Sgblack@eecs.umich.edu if (secondAddr <= vaddr) 6335177Sgblack@eecs.umich.edu return fault; 6345177Sgblack@eecs.umich.edu 6355177Sgblack@eecs.umich.edu /* 6365177Sgblack@eecs.umich.edu * Set up for accessing the second cache line. 6375177Sgblack@eecs.umich.edu */ 6385177Sgblack@eecs.umich.edu 6395177Sgblack@eecs.umich.edu //Adjust the size to get the remaining bytes. 6405177Sgblack@eecs.umich.edu dataSize = vaddr + size - secondAddr; 6415177Sgblack@eecs.umich.edu //And access the right address. 
6425177Sgblack@eecs.umich.edu vaddr = secondAddr; 6435177Sgblack@eecs.umich.edu } 6445177Sgblack@eecs.umich.edu} 6455177Sgblack@eecs.umich.edu 6462623SN/A 6472623SN/A#ifndef DOXYGEN_SHOULD_SKIP_THIS 6484224Sgblack@eecs.umich.edu 6494224Sgblack@eecs.umich.edutemplate 6504224Sgblack@eecs.umich.eduFault 6514224Sgblack@eecs.umich.eduAtomicSimpleCPU::write(Twin32_t data, Addr addr, 6524224Sgblack@eecs.umich.edu unsigned flags, uint64_t *res); 6534224Sgblack@eecs.umich.edu 6544224Sgblack@eecs.umich.edutemplate 6554224Sgblack@eecs.umich.eduFault 6564224Sgblack@eecs.umich.eduAtomicSimpleCPU::write(Twin64_t data, Addr addr, 6574224Sgblack@eecs.umich.edu unsigned flags, uint64_t *res); 6584224Sgblack@eecs.umich.edu 6592623SN/Atemplate 6602623SN/AFault 6612623SN/AAtomicSimpleCPU::write(uint64_t data, Addr addr, 6622623SN/A unsigned flags, uint64_t *res); 6632623SN/A 6642623SN/Atemplate 6652623SN/AFault 6662623SN/AAtomicSimpleCPU::write(uint32_t data, Addr addr, 6672623SN/A unsigned flags, uint64_t *res); 6682623SN/A 6692623SN/Atemplate 6702623SN/AFault 6712623SN/AAtomicSimpleCPU::write(uint16_t data, Addr addr, 6722623SN/A unsigned flags, uint64_t *res); 6732623SN/A 6742623SN/Atemplate 6752623SN/AFault 6762623SN/AAtomicSimpleCPU::write(uint8_t data, Addr addr, 6772623SN/A unsigned flags, uint64_t *res); 6782623SN/A 6792623SN/A#endif //DOXYGEN_SHOULD_SKIP_THIS 6802623SN/A 6812623SN/Atemplate<> 6822623SN/AFault 6832623SN/AAtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res) 6842623SN/A{ 6852623SN/A return write(*(uint64_t*)&data, addr, flags, res); 6862623SN/A} 6872623SN/A 6882623SN/Atemplate<> 6892623SN/AFault 6902623SN/AAtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res) 6912623SN/A{ 6922623SN/A return write(*(uint32_t*)&data, addr, flags, res); 6932623SN/A} 6942623SN/A 6952623SN/A 6962623SN/Atemplate<> 6972623SN/AFault 6982623SN/AAtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res) 6992623SN/A{ 
7002623SN/A return write((uint32_t)data, addr, flags, res); 7012623SN/A} 7022623SN/A 7032623SN/A 7042623SN/Avoid 7052623SN/AAtomicSimpleCPU::tick() 7062623SN/A{ 7074940Snate@binkert.org DPRINTF(SimpleCPU, "Tick\n"); 7084940Snate@binkert.org 7095487Snate@binkert.org Tick latency = 0; 7102623SN/A 7112623SN/A for (int i = 0; i < width; ++i) { 7122623SN/A numCycles++; 7132623SN/A 7143387Sgblack@eecs.umich.edu if (!curStaticInst || !curStaticInst->isDelayedCommit()) 7153387Sgblack@eecs.umich.edu checkForInterrupts(); 7162626SN/A 7175348Ssaidi@eecs.umich.edu checkPcEventQueue(); 7185348Ssaidi@eecs.umich.edu 7195669Sgblack@eecs.umich.edu Fault fault = NoFault; 7205669Sgblack@eecs.umich.edu 7215669Sgblack@eecs.umich.edu bool fromRom = isRomMicroPC(thread->readMicroPC()); 7225669Sgblack@eecs.umich.edu if (!fromRom) 7235669Sgblack@eecs.umich.edu fault = setupFetchRequest(&ifetch_req); 7242623SN/A 7252623SN/A if (fault == NoFault) { 7264182Sgblack@eecs.umich.edu Tick icache_latency = 0; 7274182Sgblack@eecs.umich.edu bool icache_access = false; 7284182Sgblack@eecs.umich.edu dcache_access = false; // assume no dcache access 7292662Sstever@eecs.umich.edu 7305669Sgblack@eecs.umich.edu if (!fromRom) { 7315694Sgblack@eecs.umich.edu // This is commented out because the predecoder would act like 7325694Sgblack@eecs.umich.edu // a tiny cache otherwise. It wouldn't be flushed when needed 7335694Sgblack@eecs.umich.edu // like the I cache. It should be flushed, and when that works 7345694Sgblack@eecs.umich.edu // this code should be uncommented. 
7355669Sgblack@eecs.umich.edu //Fetch more instruction memory if necessary 7365669Sgblack@eecs.umich.edu //if(predecoder.needMoreBytes()) 7375669Sgblack@eecs.umich.edu //{ 7385669Sgblack@eecs.umich.edu icache_access = true; 7395669Sgblack@eecs.umich.edu Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq, 7405669Sgblack@eecs.umich.edu Packet::Broadcast); 7415669Sgblack@eecs.umich.edu ifetch_pkt.dataStatic(&inst); 7422623SN/A 7435669Sgblack@eecs.umich.edu if (hasPhysMemPort && ifetch_pkt.getAddr() == physMemAddr) 7445669Sgblack@eecs.umich.edu icache_latency = physmemPort.sendAtomic(&ifetch_pkt); 7455669Sgblack@eecs.umich.edu else 7465669Sgblack@eecs.umich.edu icache_latency = icachePort.sendAtomic(&ifetch_pkt); 7474968Sacolyte@umich.edu 7485669Sgblack@eecs.umich.edu assert(!ifetch_pkt.isError()); 7494968Sacolyte@umich.edu 7505669Sgblack@eecs.umich.edu // ifetch_req is initialized to read the instruction directly 7515669Sgblack@eecs.umich.edu // into the CPU object's inst field. 7525669Sgblack@eecs.umich.edu //} 7535669Sgblack@eecs.umich.edu } 7544182Sgblack@eecs.umich.edu 7552623SN/A preExecute(); 7563814Ssaidi@eecs.umich.edu 7575001Sgblack@eecs.umich.edu if (curStaticInst) { 7584182Sgblack@eecs.umich.edu fault = curStaticInst->execute(this, traceData); 7594998Sgblack@eecs.umich.edu 7604998Sgblack@eecs.umich.edu // keep an instruction count 7614998Sgblack@eecs.umich.edu if (fault == NoFault) 7624998Sgblack@eecs.umich.edu countInst(); 7635001Sgblack@eecs.umich.edu else if (traceData) { 7645001Sgblack@eecs.umich.edu // If there was a fault, we should trace this instruction. 
7655001Sgblack@eecs.umich.edu delete traceData; 7665001Sgblack@eecs.umich.edu traceData = NULL; 7675001Sgblack@eecs.umich.edu } 7684998Sgblack@eecs.umich.edu 7694182Sgblack@eecs.umich.edu postExecute(); 7704182Sgblack@eecs.umich.edu } 7712623SN/A 7723814Ssaidi@eecs.umich.edu // @todo remove me after debugging with legion done 7734539Sgblack@eecs.umich.edu if (curStaticInst && (!curStaticInst->isMicroop() || 7744539Sgblack@eecs.umich.edu curStaticInst->isFirstMicroop())) 7753814Ssaidi@eecs.umich.edu instCnt++; 7763814Ssaidi@eecs.umich.edu 7775487Snate@binkert.org Tick stall_ticks = 0; 7785487Snate@binkert.org if (simulate_inst_stalls && icache_access) 7795487Snate@binkert.org stall_ticks += icache_latency; 7805487Snate@binkert.org 7815487Snate@binkert.org if (simulate_data_stalls && dcache_access) 7825487Snate@binkert.org stall_ticks += dcache_latency; 7835487Snate@binkert.org 7845487Snate@binkert.org if (stall_ticks) { 7855487Snate@binkert.org Tick stall_cycles = stall_ticks / ticks(1); 7865487Snate@binkert.org Tick aligned_stall_ticks = ticks(stall_cycles); 7875487Snate@binkert.org 7885487Snate@binkert.org if (aligned_stall_ticks < stall_ticks) 7895487Snate@binkert.org aligned_stall_ticks += 1; 7905487Snate@binkert.org 7915487Snate@binkert.org latency += aligned_stall_ticks; 7922623SN/A } 7932623SN/A 7942623SN/A } 7954377Sgblack@eecs.umich.edu if(fault != NoFault || !stayAtPC) 7964182Sgblack@eecs.umich.edu advancePC(fault); 7972623SN/A } 7982623SN/A 7995487Snate@binkert.org // instruction takes at least one cycle 8005487Snate@binkert.org if (latency < ticks(1)) 8015487Snate@binkert.org latency = ticks(1); 8025487Snate@binkert.org 8032626SN/A if (_status != Idle) 8045606Snate@binkert.org schedule(tickEvent, curTick + latency); 8052623SN/A} 8062623SN/A 8072623SN/A 8085315Sstever@gmail.comvoid 8095315Sstever@gmail.comAtomicSimpleCPU::printAddr(Addr a) 8105315Sstever@gmail.com{ 8115315Sstever@gmail.com dcachePort.printAddr(a); 8125315Sstever@gmail.com} 
8135315Sstever@gmail.com 8145315Sstever@gmail.com 8152623SN/A//////////////////////////////////////////////////////////////////////// 8162623SN/A// 8172623SN/A// AtomicSimpleCPU Simulation Object 8182623SN/A// 8194762Snate@binkert.orgAtomicSimpleCPU * 8204762Snate@binkert.orgAtomicSimpleCPUParams::create() 8212623SN/A{ 8225529Snate@binkert.org numThreads = 1; 8235529Snate@binkert.org#if !FULL_SYSTEM 8244762Snate@binkert.org if (workload.size() != 1) 8254762Snate@binkert.org panic("only one workload allowed"); 8262623SN/A#endif 8275529Snate@binkert.org return new AtomicSimpleCPU(this); 8282623SN/A} 829