atomic.cc revision 7823
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmaped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/atomic.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}

void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

Port *
AtomicSimpleCPU::getPort(const string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else if (if_name == "physmem_port") {
        hasPhysMemPort = true;
        return &physmemPort;
    }
    else
        panic("No Such Port\n");
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, tc->contextId());
    }
#endif
    if (hasPhysMemPort) {
        bool snoop = false;
        AddrRangeList pmAddrList;
        physmemPort.getPeerAddressRanges(pmAddrList, snoop);
        physMemAddr = *pmAddrList.begin();
    }
    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

bool
AtomicSimpleCPU::CpuPort::recvTiming(PacketPtr pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvTiming callback!");
    return true;
}

Tick
AtomicSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    // Snooping a coherence request, just return
    return 0;
}

void
AtomicSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    // No internal storage to update, just return
    return;
}

void
AtomicSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!");
}

void
AtomicSimpleCPU::CpuPort::recvRetry()
{
    panic("AtomicSimpleCPU doesn't expect recvRetry callback!");
}

void
AtomicSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + "-iport", this), dcachePort(name() + "-iport", this),
      physmemPort(name() + "-iport", this), hasPhysMemPort(false)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;
}

AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

void
AtomicSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    SERIALIZE_SCALAR(locked);
    BaseSimpleCPU::serialize(os);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
}

void
AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    UNSERIALIZE_SCALAR(locked);
    BaseSimpleCPU::unserialize(cp, section);
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
}

void
AtomicSimpleCPU::resume()
{
    if (_status == Idle || _status == SwitchedOut)
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    assert(system->getMemoryMode() == Enums::atomic);

    changeState(SimObject::Running);
    if (thread->status() == ThreadContext::Active) {
        if (!tickEvent.scheduled())
            schedule(tickEvent, nextCycle());
    }
}

void
AtomicSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;

    tickEvent.squash();
}

void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            schedule(tickEvent, nextCycle());
            break;
        }
    }
    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

void
AtomicSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend);

    // Make sure ticks are still on multiples of cycles
    schedule(tickEvent, nextCycle(curTick() + ticks(delay)));
    _status = Running;
}

void
AtomicSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction--;
    _status = Idle;
}

Fault
AtomicSimpleCPU::readBytes(Addr addr, uint8_t *data,
                           unsigned size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    // The block size of our peer.
    unsigned blockSize = dcachePort.peerBlockSize();
    // The size of the data we're trying to read.
    int fullSize = size;

    // The address of the second part of this access if it needs to be split
    // across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, blockSize);

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, size, flags, thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt = Packet(req,
                    req->isLLSC() ? MemCmd::LoadLockedReq : MemCmd::ReadReq,
                    Packet::Broadcast);
            pkt.dataStatic(data);

            if (req->isMmapedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                    dcache_latency += physmemPort.sendAtomic(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        // If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        // If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr) {
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        // Move the pointer we're reading into to the correct location.
        data += size;
        // Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        // And access the right address.
        addr = secondAddr;
    }
}

template <class T>
Fault
AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    uint8_t *dataPtr = (uint8_t *)&data;
    memset(dataPtr, 0, sizeof(data));
    Fault fault = readBytes(addr, dataPtr, sizeof(data), flags);
    if (fault == NoFault) {
        data = gtoh(data);
        if (traceData)
            traceData->setData(data);
    }
    return fault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
AtomicSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}

template<>
Fault
AtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}

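// writeBytes: perform an atomic-mode store of 'size' bytes to virtual
// address 'addr'. Store-conditional, swap, and conditional-swap requests
// select the corresponding memory command, and the result (or the old
// value for swaps) is returned through 'res'. Like readBytes, accesses
// that span a cache-line boundary are issued as two requests.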
Fault
AtomicSimpleCPU::writeBytes(uint8_t *data, unsigned size,
                            Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    // The block size of our peer.
    unsigned blockSize = dcachePort.peerBlockSize();
    // The size of the data we're trying to write.
    int fullSize = size;

    // The address of the second part of this access if it needs to be split
    // across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, blockSize);

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, size, flags, thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd, Packet::Broadcast);
                pkt.dataStatic(data);

                if (req->isMmapedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                        dcache_latency += physmemPort.sendAtomic(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        // If there's a fault or we don't need to access a second cache line,
        // stop now.
        if (fault != NoFault || secondAddr <= addr) {
            if (req->isLocked() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        // Move the pointer we're writing from to the correct location.
        data += size;
        // Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        // And access the right address.
        addr = secondAddr;
    }
}

template <class T>
Fault
AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    uint8_t *dataPtr = (uint8_t *)&data;
    if (traceData)
        traceData->setData(data);
    data = htog(data);

    Fault fault = writeBytes(dataPtr, sizeof(data), addr, flags, res);
    if (fault == NoFault && data_write_req.isSwap()) {
        *res = gtoh((T)*res);
    }
    return fault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
AtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}

template<>
Fault
AtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}

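// tick: the main simulation loop. Each invocation fetches, executes, and
// retires up to 'width' instructions (continuing while a locked
// read-modify-write access is still outstanding), accumulates any simulated
// I-cache and D-cache stall time into 'latency', and then reschedules
// itself 'latency' ticks in the future as long as the CPU is not idle.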
void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the predecoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                // Fetch more instruction memory if necessary
                //if (predecoder.needMoreBytes())
                //{
                    icache_access = true;
                    Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq,
                                               Packet::Broadcast);
                    ifetch_pkt.dataStatic(&inst);

                    if (hasPhysMemPort && ifetch_pkt.getAddr() == physMemAddr)
                        icache_latency = physmemPort.sendAtomic(&ifetch_pkt);
                    else
                        icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                    assert(!ifetch_pkt.isError());

                    // ifetch_req is initialized to read the instruction
                    // directly into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                Tick stall_cycles = stall_ticks / ticks(1);
                Tick aligned_stall_ticks = ticks(stall_cycles);

                if (aligned_stall_ticks < stall_ticks)
                    aligned_stall_ticks += 1;

                latency += aligned_stall_ticks;
            }

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    // instruction takes at least one cycle
    if (latency < ticks(1))
        latency = ticks(1);

    if (_status != Idle)
        schedule(tickEvent, curTick() + latency);
}

void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new AtomicSimpleCPU(this);
}