/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
272665Ssaidi@eecs.umich.edu * 282665Ssaidi@eecs.umich.edu * Authors: Steve Reinhardt 292623SN/A */ 302623SN/A 313170Sstever@eecs.umich.edu#include "arch/locked_mem.hh" 328105Sgblack@eecs.umich.edu#include "arch/mmapped_ipr.hh" 332623SN/A#include "arch/utility.hh" 344040Ssaidi@eecs.umich.edu#include "base/bigint.hh" 356658Snate@binkert.org#include "config/the_isa.hh" 368229Snate@binkert.org#include "cpu/simple/atomic.hh" 372623SN/A#include "cpu/exetrace.hh" 388232Snate@binkert.org#include "debug/ExecFaulting.hh" 398232Snate@binkert.org#include "debug/SimpleCPU.hh" 403348Sbinkertn@umich.edu#include "mem/packet.hh" 413348Sbinkertn@umich.edu#include "mem/packet_access.hh" 424762Snate@binkert.org#include "params/AtomicSimpleCPU.hh" 437678Sgblack@eecs.umich.edu#include "sim/faults.hh" 442901Ssaidi@eecs.umich.edu#include "sim/system.hh" 452623SN/A 462623SN/Ausing namespace std; 472623SN/Ausing namespace TheISA; 482623SN/A 492623SN/AAtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c) 505606Snate@binkert.org : Event(CPU_Tick_Pri), cpu(c) 512623SN/A{ 522623SN/A} 532623SN/A 542623SN/A 552623SN/Avoid 562623SN/AAtomicSimpleCPU::TickEvent::process() 572623SN/A{ 582623SN/A cpu->tick(); 592623SN/A} 602623SN/A 612623SN/Aconst char * 625336Shines@cs.fsu.eduAtomicSimpleCPU::TickEvent::description() const 632623SN/A{ 644873Sstever@eecs.umich.edu return "AtomicSimpleCPU tick"; 652623SN/A} 662623SN/A 672856Srdreslin@umich.eduPort * 686227Snate@binkert.orgAtomicSimpleCPU::getPort(const string &if_name, int idx) 692856Srdreslin@umich.edu{ 702856Srdreslin@umich.edu if (if_name == "dcache_port") 712856Srdreslin@umich.edu return &dcachePort; 722856Srdreslin@umich.edu else if (if_name == "icache_port") 732856Srdreslin@umich.edu return &icachePort; 744968Sacolyte@umich.edu else if (if_name == "physmem_port") { 754968Sacolyte@umich.edu hasPhysMemPort = true; 764968Sacolyte@umich.edu return &physmemPort; 774968Sacolyte@umich.edu } 782856Srdreslin@umich.edu else 792856Srdreslin@umich.edu 
panic("No Such Port\n"); 802856Srdreslin@umich.edu} 812623SN/A 822623SN/Avoid 832623SN/AAtomicSimpleCPU::init() 842623SN/A{ 852623SN/A BaseCPU::init(); 862623SN/A#if FULL_SYSTEM 876221Snate@binkert.org ThreadID size = threadContexts.size(); 886221Snate@binkert.org for (ThreadID i = 0; i < size; ++i) { 892680Sktlim@umich.edu ThreadContext *tc = threadContexts[i]; 902623SN/A 912623SN/A // initialize CPU, including PC 925714Shsul@eecs.umich.edu TheISA::initCPU(tc, tc->contextId()); 932623SN/A } 942623SN/A#endif 954968Sacolyte@umich.edu if (hasPhysMemPort) { 964968Sacolyte@umich.edu bool snoop = false; 974968Sacolyte@umich.edu AddrRangeList pmAddrList; 984968Sacolyte@umich.edu physmemPort.getPeerAddressRanges(pmAddrList, snoop); 994968Sacolyte@umich.edu physMemAddr = *pmAddrList.begin(); 1004968Sacolyte@umich.edu } 1015714Shsul@eecs.umich.edu // Atomic doesn't do MT right now, so contextId == threadId 1025712Shsul@eecs.umich.edu ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT 1035712Shsul@eecs.umich.edu data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too 1045712Shsul@eecs.umich.edu data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too 1052623SN/A} 1062623SN/A 1072623SN/Abool 1083349Sbinkertn@umich.eduAtomicSimpleCPU::CpuPort::recvTiming(PacketPtr pkt) 1092623SN/A{ 1103184Srdreslin@umich.edu panic("AtomicSimpleCPU doesn't expect recvTiming callback!"); 1112623SN/A return true; 1122623SN/A} 1132623SN/A 1142623SN/ATick 1153349Sbinkertn@umich.eduAtomicSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt) 1162623SN/A{ 1173310Srdreslin@umich.edu //Snooping a coherence request, just return 1183649Srdreslin@umich.edu return 0; 1192623SN/A} 1202623SN/A 1212623SN/Avoid 1223349Sbinkertn@umich.eduAtomicSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt) 1232623SN/A{ 1243184Srdreslin@umich.edu //No internal storage to update, just return 1253184Srdreslin@umich.edu return; 1262623SN/A} 1272623SN/A 1282623SN/Avoid 
1292623SN/AAtomicSimpleCPU::CpuPort::recvStatusChange(Status status) 1302623SN/A{ 1313647Srdreslin@umich.edu if (status == RangeChange) { 1323647Srdreslin@umich.edu if (!snoopRangeSent) { 1333647Srdreslin@umich.edu snoopRangeSent = true; 1343647Srdreslin@umich.edu sendStatusChange(Port::RangeChange); 1353647Srdreslin@umich.edu } 1362626SN/A return; 1373647Srdreslin@umich.edu } 1382626SN/A 1392623SN/A panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!"); 1402623SN/A} 1412623SN/A 1422657Ssaidi@eecs.umich.eduvoid 1432623SN/AAtomicSimpleCPU::CpuPort::recvRetry() 1442623SN/A{ 1452623SN/A panic("AtomicSimpleCPU doesn't expect recvRetry callback!"); 1462623SN/A} 1472623SN/A 1484192Sktlim@umich.eduvoid 1494192Sktlim@umich.eduAtomicSimpleCPU::DcachePort::setPeer(Port *port) 1504192Sktlim@umich.edu{ 1514192Sktlim@umich.edu Port::setPeer(port); 1524192Sktlim@umich.edu 1534192Sktlim@umich.edu#if FULL_SYSTEM 1544192Sktlim@umich.edu // Update the ThreadContext's memory ports (Functional/Virtual 1554192Sktlim@umich.edu // Ports) 1565497Ssaidi@eecs.umich.edu cpu->tcBase()->connectMemPorts(cpu->tcBase()); 1574192Sktlim@umich.edu#endif 1584192Sktlim@umich.edu} 1592623SN/A 1605529Snate@binkert.orgAtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p) 1616078Sgblack@eecs.umich.edu : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false), 1625487Snate@binkert.org simulate_data_stalls(p->simulate_data_stalls), 1635487Snate@binkert.org simulate_inst_stalls(p->simulate_inst_stalls), 1644968Sacolyte@umich.edu icachePort(name() + "-iport", this), dcachePort(name() + "-iport", this), 1654968Sacolyte@umich.edu physmemPort(name() + "-iport", this), hasPhysMemPort(false) 1662623SN/A{ 1672623SN/A _status = Idle; 1682623SN/A 1693647Srdreslin@umich.edu icachePort.snoopRangeSent = false; 1703647Srdreslin@umich.edu dcachePort.snoopRangeSent = false; 1713647Srdreslin@umich.edu 1722623SN/A} 1732623SN/A 1742623SN/A 1752623SN/AAtomicSimpleCPU::~AtomicSimpleCPU() 1762623SN/A{ 
1776775SBrad.Beckmann@amd.com if (tickEvent.scheduled()) { 1786775SBrad.Beckmann@amd.com deschedule(tickEvent); 1796775SBrad.Beckmann@amd.com } 1802623SN/A} 1812623SN/A 1822623SN/Avoid 1832623SN/AAtomicSimpleCPU::serialize(ostream &os) 1842623SN/A{ 1852915Sktlim@umich.edu SimObject::State so_state = SimObject::getState(); 1862915Sktlim@umich.edu SERIALIZE_ENUM(so_state); 1876078Sgblack@eecs.umich.edu SERIALIZE_SCALAR(locked); 1883145Shsul@eecs.umich.edu BaseSimpleCPU::serialize(os); 1892623SN/A nameOut(os, csprintf("%s.tickEvent", name())); 1902623SN/A tickEvent.serialize(os); 1912623SN/A} 1922623SN/A 1932623SN/Avoid 1942623SN/AAtomicSimpleCPU::unserialize(Checkpoint *cp, const string §ion) 1952623SN/A{ 1962915Sktlim@umich.edu SimObject::State so_state; 1972915Sktlim@umich.edu UNSERIALIZE_ENUM(so_state); 1986078Sgblack@eecs.umich.edu UNSERIALIZE_SCALAR(locked); 1993145Shsul@eecs.umich.edu BaseSimpleCPU::unserialize(cp, section); 2002915Sktlim@umich.edu tickEvent.unserialize(cp, csprintf("%s.tickEvent", section)); 2012915Sktlim@umich.edu} 2022915Sktlim@umich.edu 2032915Sktlim@umich.eduvoid 2042915Sktlim@umich.eduAtomicSimpleCPU::resume() 2052915Sktlim@umich.edu{ 2065220Ssaidi@eecs.umich.edu if (_status == Idle || _status == SwitchedOut) 2075220Ssaidi@eecs.umich.edu return; 2085220Ssaidi@eecs.umich.edu 2094940Snate@binkert.org DPRINTF(SimpleCPU, "Resume\n"); 2105220Ssaidi@eecs.umich.edu assert(system->getMemoryMode() == Enums::atomic); 2113324Shsul@eecs.umich.edu 2125220Ssaidi@eecs.umich.edu changeState(SimObject::Running); 2135220Ssaidi@eecs.umich.edu if (thread->status() == ThreadContext::Active) { 2145606Snate@binkert.org if (!tickEvent.scheduled()) 2155606Snate@binkert.org schedule(tickEvent, nextCycle()); 2162915Sktlim@umich.edu } 2177897Shestness@cs.utexas.edu system->totalNumInsts = 0; 2182623SN/A} 2192623SN/A 2202623SN/Avoid 2212798Sktlim@umich.eduAtomicSimpleCPU::switchOut() 2222623SN/A{ 2235496Ssaidi@eecs.umich.edu assert(_status == Running || _status == 
Idle); 2242798Sktlim@umich.edu _status = SwitchedOut; 2252623SN/A 2262798Sktlim@umich.edu tickEvent.squash(); 2272623SN/A} 2282623SN/A 2292623SN/A 2302623SN/Avoid 2312623SN/AAtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU) 2322623SN/A{ 2334192Sktlim@umich.edu BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort); 2342623SN/A 2352623SN/A assert(!tickEvent.scheduled()); 2362623SN/A 2372680Sktlim@umich.edu // if any of this CPU's ThreadContexts are active, mark the CPU as 2382623SN/A // running and schedule its tick event. 2396221Snate@binkert.org ThreadID size = threadContexts.size(); 2406221Snate@binkert.org for (ThreadID i = 0; i < size; ++i) { 2412680Sktlim@umich.edu ThreadContext *tc = threadContexts[i]; 2422680Sktlim@umich.edu if (tc->status() == ThreadContext::Active && _status != Running) { 2432623SN/A _status = Running; 2445606Snate@binkert.org schedule(tickEvent, nextCycle()); 2452623SN/A break; 2462623SN/A } 2472623SN/A } 2483512Sktlim@umich.edu if (_status != Running) { 2493512Sktlim@umich.edu _status = Idle; 2503512Sktlim@umich.edu } 2515169Ssaidi@eecs.umich.edu assert(threadContexts.size() == 1); 2525712Shsul@eecs.umich.edu ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT 2535712Shsul@eecs.umich.edu data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too 2545712Shsul@eecs.umich.edu data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too 2552623SN/A} 2562623SN/A 2572623SN/A 2582623SN/Avoid 2592623SN/AAtomicSimpleCPU::activateContext(int thread_num, int delay) 2602623SN/A{ 2614940Snate@binkert.org DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay); 2624940Snate@binkert.org 2632623SN/A assert(thread_num == 0); 2642683Sktlim@umich.edu assert(thread); 2652623SN/A 2662623SN/A assert(_status == Idle); 2672623SN/A assert(!tickEvent.scheduled()); 2682623SN/A 2692623SN/A notIdleFraction++; 2705101Ssaidi@eecs.umich.edu numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend); 
2713686Sktlim@umich.edu 2723430Sgblack@eecs.umich.edu //Make sure ticks are still on multiples of cycles 2737823Ssteve.reinhardt@amd.com schedule(tickEvent, nextCycle(curTick() + ticks(delay))); 2742623SN/A _status = Running; 2752623SN/A} 2762623SN/A 2772623SN/A 2782623SN/Avoid 2792623SN/AAtomicSimpleCPU::suspendContext(int thread_num) 2802623SN/A{ 2814940Snate@binkert.org DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num); 2824940Snate@binkert.org 2832623SN/A assert(thread_num == 0); 2842683Sktlim@umich.edu assert(thread); 2852623SN/A 2866043Sgblack@eecs.umich.edu if (_status == Idle) 2876043Sgblack@eecs.umich.edu return; 2886043Sgblack@eecs.umich.edu 2892623SN/A assert(_status == Running); 2902626SN/A 2912626SN/A // tick event may not be scheduled if this gets called from inside 2922626SN/A // an instruction's execution, e.g. "quiesce" 2932626SN/A if (tickEvent.scheduled()) 2945606Snate@binkert.org deschedule(tickEvent); 2952623SN/A 2962623SN/A notIdleFraction--; 2972623SN/A _status = Idle; 2982623SN/A} 2992623SN/A 3002623SN/A 3012623SN/AFault 3027520Sgblack@eecs.umich.eduAtomicSimpleCPU::readBytes(Addr addr, uint8_t * data, 3037520Sgblack@eecs.umich.edu unsigned size, unsigned flags) 3042623SN/A{ 3053169Sstever@eecs.umich.edu // use the CPU's statically allocated read request and packet objects 3064870Sstever@eecs.umich.edu Request *req = &data_read_req; 3072623SN/A 3082623SN/A if (traceData) { 3092623SN/A traceData->setAddr(addr); 3102623SN/A } 3112623SN/A 3124999Sgblack@eecs.umich.edu //The block size of our peer. 3136227Snate@binkert.org unsigned blockSize = dcachePort.peerBlockSize(); 3144999Sgblack@eecs.umich.edu //The size of the data we're trying to read. 3157520Sgblack@eecs.umich.edu int fullSize = size; 3162623SN/A 3174999Sgblack@eecs.umich.edu //The address of the second part of this access if it needs to be split 3184999Sgblack@eecs.umich.edu //across a cache line boundary. 
3197520Sgblack@eecs.umich.edu Addr secondAddr = roundDown(addr + size - 1, blockSize); 3204999Sgblack@eecs.umich.edu 3217520Sgblack@eecs.umich.edu if (secondAddr > addr) 3227520Sgblack@eecs.umich.edu size = secondAddr - addr; 3234999Sgblack@eecs.umich.edu 3244999Sgblack@eecs.umich.edu dcache_latency = 0; 3254999Sgblack@eecs.umich.edu 3267520Sgblack@eecs.umich.edu while (1) { 3277720Sgblack@eecs.umich.edu req->setVirt(0, addr, size, flags, thread->pcState().instAddr()); 3284999Sgblack@eecs.umich.edu 3294999Sgblack@eecs.umich.edu // translate to physical address 3306023Snate@binkert.org Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read); 3314999Sgblack@eecs.umich.edu 3324999Sgblack@eecs.umich.edu // Now do the access. 3336623Sgblack@eecs.umich.edu if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) { 3344999Sgblack@eecs.umich.edu Packet pkt = Packet(req, 3356102Sgblack@eecs.umich.edu req->isLLSC() ? MemCmd::LoadLockedReq : MemCmd::ReadReq, 3364999Sgblack@eecs.umich.edu Packet::Broadcast); 3377520Sgblack@eecs.umich.edu pkt.dataStatic(data); 3384999Sgblack@eecs.umich.edu 3398105Sgblack@eecs.umich.edu if (req->isMmappedIpr()) 3404999Sgblack@eecs.umich.edu dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt); 3414999Sgblack@eecs.umich.edu else { 3424999Sgblack@eecs.umich.edu if (hasPhysMemPort && pkt.getAddr() == physMemAddr) 3434999Sgblack@eecs.umich.edu dcache_latency += physmemPort.sendAtomic(&pkt); 3444999Sgblack@eecs.umich.edu else 3454999Sgblack@eecs.umich.edu dcache_latency += dcachePort.sendAtomic(&pkt); 3464999Sgblack@eecs.umich.edu } 3474999Sgblack@eecs.umich.edu dcache_access = true; 3485012Sgblack@eecs.umich.edu 3494999Sgblack@eecs.umich.edu assert(!pkt.isError()); 3504999Sgblack@eecs.umich.edu 3516102Sgblack@eecs.umich.edu if (req->isLLSC()) { 3524999Sgblack@eecs.umich.edu TheISA::handleLockedRead(thread, req); 3534999Sgblack@eecs.umich.edu } 3544968Sacolyte@umich.edu } 3554986Ssaidi@eecs.umich.edu 
3564999Sgblack@eecs.umich.edu //If there's a fault, return it 3576739Sgblack@eecs.umich.edu if (fault != NoFault) { 3586739Sgblack@eecs.umich.edu if (req->isPrefetch()) { 3596739Sgblack@eecs.umich.edu return NoFault; 3606739Sgblack@eecs.umich.edu } else { 3616739Sgblack@eecs.umich.edu return fault; 3626739Sgblack@eecs.umich.edu } 3636739Sgblack@eecs.umich.edu } 3646739Sgblack@eecs.umich.edu 3654999Sgblack@eecs.umich.edu //If we don't need to access a second cache line, stop now. 3664999Sgblack@eecs.umich.edu if (secondAddr <= addr) 3674999Sgblack@eecs.umich.edu { 3686078Sgblack@eecs.umich.edu if (req->isLocked() && fault == NoFault) { 3696078Sgblack@eecs.umich.edu assert(!locked); 3706078Sgblack@eecs.umich.edu locked = true; 3716078Sgblack@eecs.umich.edu } 3724999Sgblack@eecs.umich.edu return fault; 3734968Sacolyte@umich.edu } 3743170Sstever@eecs.umich.edu 3754999Sgblack@eecs.umich.edu /* 3764999Sgblack@eecs.umich.edu * Set up for accessing the second cache line. 3774999Sgblack@eecs.umich.edu */ 3784999Sgblack@eecs.umich.edu 3794999Sgblack@eecs.umich.edu //Move the pointer we're reading into to the correct location. 3807520Sgblack@eecs.umich.edu data += size; 3814999Sgblack@eecs.umich.edu //Adjust the size to get the remaining bytes. 3827520Sgblack@eecs.umich.edu size = addr + fullSize - secondAddr; 3834999Sgblack@eecs.umich.edu //And access the right address. 
3844999Sgblack@eecs.umich.edu addr = secondAddr; 3852623SN/A } 3862623SN/A} 3872623SN/A 3887520Sgblack@eecs.umich.edu 3897520Sgblack@eecs.umich.edutemplate <class T> 3907520Sgblack@eecs.umich.eduFault 3917520Sgblack@eecs.umich.eduAtomicSimpleCPU::read(Addr addr, T &data, unsigned flags) 3927520Sgblack@eecs.umich.edu{ 3937520Sgblack@eecs.umich.edu uint8_t *dataPtr = (uint8_t *)&data; 3947520Sgblack@eecs.umich.edu memset(dataPtr, 0, sizeof(data)); 3957520Sgblack@eecs.umich.edu Fault fault = readBytes(addr, dataPtr, sizeof(data), flags); 3967520Sgblack@eecs.umich.edu if (fault == NoFault) { 3977520Sgblack@eecs.umich.edu data = gtoh(data); 3987520Sgblack@eecs.umich.edu if (traceData) 3997520Sgblack@eecs.umich.edu traceData->setData(data); 4007520Sgblack@eecs.umich.edu } 4017520Sgblack@eecs.umich.edu return fault; 4027520Sgblack@eecs.umich.edu} 4037520Sgblack@eecs.umich.edu 4042623SN/A#ifndef DOXYGEN_SHOULD_SKIP_THIS 4052623SN/A 4062623SN/Atemplate 4072623SN/AFault 4084115Ssaidi@eecs.umich.eduAtomicSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags); 4094115Ssaidi@eecs.umich.edu 4104115Ssaidi@eecs.umich.edutemplate 4114115Ssaidi@eecs.umich.eduFault 4124040Ssaidi@eecs.umich.eduAtomicSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags); 4134040Ssaidi@eecs.umich.edu 4144040Ssaidi@eecs.umich.edutemplate 4154040Ssaidi@eecs.umich.eduFault 4162623SN/AAtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags); 4172623SN/A 4182623SN/Atemplate 4192623SN/AFault 4202623SN/AAtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags); 4212623SN/A 4222623SN/Atemplate 4232623SN/AFault 4242623SN/AAtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags); 4252623SN/A 4262623SN/Atemplate 4272623SN/AFault 4282623SN/AAtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags); 4292623SN/A 4302623SN/A#endif //DOXYGEN_SHOULD_SKIP_THIS 4312623SN/A 4322623SN/Atemplate<> 4332623SN/AFault 4342623SN/AAtomicSimpleCPU::read(Addr addr, double &data, unsigned flags) 
4352623SN/A{ 4362623SN/A return read(addr, *(uint64_t*)&data, flags); 4372623SN/A} 4382623SN/A 4392623SN/Atemplate<> 4402623SN/AFault 4412623SN/AAtomicSimpleCPU::read(Addr addr, float &data, unsigned flags) 4422623SN/A{ 4432623SN/A return read(addr, *(uint32_t*)&data, flags); 4442623SN/A} 4452623SN/A 4462623SN/A 4472623SN/Atemplate<> 4482623SN/AFault 4492623SN/AAtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags) 4502623SN/A{ 4512623SN/A return read(addr, (uint32_t&)data, flags); 4522623SN/A} 4532623SN/A 4542623SN/A 4552623SN/AFault 4567520Sgblack@eecs.umich.eduAtomicSimpleCPU::writeBytes(uint8_t *data, unsigned size, 4577520Sgblack@eecs.umich.edu Addr addr, unsigned flags, uint64_t *res) 4582623SN/A{ 4593169Sstever@eecs.umich.edu // use the CPU's statically allocated write request and packet objects 4604870Sstever@eecs.umich.edu Request *req = &data_write_req; 4612623SN/A 4622623SN/A if (traceData) { 4632623SN/A traceData->setAddr(addr); 4642623SN/A } 4652623SN/A 4664999Sgblack@eecs.umich.edu //The block size of our peer. 4676227Snate@binkert.org unsigned blockSize = dcachePort.peerBlockSize(); 4684999Sgblack@eecs.umich.edu //The size of the data we're trying to read. 4697520Sgblack@eecs.umich.edu int fullSize = size; 4702623SN/A 4714999Sgblack@eecs.umich.edu //The address of the second part of this access if it needs to be split 4724999Sgblack@eecs.umich.edu //across a cache line boundary. 
4737520Sgblack@eecs.umich.edu Addr secondAddr = roundDown(addr + size - 1, blockSize); 4744999Sgblack@eecs.umich.edu 4754999Sgblack@eecs.umich.edu if(secondAddr > addr) 4767520Sgblack@eecs.umich.edu size = secondAddr - addr; 4774999Sgblack@eecs.umich.edu 4784999Sgblack@eecs.umich.edu dcache_latency = 0; 4794999Sgblack@eecs.umich.edu 4804999Sgblack@eecs.umich.edu while(1) { 4817720Sgblack@eecs.umich.edu req->setVirt(0, addr, size, flags, thread->pcState().instAddr()); 4824999Sgblack@eecs.umich.edu 4834999Sgblack@eecs.umich.edu // translate to physical address 4846023Snate@binkert.org Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write); 4854999Sgblack@eecs.umich.edu 4864999Sgblack@eecs.umich.edu // Now do the access. 4874999Sgblack@eecs.umich.edu if (fault == NoFault) { 4884999Sgblack@eecs.umich.edu MemCmd cmd = MemCmd::WriteReq; // default 4894999Sgblack@eecs.umich.edu bool do_access = true; // flag to suppress cache access 4904999Sgblack@eecs.umich.edu 4916102Sgblack@eecs.umich.edu if (req->isLLSC()) { 4924999Sgblack@eecs.umich.edu cmd = MemCmd::StoreCondReq; 4934999Sgblack@eecs.umich.edu do_access = TheISA::handleLockedWrite(thread, req); 4944999Sgblack@eecs.umich.edu } else if (req->isSwap()) { 4954999Sgblack@eecs.umich.edu cmd = MemCmd::SwapReq; 4964999Sgblack@eecs.umich.edu if (req->isCondSwap()) { 4974999Sgblack@eecs.umich.edu assert(res); 4984999Sgblack@eecs.umich.edu req->setExtraData(*res); 4994999Sgblack@eecs.umich.edu } 5004999Sgblack@eecs.umich.edu } 5014999Sgblack@eecs.umich.edu 5026623Sgblack@eecs.umich.edu if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) { 5034999Sgblack@eecs.umich.edu Packet pkt = Packet(req, cmd, Packet::Broadcast); 5047520Sgblack@eecs.umich.edu pkt.dataStatic(data); 5054999Sgblack@eecs.umich.edu 5068105Sgblack@eecs.umich.edu if (req->isMmappedIpr()) { 5074999Sgblack@eecs.umich.edu dcache_latency += 5084999Sgblack@eecs.umich.edu TheISA::handleIprWrite(thread->getTC(), &pkt); 5094999Sgblack@eecs.umich.edu 
} else { 5104999Sgblack@eecs.umich.edu if (hasPhysMemPort && pkt.getAddr() == physMemAddr) 5114999Sgblack@eecs.umich.edu dcache_latency += physmemPort.sendAtomic(&pkt); 5124999Sgblack@eecs.umich.edu else 5134999Sgblack@eecs.umich.edu dcache_latency += dcachePort.sendAtomic(&pkt); 5144999Sgblack@eecs.umich.edu } 5154999Sgblack@eecs.umich.edu dcache_access = true; 5164999Sgblack@eecs.umich.edu assert(!pkt.isError()); 5174999Sgblack@eecs.umich.edu 5184999Sgblack@eecs.umich.edu if (req->isSwap()) { 5194999Sgblack@eecs.umich.edu assert(res); 5207520Sgblack@eecs.umich.edu memcpy(res, pkt.getPtr<uint8_t>(), fullSize); 5214999Sgblack@eecs.umich.edu } 5224999Sgblack@eecs.umich.edu } 5234999Sgblack@eecs.umich.edu 5244999Sgblack@eecs.umich.edu if (res && !req->isSwap()) { 5254999Sgblack@eecs.umich.edu *res = req->getExtraData(); 5264878Sstever@eecs.umich.edu } 5274040Ssaidi@eecs.umich.edu } 5284040Ssaidi@eecs.umich.edu 5294999Sgblack@eecs.umich.edu //If there's a fault or we don't need to access a second cache line, 5304999Sgblack@eecs.umich.edu //stop now. 5314999Sgblack@eecs.umich.edu if (fault != NoFault || secondAddr <= addr) 5324999Sgblack@eecs.umich.edu { 5336078Sgblack@eecs.umich.edu if (req->isLocked() && fault == NoFault) { 5346078Sgblack@eecs.umich.edu assert(locked); 5356078Sgblack@eecs.umich.edu locked = false; 5366078Sgblack@eecs.umich.edu } 5376739Sgblack@eecs.umich.edu if (fault != NoFault && req->isPrefetch()) { 5386739Sgblack@eecs.umich.edu return NoFault; 5396739Sgblack@eecs.umich.edu } else { 5406739Sgblack@eecs.umich.edu return fault; 5416739Sgblack@eecs.umich.edu } 5423170Sstever@eecs.umich.edu } 5433170Sstever@eecs.umich.edu 5444999Sgblack@eecs.umich.edu /* 5454999Sgblack@eecs.umich.edu * Set up for accessing the second cache line. 5464999Sgblack@eecs.umich.edu */ 5474999Sgblack@eecs.umich.edu 5484999Sgblack@eecs.umich.edu //Move the pointer we're reading into to the correct location. 
5497520Sgblack@eecs.umich.edu data += size; 5504999Sgblack@eecs.umich.edu //Adjust the size to get the remaining bytes. 5517520Sgblack@eecs.umich.edu size = addr + fullSize - secondAddr; 5524999Sgblack@eecs.umich.edu //And access the right address. 5534999Sgblack@eecs.umich.edu addr = secondAddr; 5542623SN/A } 5552623SN/A} 5562623SN/A 5572623SN/A 5587520Sgblack@eecs.umich.edutemplate <class T> 5597520Sgblack@eecs.umich.eduFault 5607520Sgblack@eecs.umich.eduAtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res) 5617520Sgblack@eecs.umich.edu{ 5627520Sgblack@eecs.umich.edu uint8_t *dataPtr = (uint8_t *)&data; 5637520Sgblack@eecs.umich.edu if (traceData) 5647520Sgblack@eecs.umich.edu traceData->setData(data); 5657520Sgblack@eecs.umich.edu data = htog(data); 5667520Sgblack@eecs.umich.edu 5677520Sgblack@eecs.umich.edu Fault fault = writeBytes(dataPtr, sizeof(data), addr, flags, res); 5687520Sgblack@eecs.umich.edu if (fault == NoFault && data_write_req.isSwap()) { 5697520Sgblack@eecs.umich.edu *res = gtoh((T)*res); 5707520Sgblack@eecs.umich.edu } 5717520Sgblack@eecs.umich.edu return fault; 5727520Sgblack@eecs.umich.edu} 5737520Sgblack@eecs.umich.edu 5747520Sgblack@eecs.umich.edu 5752623SN/A#ifndef DOXYGEN_SHOULD_SKIP_THIS 5764224Sgblack@eecs.umich.edu 5774224Sgblack@eecs.umich.edutemplate 5784224Sgblack@eecs.umich.eduFault 5794224Sgblack@eecs.umich.eduAtomicSimpleCPU::write(Twin32_t data, Addr addr, 5804224Sgblack@eecs.umich.edu unsigned flags, uint64_t *res); 5814224Sgblack@eecs.umich.edu 5824224Sgblack@eecs.umich.edutemplate 5834224Sgblack@eecs.umich.eduFault 5844224Sgblack@eecs.umich.eduAtomicSimpleCPU::write(Twin64_t data, Addr addr, 5854224Sgblack@eecs.umich.edu unsigned flags, uint64_t *res); 5864224Sgblack@eecs.umich.edu 5872623SN/Atemplate 5882623SN/AFault 5892623SN/AAtomicSimpleCPU::write(uint64_t data, Addr addr, 5902623SN/A unsigned flags, uint64_t *res); 5912623SN/A 5922623SN/Atemplate 5932623SN/AFault 
5942623SN/AAtomicSimpleCPU::write(uint32_t data, Addr addr, 5952623SN/A unsigned flags, uint64_t *res); 5962623SN/A 5972623SN/Atemplate 5982623SN/AFault 5992623SN/AAtomicSimpleCPU::write(uint16_t data, Addr addr, 6002623SN/A unsigned flags, uint64_t *res); 6012623SN/A 6022623SN/Atemplate 6032623SN/AFault 6042623SN/AAtomicSimpleCPU::write(uint8_t data, Addr addr, 6052623SN/A unsigned flags, uint64_t *res); 6062623SN/A 6072623SN/A#endif //DOXYGEN_SHOULD_SKIP_THIS 6082623SN/A 6092623SN/Atemplate<> 6102623SN/AFault 6112623SN/AAtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res) 6122623SN/A{ 6132623SN/A return write(*(uint64_t*)&data, addr, flags, res); 6142623SN/A} 6152623SN/A 6162623SN/Atemplate<> 6172623SN/AFault 6182623SN/AAtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res) 6192623SN/A{ 6202623SN/A return write(*(uint32_t*)&data, addr, flags, res); 6212623SN/A} 6222623SN/A 6232623SN/A 6242623SN/Atemplate<> 6252623SN/AFault 6262623SN/AAtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res) 6272623SN/A{ 6282623SN/A return write((uint32_t)data, addr, flags, res); 6292623SN/A} 6302623SN/A 6312623SN/A 6322623SN/Avoid 6332623SN/AAtomicSimpleCPU::tick() 6342623SN/A{ 6354940Snate@binkert.org DPRINTF(SimpleCPU, "Tick\n"); 6364940Snate@binkert.org 6375487Snate@binkert.org Tick latency = 0; 6382623SN/A 6396078Sgblack@eecs.umich.edu for (int i = 0; i < width || locked; ++i) { 6402623SN/A numCycles++; 6412623SN/A 6423387Sgblack@eecs.umich.edu if (!curStaticInst || !curStaticInst->isDelayedCommit()) 6433387Sgblack@eecs.umich.edu checkForInterrupts(); 6442626SN/A 6455348Ssaidi@eecs.umich.edu checkPcEventQueue(); 6468143SAli.Saidi@ARM.com // We must have just got suspended by a PC event 6478143SAli.Saidi@ARM.com if (_status == Idle) 6488143SAli.Saidi@ARM.com return; 6495348Ssaidi@eecs.umich.edu 6505669Sgblack@eecs.umich.edu Fault fault = NoFault; 6515669Sgblack@eecs.umich.edu 6527720Sgblack@eecs.umich.edu 
TheISA::PCState pcState = thread->pcState(); 6537720Sgblack@eecs.umich.edu 6547720Sgblack@eecs.umich.edu bool needToFetch = !isRomMicroPC(pcState.microPC()) && 6557720Sgblack@eecs.umich.edu !curMacroStaticInst; 6567720Sgblack@eecs.umich.edu if (needToFetch) { 6575894Sgblack@eecs.umich.edu setupFetchRequest(&ifetch_req); 6586023Snate@binkert.org fault = thread->itb->translateAtomic(&ifetch_req, tc, 6596023Snate@binkert.org BaseTLB::Execute); 6605894Sgblack@eecs.umich.edu } 6612623SN/A 6622623SN/A if (fault == NoFault) { 6634182Sgblack@eecs.umich.edu Tick icache_latency = 0; 6644182Sgblack@eecs.umich.edu bool icache_access = false; 6654182Sgblack@eecs.umich.edu dcache_access = false; // assume no dcache access 6662662Sstever@eecs.umich.edu 6677720Sgblack@eecs.umich.edu if (needToFetch) { 6685694Sgblack@eecs.umich.edu // This is commented out because the predecoder would act like 6695694Sgblack@eecs.umich.edu // a tiny cache otherwise. It wouldn't be flushed when needed 6705694Sgblack@eecs.umich.edu // like the I cache. It should be flushed, and when that works 6715694Sgblack@eecs.umich.edu // this code should be uncommented. 
6725669Sgblack@eecs.umich.edu //Fetch more instruction memory if necessary 6735669Sgblack@eecs.umich.edu //if(predecoder.needMoreBytes()) 6745669Sgblack@eecs.umich.edu //{ 6755669Sgblack@eecs.umich.edu icache_access = true; 6765669Sgblack@eecs.umich.edu Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq, 6775669Sgblack@eecs.umich.edu Packet::Broadcast); 6785669Sgblack@eecs.umich.edu ifetch_pkt.dataStatic(&inst); 6792623SN/A 6805669Sgblack@eecs.umich.edu if (hasPhysMemPort && ifetch_pkt.getAddr() == physMemAddr) 6815669Sgblack@eecs.umich.edu icache_latency = physmemPort.sendAtomic(&ifetch_pkt); 6825669Sgblack@eecs.umich.edu else 6835669Sgblack@eecs.umich.edu icache_latency = icachePort.sendAtomic(&ifetch_pkt); 6844968Sacolyte@umich.edu 6855669Sgblack@eecs.umich.edu assert(!ifetch_pkt.isError()); 6864968Sacolyte@umich.edu 6875669Sgblack@eecs.umich.edu // ifetch_req is initialized to read the instruction directly 6885669Sgblack@eecs.umich.edu // into the CPU object's inst field. 6895669Sgblack@eecs.umich.edu //} 6905669Sgblack@eecs.umich.edu } 6914182Sgblack@eecs.umich.edu 6922623SN/A preExecute(); 6933814Ssaidi@eecs.umich.edu 6945001Sgblack@eecs.umich.edu if (curStaticInst) { 6954182Sgblack@eecs.umich.edu fault = curStaticInst->execute(this, traceData); 6964998Sgblack@eecs.umich.edu 6974998Sgblack@eecs.umich.edu // keep an instruction count 6984998Sgblack@eecs.umich.edu if (fault == NoFault) 6994998Sgblack@eecs.umich.edu countInst(); 7007655Sali.saidi@arm.com else if (traceData && !DTRACE(ExecFaulting)) { 7015001Sgblack@eecs.umich.edu delete traceData; 7025001Sgblack@eecs.umich.edu traceData = NULL; 7035001Sgblack@eecs.umich.edu } 7044998Sgblack@eecs.umich.edu 7054182Sgblack@eecs.umich.edu postExecute(); 7064182Sgblack@eecs.umich.edu } 7072623SN/A 7083814Ssaidi@eecs.umich.edu // @todo remove me after debugging with legion done 7094539Sgblack@eecs.umich.edu if (curStaticInst && (!curStaticInst->isMicroop() || 7104539Sgblack@eecs.umich.edu 
curStaticInst->isFirstMicroop())) 7113814Ssaidi@eecs.umich.edu instCnt++; 7123814Ssaidi@eecs.umich.edu 7135487Snate@binkert.org Tick stall_ticks = 0; 7145487Snate@binkert.org if (simulate_inst_stalls && icache_access) 7155487Snate@binkert.org stall_ticks += icache_latency; 7165487Snate@binkert.org 7175487Snate@binkert.org if (simulate_data_stalls && dcache_access) 7185487Snate@binkert.org stall_ticks += dcache_latency; 7195487Snate@binkert.org 7205487Snate@binkert.org if (stall_ticks) { 7215487Snate@binkert.org Tick stall_cycles = stall_ticks / ticks(1); 7225487Snate@binkert.org Tick aligned_stall_ticks = ticks(stall_cycles); 7235487Snate@binkert.org 7245487Snate@binkert.org if (aligned_stall_ticks < stall_ticks) 7255487Snate@binkert.org aligned_stall_ticks += 1; 7265487Snate@binkert.org 7275487Snate@binkert.org latency += aligned_stall_ticks; 7282623SN/A } 7292623SN/A 7302623SN/A } 7314377Sgblack@eecs.umich.edu if(fault != NoFault || !stayAtPC) 7324182Sgblack@eecs.umich.edu advancePC(fault); 7332623SN/A } 7342623SN/A 7355487Snate@binkert.org // instruction takes at least one cycle 7365487Snate@binkert.org if (latency < ticks(1)) 7375487Snate@binkert.org latency = ticks(1); 7385487Snate@binkert.org 7392626SN/A if (_status != Idle) 7407823Ssteve.reinhardt@amd.com schedule(tickEvent, curTick() + latency); 7412623SN/A} 7422623SN/A 7432623SN/A 7445315Sstever@gmail.comvoid 7455315Sstever@gmail.comAtomicSimpleCPU::printAddr(Addr a) 7465315Sstever@gmail.com{ 7475315Sstever@gmail.com dcachePort.printAddr(a); 7485315Sstever@gmail.com} 7495315Sstever@gmail.com 7505315Sstever@gmail.com 7512623SN/A//////////////////////////////////////////////////////////////////////// 7522623SN/A// 7532623SN/A// AtomicSimpleCPU Simulation Object 7542623SN/A// 7554762Snate@binkert.orgAtomicSimpleCPU * 7564762Snate@binkert.orgAtomicSimpleCPUParams::create() 7572623SN/A{ 7585529Snate@binkert.org numThreads = 1; 7595529Snate@binkert.org#if !FULL_SYSTEM 7604762Snate@binkert.org if (workload.size() 
!= 1) 7614762Snate@binkert.org panic("only one workload allowed"); 7622623SN/A#endif 7635529Snate@binkert.org return new AtomicSimpleCPU(this); 7642623SN/A} 765