/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmaped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/atomic.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

Port *
AtomicSimpleCPU::getPort(const std::string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else if (if_name == "physmem_port") {
        hasPhysMemPort = true;
        return &physmemPort;
    }
    else
        panic("No Such Port\n");
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, tc->contextId());
    }
#endif
    if (hasPhysMemPort) {
        bool snoop = false;
        AddrRangeList pmAddrList;
        physmemPort.getPeerAddressRanges(pmAddrList, snoop);
        physMemAddr = *pmAddrList.begin();
    }
    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

bool
AtomicSimpleCPU::CpuPort::recvTiming(PacketPtr pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvTiming callback!");
    return true;
}

Tick
AtomicSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    //Snooping a coherence request, just return
    return 0;
}

void
AtomicSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
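    // Functional (debug) accesses snooped here need no work: the atomic
    // CPU keeps no cached copies of memory that such an access could
    // leave stale.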
    //No internal storage to update, just return
    return;
}

void
AtomicSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!");
}

void
AtomicSimpleCPU::CpuPort::recvRetry()
{
    panic("AtomicSimpleCPU doesn't expect recvRetry callback!");
}

void
AtomicSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + "-iport", this), dcachePort(name() + "-iport", this),
      physmemPort(name() + "-iport", this), hasPhysMemPort(false)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;

}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
}

void
AtomicSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    BaseSimpleCPU::serialize(os);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
}

void
AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    BaseSimpleCPU::unserialize(cp, section);
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
}

void
AtomicSimpleCPU::resume()
{
    if (_status == Idle || _status == SwitchedOut)
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    assert(system->getMemoryMode() == Enums::atomic);

    changeState(SimObject::Running);
    if (thread->status() == ThreadContext::Active) {
        if (!tickEvent.scheduled())
            schedule(tickEvent, nextCycle());
    }
}

void
AtomicSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;

    tickEvent.squash();
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            schedule(tickEvent, nextCycle());
            break;
        }
    }
    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}


void
AtomicSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, nextCycle(curTick + ticks(delay)));
    _status = Running;
}


void
AtomicSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
"quiesce" 2782626SN/A if (tickEvent.scheduled()) 2795606Snate@binkert.org deschedule(tickEvent); 2802623SN/A 2812623SN/A notIdleFraction--; 2822623SN/A _status = Idle; 2832623SN/A} 2842623SN/A 2852623SN/A 2862623SN/Atemplate <class T> 2872623SN/AFault 2882623SN/AAtomicSimpleCPU::read(Addr addr, T &data, unsigned flags) 2892623SN/A{ 2903169Sstever@eecs.umich.edu // use the CPU's statically allocated read request and packet objects 2914870Sstever@eecs.umich.edu Request *req = &data_read_req; 2922623SN/A 2932623SN/A if (traceData) { 2942623SN/A traceData->setAddr(addr); 2952623SN/A } 2962623SN/A 2974999Sgblack@eecs.umich.edu //The block size of our peer. 2984999Sgblack@eecs.umich.edu int blockSize = dcachePort.peerBlockSize(); 2994999Sgblack@eecs.umich.edu //The size of the data we're trying to read. 3004999Sgblack@eecs.umich.edu int dataSize = sizeof(T); 3012623SN/A 3024999Sgblack@eecs.umich.edu uint8_t * dataPtr = (uint8_t *)&data; 3032623SN/A 3044999Sgblack@eecs.umich.edu //The address of the second part of this access if it needs to be split 3054999Sgblack@eecs.umich.edu //across a cache line boundary. 3064999Sgblack@eecs.umich.edu Addr secondAddr = roundDown(addr + dataSize - 1, blockSize); 3074999Sgblack@eecs.umich.edu 3084999Sgblack@eecs.umich.edu if(secondAddr > addr) 3094999Sgblack@eecs.umich.edu dataSize = secondAddr - addr; 3104999Sgblack@eecs.umich.edu 3114999Sgblack@eecs.umich.edu dcache_latency = 0; 3124999Sgblack@eecs.umich.edu 3134999Sgblack@eecs.umich.edu while(1) { 3144999Sgblack@eecs.umich.edu req->setVirt(0, addr, dataSize, flags, thread->readPC()); 3154999Sgblack@eecs.umich.edu 3164999Sgblack@eecs.umich.edu // translate to physical address 3175891Sgblack@eecs.umich.edu Fault fault = thread->dtb->translateAtomic(req, tc, false); 3184999Sgblack@eecs.umich.edu 3194999Sgblack@eecs.umich.edu // Now do the access. 3204999Sgblack@eecs.umich.edu if (fault == NoFault) { 3214999Sgblack@eecs.umich.edu Packet pkt = Packet(req, 3224999Sgblack@eecs.umich.edu req->isLocked() ? MemCmd::LoadLockedReq : MemCmd::ReadReq, 3234999Sgblack@eecs.umich.edu Packet::Broadcast); 3244999Sgblack@eecs.umich.edu pkt.dataStatic(dataPtr); 3254999Sgblack@eecs.umich.edu 3264999Sgblack@eecs.umich.edu if (req->isMmapedIpr()) 3274999Sgblack@eecs.umich.edu dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt); 3284999Sgblack@eecs.umich.edu else { 3294999Sgblack@eecs.umich.edu if (hasPhysMemPort && pkt.getAddr() == physMemAddr) 3304999Sgblack@eecs.umich.edu dcache_latency += physmemPort.sendAtomic(&pkt); 3314999Sgblack@eecs.umich.edu else 3324999Sgblack@eecs.umich.edu dcache_latency += dcachePort.sendAtomic(&pkt); 3334999Sgblack@eecs.umich.edu } 3344999Sgblack@eecs.umich.edu dcache_access = true; 3355012Sgblack@eecs.umich.edu 3364999Sgblack@eecs.umich.edu assert(!pkt.isError()); 3374999Sgblack@eecs.umich.edu 3384999Sgblack@eecs.umich.edu if (req->isLocked()) { 3394999Sgblack@eecs.umich.edu TheISA::handleLockedRead(thread, req); 3404999Sgblack@eecs.umich.edu } 3414968Sacolyte@umich.edu } 3424986Ssaidi@eecs.umich.edu 3434999Sgblack@eecs.umich.edu // This will need a new way to tell if it has a dcache attached. 3444999Sgblack@eecs.umich.edu if (req->isUncacheable()) 3454999Sgblack@eecs.umich.edu recordEvent("Uncached Read"); 3464762Snate@binkert.org 3474999Sgblack@eecs.umich.edu //If there's a fault, return it 3484999Sgblack@eecs.umich.edu if (fault != NoFault) 3494999Sgblack@eecs.umich.edu return fault; 3504999Sgblack@eecs.umich.edu //If we don't need to access a second cache line, stop now. 
        if (secondAddr <= addr)
        {
            data = gtoh(data);
            if (traceData) {
                traceData->setData(data);
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        dataPtr += dataSize;
        //Adjust the size to get the remaining bytes.
        dataSize = addr + sizeof(T) - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
AtomicSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
AtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}


template <class T>
Fault
AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to write.
    int dataSize = sizeof(T);

    uint8_t * dataPtr = (uint8_t *)&data;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);

    if(secondAddr > addr)
        dataSize = secondAddr - addr;

    dcache_latency = 0;

    while(1) {
        req->setVirt(0, addr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, true);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLocked()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access) {
                Packet pkt = Packet(req, cmd, Packet::Broadcast);
                pkt.dataStatic(dataPtr);

                if (req->isMmapedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    //XXX This needs to be outside of the loop in order to
                    //work properly for cache line boundary crossing
                    //accesses in transendian simulations.
                    data = htog(data);
                    if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                        dcache_latency += physmemPort.sendAtomic(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    *res = pkt.get<T>();
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        // This will need a new way to tell if it's hooked up to a cache or not.
        if (req->isUncacheable())
            recordEvent("Uncached Write");

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            // If the write needs to have a fault on the access, consider
            // calling changeStatus() and changing it to "bad addr write"
            // or something.
            if (traceData) {
                traceData->setData(data);
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        dataPtr += dataSize;
        //Adjust the size to get the remaining bytes.
        dataSize = addr + sizeof(T) - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
AtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
AtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

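    // Execute up to 'width' instructions back to back, each counted as a
    // cycle.  Latencies returned by sendAtomic() for the ifetch and data
    // accesses are optionally folded into a stall estimate that delays
    // the next tick event.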
    Tick latency = 0;

    for (int i = 0; i < width; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();

        Fault fault = NoFault;

        bool fromRom = isRomMicroPC(thread->readMicroPC());
        if (!fromRom && !curMacroStaticInst) {
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (!fromRom && !curMacroStaticInst) {
                // This is commented out because the predecoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if(predecoder.needMoreBytes())
                //{
                    icache_access = true;
                    Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq,
                                               Packet::Broadcast);
                    ifetch_pkt.dataStatic(&inst);

                    if (hasPhysMemPort && ifetch_pkt.getAddr() == physMemAddr)
                        icache_latency = physmemPort.sendAtomic(&ifetch_pkt);
                    else
                        icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                    assert(!ifetch_pkt.isError());

                    // ifetch_req is initialized to read the instruction
                    // directly into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData) {
                    // If there was a fault, we shouldn't trace this instruction.
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                Tick stall_cycles = stall_ticks / ticks(1);
                Tick aligned_stall_ticks = ticks(stall_cycles);

                if (aligned_stall_ticks < stall_ticks)
                    aligned_stall_ticks += 1;

                latency += aligned_stall_ticks;
            }

        }
        if(fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    // instruction takes at least one cycle
    if (latency < ticks(1))
        latency = ticks(1);

    if (_status != Idle)
        schedule(tickEvent, curTick + latency);
}


void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new AtomicSimpleCPU(this);
}