/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/simple/atomic.hh"
#include "cpu/exetrace.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"
#include "sim/full_system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

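// Port lookup used when the configuration wires this CPU up. Only the
// names "icache_port", "dcache_port", and the optional "physmem_port"
// (a direct connection to physical memory that bypasses the cache
// hierarchy for faster atomic accesses) are recognised. A rough sketch
// of the caller side -- purely illustrative, the real connection logic
// lives in the port-binding code driven by the config scripts:
//
//     Port *p = cpu->getPort("icache_port");
//     // p is subsequently tied to its peer (a cache or bus) elsewhere.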
Port *
AtomicSimpleCPU::getPort(const string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else if (if_name == "physmem_port") {
        hasPhysMemPort = true;
        return &physmemPort;
    } else
        panic("AtomicSimpleCPU: unknown port name '%s'\n", if_name);
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();
    if (FullSystem) {
        ThreadID size = threadContexts.size();
        for (ThreadID i = 0; i < size; ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, tc->contextId());
        }
    }

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (hasPhysMemPort) {
        AddrRangeList pmAddrList = physmemPort.getPeer()->getAddrRanges();
        physMemAddr = *pmAddrList.begin();
    }
    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + "-iport", this), dcachePort(name() + "-dport", this),
      physmemPort(name() + "-physmemport", this), hasPhysMemPort(false)
{
    _status = Idle;
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

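// Checkpointing support. serialize()/unserialize() save and restore the
// small amount of state this CPU keeps beyond BaseSimpleCPU. As a rough
// sketch (section names and values are illustrative, not taken from a
// real checkpoint), the relevant part of a checkpoint looks like:
//
//     [system.cpu]
//     so_state=...        // SimObject state enum
//     locked=false        // outstanding locked (RMW) access flag
//     ...BaseSimpleCPU state...
//
//     [system.cpu.tickEvent]
//     ...tick event schedule...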
void
AtomicSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    SERIALIZE_SCALAR(locked);
    BaseSimpleCPU::serialize(os);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
}

void
AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    UNSERIALIZE_SCALAR(locked);
    BaseSimpleCPU::unserialize(cp, section);
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
}

void
AtomicSimpleCPU::resume()
{
    if (_status == Idle || _status == SwitchedOut)
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    assert(system->getMemoryMode() == Enums::atomic);

    changeState(SimObject::Running);
    if (thread->status() == ThreadContext::Active) {
        if (!tickEvent.scheduled())
            schedule(tickEvent, nextCycle());
    }
    system->totalNumInsts = 0;
}

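// CPU model switching. switchOut() and takeOverFrom() are used as a
// pair when the simulation swaps CPU models, e.g. fast-forwarding in
// atomic mode before handing the workload to a detailed CPU. A minimal
// sketch of the caller side (normally driven from the scripting layer):
//
//     oldCPU->switchOut();
//     newCPU->takeOverFrom(oldCPU);  // rebinds ports, adopts thread state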
void
AtomicSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;

    tickEvent.squash();
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            schedule(tickEvent, nextCycle());
            break;
        }
    }
    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}


void
AtomicSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend);

    // Make sure ticks are still on multiples of cycles
    schedule(tickEvent, nextCycle(curTick() + ticks(delay)));
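    // Purely illustrative numbers: assuming the default 1 ps tick
    // resolution and a 2 GHz clock (ticks(1) == 500), curTick() == 1200
    // and delay == 2 request tick 1200 + 1000 = 2200, which nextCycle()
    // then rounds up to the 2500-tick cycle boundary.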
    _status = Running;
}


void
AtomicSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction--;
    _status = Idle;
}


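// Atomic-mode data read, called (via the exec context) from the ISA's
// generated execute() methods. A minimal, hypothetical caller sketch --
// the variable names and the zero flags value are illustrative only:
//
//     uint32_t val;
//     Fault fault = readMem(vaddr, (uint8_t *)&val, sizeof(val), 0);
//     if (fault == NoFault)
//         val = TheISA::gtoh(val);  // guest-to-host byte order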
Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data,
                         unsigned size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    // The block size of our peer.
    unsigned blockSize = dcachePort.peerBlockSize();
    // The size of the data we're trying to read.
    int fullSize = size;

    // The address of the second part of this access if it needs to be split
    // across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, blockSize);

    if (secondAddr > addr)
        size = secondAddr - addr;
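    // Worked example with illustrative numbers: for addr == 0x3C,
    // size == 8 and a 64-byte block, roundDown(0x43, 64) == 0x40 > 0x3C,
    // so the first pass of the loop below reads the 4 bytes at
    // 0x3C..0x3F and the second pass reads the remaining 4 bytes at 0x40.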

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, size, flags, thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt = Packet(req,
                    req->isLLSC() ? MemCmd::LoadLockedReq : MemCmd::ReadReq,
                    Packet::Broadcast);
            pkt.dataStatic(data);

            if (req->isMmappedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                    dcache_latency += physmemPort.sendAtomic(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        // If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        // If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr) {
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        // Move the pointer we're reading into to the correct location.
        data += size;
        // Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        // And access the right address.
        addr = secondAddr;
    }
}


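// Atomic-mode data write, including store-conditional and swap handling.
// A hypothetical store-conditional caller might look like the sketch
// below; the flag value and the meaning of the returned result are
// ISA-dependent and shown only for illustration:
//
//     uint64_t sc_result;
//     Fault fault = writeMem((uint8_t *)&val, sizeof(val), vaddr,
//                            Request::LLSC, &sc_result);
//     // sc_result reports whether the conditional store succeeded.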
Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    // The block size of our peer.
    unsigned blockSize = dcachePort.peerBlockSize();
    // The size of the data we're trying to write.
    int fullSize = size;

    // The address of the second part of this access if it needs to be split
    // across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, blockSize);

    if (secondAddr > addr)
        size = secondAddr - addr;
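    // As in readMem() above, a store that straddles a block boundary is
    // split in two: e.g. 8 bytes at addr == 0x7C with 64-byte blocks
    // become a 4-byte access at 0x7C followed by a 4-byte access at 0x80.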

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, size, flags, thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd, Packet::Broadcast);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                        dcache_latency += physmemPort.sendAtomic(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        // If there's a fault or we don't need to access a second cache line,
        // stop now.
        if (fault != NoFault || secondAddr <= addr) {
            if (req->isLocked() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        // Move the pointer we're writing from to the correct location.
        data += size;
        // Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        // And access the right address.
        addr = secondAddr;
    }
}


void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

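    // Up to 'width' instructions execute per scheduled tick event; the
    // 'locked' term lets the loop run past that limit so that a locked
    // (read-modify-write) sequence started in readMem() completes its
    // matching write before the CPU gives up the tick. Illustrative
    // numbers: with width == 2 and a 500-tick clock, at most two
    // ordinary instructions execute per 500-tick step (barring stalls).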
    for (int i = 0; i < width || locked; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();
        // We must have just got suspended by a PC event
        if (_status == Idle)
            return;

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the predecoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if(predecoder.needMoreBytes())
                //{
                    icache_access = true;
                    Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq,
                                               Packet::Broadcast);
                    ifetch_pkt.dataStatic(&inst);

                    if (hasPhysMemPort && ifetch_pkt.getAddr() == physMemAddr)
                        icache_latency = physmemPort.sendAtomic(&ifetch_pkt);
                    else
                        icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                    assert(!ifetch_pkt.isError());

                    // ifetch_req is initialized to read the instruction
                    // directly into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

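            // Round the stall time up to a whole number of CPU cycles.
            // Worked example with illustrative numbers (ticks(1) == 500):
            // stall_ticks == 1200 gives stall_cycles == 2 and
            // aligned_stall_ticks == 1000, which is then bumped up to the
            // 1500-tick (three cycle) boundary.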
            if (stall_ticks) {
                Tick stall_cycles = stall_ticks / ticks(1);
                Tick aligned_stall_ticks = ticks(stall_cycles);

                if (aligned_stall_ticks < stall_ticks)
                    aligned_stall_ticks += ticks(1);

                latency += aligned_stall_ticks;
            }

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    // instruction takes at least one cycle
    if (latency < ticks(1))
        latency = ticks(1);

    if (_status != Idle)
        schedule(tickEvent, curTick() + latency);
}


void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new AtomicSimpleCPU(this);
}
568