/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmaped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/atomic.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

Port *
AtomicSimpleCPU::getPort(const std::string &if_name, int idx)
{
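    // Map the port name given in the configuration to the corresponding
    // Port object; if_name comes from the Python-side port declarations.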
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else if (if_name == "physmem_port") {
        hasPhysMemPort = true;
        return &physmemPort;
    }
    else
        panic("No Such Port\n");
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, tc->contextId());
    }
#endif
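    // If a direct physical memory port is connected, remember the address
    // range it serves so accesses that fall within it can bypass the cache
    // ports and go straight to physical memory.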
    if (hasPhysMemPort) {
        bool snoop = false;
        AddrRangeList pmAddrList;
        physmemPort.getPeerAddressRanges(pmAddrList, snoop);
        physMemAddr = *pmAddrList.begin();
    }
    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

bool
AtomicSimpleCPU::CpuPort::recvTiming(PacketPtr pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvTiming callback!");
    return true;
}

Tick
AtomicSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    //Snooping a coherence request, just return
    return 0;
}

void
AtomicSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    //No internal storage to update, just return
    return;
}

void
AtomicSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!");
}

void
AtomicSimpleCPU::CpuPort::recvRetry()
{
    panic("AtomicSimpleCPU doesn't expect recvRetry callback!");
}

void
AtomicSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + "-iport", this), dcachePort(name() + "-dport", this),
      physmemPort(name() + "-physmemport", this), hasPhysMemPort(false)
{
    _status = Idle;

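    // Each port forwards at most one RangeChange notification upstream;
    // see CpuPort::recvStatusChange().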
    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;

}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
}

void
AtomicSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    BaseSimpleCPU::serialize(os);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
}

void
AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    BaseSimpleCPU::unserialize(cp, section);
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
}

void
AtomicSimpleCPU::resume()
{
    if (_status == Idle || _status == SwitchedOut)
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    assert(system->getMemoryMode() == Enums::atomic);

    changeState(SimObject::Running);
    if (thread->status() == ThreadContext::Active) {
        if (!tickEvent.scheduled())
            schedule(tickEvent, nextCycle());
    }
}

void
AtomicSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;

    tickEvent.squash();
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            schedule(tickEvent, nextCycle());
            break;
        }
    }
    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}


void
AtomicSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
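    // Credit the cycles that elapsed while this context was suspended so
    // numCycles reflects total simulated cycles, not just active ones.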
    numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, nextCycle(curTick + ticks(delay)));
    _status = Running;
}


void
AtomicSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction--;
    _status = Idle;
}


template <class T>
Fault
AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    // use the CPU's statically allocated read request object
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to read.
    int dataSize = sizeof(T);

    uint8_t * dataPtr = (uint8_t *)&data;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);

    if(secondAddr > addr)
        dataSize = secondAddr - addr;

    dcache_latency = 0;

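    // The loop below runs once for an access contained in a single cache
    // line, and a second time for the remaining bytes if the access
    // straddles a line boundary.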
    while(1) {
        req->setVirt(0, addr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault) {
            Packet pkt = Packet(req,
                    req->isLlsc() ? MemCmd::LoadLockedReq : MemCmd::ReadReq,
                    Packet::Broadcast);
            pkt.dataStatic(dataPtr);

            if (req->isMmapedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                    dcache_latency += physmemPort.sendAtomic(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

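            // For load-locked requests, record the locked address in the
            // ISA's lock-tracking state so a later store-conditional can
            // be checked against it.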
            if (req->isLlsc()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        // This will need a new way to tell if it has a dcache attached.
        if (req->isUncacheable())
            recordEvent("Uncached Read");

        //If there's a fault, return it
        if (fault != NoFault)
            return fault;
        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            data = gtoh(data);
            if (traceData) {
                traceData->setData(data);
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        dataPtr += dataSize;
        //Adjust the size to get the remaining bytes.
        dataSize = addr + sizeof(T) - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
AtomicSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
AtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}


template <class T>
Fault
AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request object
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to write.
    int dataSize = sizeof(T);

    uint8_t * dataPtr = (uint8_t *)&data;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);

    if(secondAddr > addr)
        dataSize = secondAddr - addr;

    dcache_latency = 0;

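    // As in read(), the loop runs once per cache line touched: a second
    // pass handles any bytes that spill past the line boundary.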
    while(1) {
        req->setVirt(0, addr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

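            // handleLockedWrite() checks the lock flag for store-conditional
            // requests; if the check fails, do_access becomes false and the
            // memory access is skipped entirely.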
            if (req->isLlsc()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access) {
                Packet pkt = Packet(req, cmd, Packet::Broadcast);
                pkt.dataStatic(dataPtr);

                if (req->isMmapedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    //XXX This needs to be outside of the loop in order to
                    //work properly for cache line boundary crossing
                    //accesses in trans-endian simulations.
                    data = htog(data);
                    if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                        dcache_latency += physmemPort.sendAtomic(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    *res = pkt.get<T>();
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        // This will need a new way to tell if it's hooked up to a cache or not.
        if (req->isUncacheable())
            recordEvent("Uncached Write");

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            // If the write needs to have a fault on the access, consider
            // calling changeStatus() and changing it to "bad addr write"
            // or something.
            if (traceData) {
                traceData->setData(gtoh(data));
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        dataPtr += dataSize;
        //Adjust the size to get the remaining bytes.
        dataSize = addr + sizeof(T) - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
AtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
AtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

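    // Execute up to 'width' instructions back to back in this event; the
    // accumulated latency decides when the next tick is scheduled.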
    for (int i = 0; i < width; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();

        Fault fault = NoFault;

        bool fromRom = isRomMicroPC(thread->readMicroPC());
        if (!fromRom && !curMacroStaticInst) {
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (!fromRom && !curMacroStaticInst) {
                // This is commented out because the predecoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if(predecoder.needMoreBytes())
                //{
                    icache_access = true;
                    Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq,
                                               Packet::Broadcast);
                    ifetch_pkt.dataStatic(&inst);

                    if (hasPhysMemPort && ifetch_pkt.getAddr() == physMemAddr)
                        icache_latency = physmemPort.sendAtomic(&ifetch_pkt);
                    else
                        icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                    assert(!ifetch_pkt.isError());

                    // ifetch_req is initialized to read the instruction directly
                    // into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData) {
                    // If there was a fault, we should trace this instruction.
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

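            // Fold any cache stall time into this tick's latency, rounded
            // up to a whole number of CPU cycles so the next tick event
            // stays aligned to the clock.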
            if (stall_ticks) {
                Tick stall_cycles = stall_ticks / ticks(1);
                Tick aligned_stall_ticks = ticks(stall_cycles);

                if (aligned_stall_ticks < stall_ticks)
                    aligned_stall_ticks += ticks(1);

                latency += aligned_stall_ticks;
            }

        }
        if(fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    // instruction takes at least one cycle
    if (latency < ticks(1))
        latency = ticks(1);

    if (_status != Idle)
        schedule(tickEvent, curTick + latency);
}


void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new AtomicSimpleCPU(this);
}