// atomic.cc, revision 9814
/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/simple/atomic.hh"
#include "cpu/exetrace.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"
#include "sim/full_system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->switched_out) {
        ThreadID size = threadContexts.size();
        for (ThreadID i = 0; i < size; ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, tc->contextId());
        }
    }

    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      drain_manager(NULL),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem),
      simpoint(p->simpoint_profile),
      intervalSize(p->simpoint_interval),
      intervalCount(0),
      intervalDrift(0),
      simpointStream(NULL),
      currentBBV(0, 0),
      currentBBVInstCount(0)
{
    _status = Idle;

    if (simpoint) {
        simpointStream = simout.create(p->simpoint_profile_file, false);
    }
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
    if (simpointStream) {
        simout.close(simpointStream);
    }
}

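// Draining: drain() is called when the simulator needs this CPU to reach a
// quiescent state (for example before a checkpoint or a CPU switch). The
// atomic CPU can finish immediately unless it is in the middle of a
// microcode sequence; in that case it remembers the DrainManager and
// completes the drain later from tick() via tryCompleteDrain(). The return
// value is the number of outstanding drain requests (0 or 1).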
unsigned int
AtomicSimpleCPU::drain(DrainManager *dm)
{
    assert(!drain_manager);
    if (switchedOut())
        return 0;

    if (!isDrained()) {
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());
        drain_manager = dm;
        return 1;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return 0;
    }
}

void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    assert(!drain_manager);
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());
    if (threadContexts.size() > 1)
        fatal("The atomic CPU only supports one thread.\n");

    if (thread->status() == ThreadContext::Active) {
        schedule(tickEvent, nextCycle());
        _status = BaseSimpleCPU::Running;
    } else {
        _status = BaseSimpleCPU::Idle;
    }

    system->totalNumInsts = 0;
}

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (!drain_manager)
        return false;

    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    drain_manager->signalDrainDone();
    drain_manager = NULL;

    return true;
}


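// CPU switching: switchOut()/takeOverFrom() are used when this CPU model is
// swapped for another one (for example switching between atomic and detailed
// simulation). The CPU must already be drained when it is switched out, and
// takeOverFrom() re-initialises the statically allocated fetch and data
// requests with this CPU's context id.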
void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());

    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}

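// Context activation/suspension: activateContext() accounts for the time the
// thread spent suspended, schedules the tick event on the clock edge 'delay'
// cycles in the future and marks the CPU running; suspendContext() simply
// deschedules the tick event (if any) and marks the CPU idle.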
void
AtomicSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    numCycles += ticksToCycles(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, clockEdge(delay));
    _status = BaseSimpleCPU::Running;
}


void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction--;
    _status = Idle;
}


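// readMem() performs an atomic (functional-latency) data read. Accesses that
// cross a cache-line boundary are split in two: the loop below first issues
// the portion up to the line boundary and then, if needed, a second access
// for the remaining bytes starting at secondAddr. Load-locked requests also
// update the ISA's locked-address monitor via handleLockedRead().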
Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data,
                         unsigned size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt = Packet(req,
                                req->isLLSC() ? MemCmd::LoadLockedReq :
                                MemCmd::ReadReq);
            pkt.dataStatic(data);

            if (req->isMmappedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (fastmem && system->isMemAddr(pkt.getAddr()))
                    system->getPhysMem().access(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


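// writeMem() mirrors readMem() for stores: the access is split at a
// cache-line boundary when necessary, store-conditionals become StoreCondReq
// packets (and may be suppressed entirely by handleLockedWrite()), and swap
// requests return the previous memory contents through 'res'.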
Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The size of the data we're trying to write.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (fastmem && system->isMemAddr(pkt.getAddr()))
                        system->getPhysMem().access(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


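// tick() is the main simulation loop of the atomic CPU. Each invocation
// executes up to 'width' instructions (and keeps going while a locked memory
// sequence is outstanding): fetch and translate, execute, then optionally
// fold the atomic i-cache/d-cache latencies into this tick's latency before
// the next tick event is scheduled.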
void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();
        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if(decoder.needMoreBytes())
                //{
                    icache_access = true;
                    Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
                    ifetch_pkt.dataStatic(&inst);

                    if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
                        system->getPhysMem().access(&ifetch_pkt);
                    else
                        icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                    assert(!ifetch_pkt.isError());

                    // ifetch_req is initialized to read the instruction directly
                    // into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            // profile for SimPoints if enabled and macro inst is finished
            if (simpoint && curStaticInst && (fault == NoFault) &&
                    (!curStaticInst->isMicroop() ||
                     curStaticInst->isLastMicroop())) {
                profileSimPoint();
            }

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                    clockPeriod();
            }

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        schedule(tickEvent, curTick() + latency);
}


void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

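// profileSimPoint() builds SimPoint basic-block vectors (BBVs): instructions
// are counted per basic block (delimited by control instructions), and once
// an interval of 'intervalSize' instructions has completed, a "T" line of
// ":id:count" pairs, one per basic block executed in that interval, is
// written to the simpoint output file.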
void
AtomicSimpleCPU::profileSimPoint()
{
    if (!currentBBVInstCount)
        currentBBV.first = thread->pcState().instAddr();

    ++intervalCount;
    ++currentBBVInstCount;

    // If inst is control inst, assume end of basic block.
    if (curStaticInst->isControl()) {
        currentBBV.second = thread->pcState().instAddr();

        auto map_itr = bbMap.find(currentBBV);
        if (map_itr == bbMap.end()) {
            // If a new (previously unseen) basic block is found,
            // add a new unique id, record num of insts and insert into bbMap.
            BBInfo info;
            info.id = bbMap.size() + 1;
            info.insts = currentBBVInstCount;
            info.count = currentBBVInstCount;
            bbMap.insert(std::make_pair(currentBBV, info));
        } else {
            // If basic block is seen before, just increment the count by the
            // number of insts in basic block.
            BBInfo& info = map_itr->second;
            assert(info.insts == currentBBVInstCount);
            info.count += currentBBVInstCount;
        }
        currentBBVInstCount = 0;

        // Reached end of interval if the sum of the current inst count
        // (intervalCount) and the excess inst count from the previous
        // interval (intervalDrift) is greater than/equal to the interval size.
        if (intervalCount + intervalDrift >= intervalSize) {
            // summarize interval and display BBV info
            std::vector<pair<uint64_t, uint64_t> > counts;
            for (auto map_itr = bbMap.begin(); map_itr != bbMap.end();
                    ++map_itr) {
                BBInfo& info = map_itr->second;
                if (info.count != 0) {
                    counts.push_back(std::make_pair(info.id, info.count));
                    info.count = 0;
                }
            }
            std::sort(counts.begin(), counts.end());

            // Print output BBV info
            *simpointStream << "T";
            for (auto cnt_itr = counts.begin(); cnt_itr != counts.end();
                    ++cnt_itr) {
                *simpointStream << ":" << cnt_itr->first
                                << ":" << cnt_itr->second << " ";
            }
            *simpointStream << "\n";

            intervalDrift = (intervalCount + intervalDrift) - intervalSize;
            intervalCount = 0;
        }
    }
}

////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new AtomicSimpleCPU(this);
}
667