/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/simple/atomic.hh"
#include "cpu/exetrace.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"
#include "sim/full_system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->switched_out) {
        ThreadID size = threadContexts.size();
        for (ThreadID i = 0; i < size; ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, tc->contextId());
        }
    }

    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      drain_manager(NULL),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem),
      simpoint(p->simpoint_profile),
      intervalSize(p->simpoint_interval),
      intervalCount(0),
      intervalDrift(0),
      simpointStream(NULL),
      currentBBV(0, 0),
      currentBBVInstCount(0)
{
    _status = Idle;

    if (simpoint) {
        simpointStream = simout.create(p->simpoint_profile_file, false);
    }
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
    if (simpointStream) {
        simout.close(simpointStream);
    }
}

unsigned int
AtomicSimpleCPU::drain(DrainManager *dm)
{
    assert(!drain_manager);
    if (switchedOut())
        return 0;

    if (!isDrained()) {
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());
        drain_manager = dm;
        return 1;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return 0;
    }
}

void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    assert(!drain_manager);
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());
    if (threadContexts.size() > 1)
        fatal("The atomic CPU only supports one thread.\n");

    if (thread->status() == ThreadContext::Active) {
        schedule(tickEvent, nextCycle());
        _status = BaseSimpleCPU::Running;
        notIdleFraction = 1;
    } else {
        _status = BaseSimpleCPU::Idle;
        notIdleFraction = 0;
    }

    system->totalNumInsts = 0;
}

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (!drain_manager)
        return false;

    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    drain_manager->signalDrainDone();
    drain_manager = NULL;

    return true;
}


void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());

    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}

void
AtomicSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction = 1;
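    // Charge the cycles that elapsed between the last suspend and this
    // activation so numCycles stays consistent with the simulated time
    // that has passed while the context was idle.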
    numCycles += ticksToCycles(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, clockEdge(delay));
    _status = BaseSimpleCPU::Running;
}


void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction = 0;
    _status = Idle;
}


Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data,
                         unsigned size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt = Packet(req,
                                req->isLLSC() ? MemCmd::LoadLockedReq :
                                MemCmd::ReadReq);
            pkt.dataStatic(data);

            if (req->isMmappedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (fastmem && system->isMemAddr(pkt.getAddr()))
                    system->getPhysMem().access(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
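            // Note that a locked (LL/SC or locked RMW) access is now in
            // flight: tick() keeps executing instructions while 'locked'
            // is set, and the matching writeMem() clears it.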
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The size of the data we're trying to write.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (fastmem && system->isMemAddr(pkt.getAddr()))
                        system->getPhysMem().access(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

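                // A swap returns the previous memory contents through the
                // packet's data; copy them back to the caller via 'res'.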
                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
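            // A successful locked access completes the LL/SC sequence
            // started in readMem(), so release the flag that keeps tick()
            // iterating.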
            if (req->isLocked() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

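    // Execute up to 'width' instructions per tick, and keep going while a
    // locked (LL/SC) sequence is in progress so that it completes within
    // this tick.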
    for (int i = 0; i < width || locked; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();
        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if(decoder.needMoreBytes())
                //{
                    icache_access = true;
                    Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
                    ifetch_pkt.dataStatic(&inst);

                    if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
                        system->getPhysMem().access(&ifetch_pkt);
                    else
                        icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                    assert(!ifetch_pkt.isError());

                    // ifetch_req is initialized to read the instruction directly
                    // into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            // profile for SimPoints if enabled and macro inst is finished
            if (simpoint && curStaticInst && (fault == NoFault) &&
                    (!curStaticInst->isMicroop() ||
                     curStaticInst->isLastMicroop())) {
                profileSimPoint();
            }

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                    clockPeriod();
            }

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        schedule(tickEvent, curTick() + latency);
}


void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

void
AtomicSimpleCPU::profileSimPoint()
{
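    // Build SimPoint basic-block vectors (BBVs): a basic block is keyed by
    // its (start PC, end PC) pair, and an execution-frequency vector is
    // written out every 'intervalSize' instructions.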
    if (!currentBBVInstCount)
        currentBBV.first = thread->pcState().instAddr();

    ++intervalCount;
    ++currentBBVInstCount;

    // If inst is control inst, assume end of basic block.
    if (curStaticInst->isControl()) {
        currentBBV.second = thread->pcState().instAddr();

        auto map_itr = bbMap.find(currentBBV);
        if (map_itr == bbMap.end()) {
            // If a new (previously unseen) basic block is found,
            // add a new unique id, record num of insts and insert into bbMap.
            BBInfo info;
            info.id = bbMap.size() + 1;
            info.insts = currentBBVInstCount;
            info.count = currentBBVInstCount;
            bbMap.insert(std::make_pair(currentBBV, info));
        } else {
            // If basic block is seen before, just increment the count by the
            // number of insts in basic block.
            BBInfo& info = map_itr->second;
            assert(info.insts == currentBBVInstCount);
            info.count += currentBBVInstCount;
        }
        currentBBVInstCount = 0;

        // Reached end of interval if the sum of the current inst count
        // (intervalCount) and the excessive inst count from the previous
        // interval (intervalDrift) is greater than/equal to the interval size.
        if (intervalCount + intervalDrift >= intervalSize) {
            // summarize interval and display BBV info
            std::vector<pair<uint64_t, uint64_t> > counts;
            for (auto map_itr = bbMap.begin(); map_itr != bbMap.end();
                    ++map_itr) {
                BBInfo& info = map_itr->second;
                if (info.count != 0) {
                    counts.push_back(std::make_pair(info.id, info.count));
                    info.count = 0;
                }
            }
            std::sort(counts.begin(), counts.end());

            // Print output BBV info
            *simpointStream << "T";
            for (auto cnt_itr = counts.begin(); cnt_itr != counts.end();
                    ++cnt_itr) {
                *simpointStream << ":" << cnt_itr->first
                                << ":" << cnt_itr->second << " ";
            }
            *simpointStream << "\n";

            intervalDrift = (intervalCount + intervalDrift) - intervalSize;
            intervalCount = 0;
        }
    }
}

////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new AtomicSimpleCPU(this);
}
669