atomic.cc revision 10024
/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/simple/atomic.hh"
#include "cpu/exetrace.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"
#include "sim/full_system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->switched_out) {
        ThreadID size = threadContexts.size();
        for (ThreadID i = 0; i < size; ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, tc->contextId());
        }
    }

    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      drain_manager(NULL),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem),
      simpoint(p->simpoint_profile),
      intervalSize(p->simpoint_interval),
      intervalCount(0),
      intervalDrift(0),
      simpointStream(NULL),
      currentBBV(0, 0),
      currentBBVInstCount(0)
{
    _status = Idle;

    if (simpoint) {
        simpointStream = simout.create(p->simpoint_profile_file, false);
    }
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
    if (simpointStream) {
        simout.close(simpointStream);
    }
}

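// Drain the CPU for a checkpoint or CPU switch. Returns 0 if the CPU is
// already switched out or sitting between instructions (fully drained);
// otherwise it remembers the DrainManager and returns 1 so the drain can
// be signalled as done once the current instruction sequence completes.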
unsigned int
AtomicSimpleCPU::drain(DrainManager *dm)
{
    assert(!drain_manager);
    if (switchedOut())
        return 0;

    if (!isDrained()) {
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());
        drain_manager = dm;
        return 1;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return 0;
    }
}

void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    assert(!drain_manager);
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());
    if (threadContexts.size() > 1)
        fatal("The atomic CPU only supports one thread.\n");

    if (thread->status() == ThreadContext::Active) {
        schedule(tickEvent, nextCycle());
        _status = BaseSimpleCPU::Running;
        notIdleFraction = 1;
    } else {
        _status = BaseSimpleCPU::Idle;
        notIdleFraction = 0;
    }

    system->totalNumInsts = 0;
}

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (!drain_manager)
        return false;

    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    drain_manager->signalDrainDone();
    drain_manager = NULL;

    return true;
}


void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());

    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}

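// Bring the (single) thread out of the Idle state and schedule the first
// tick event 'delay' cycles from now.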
void
AtomicSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction = 1;
    numCycles += ticksToCycles(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, clockEdge(delay));
    _status = BaseSimpleCPU::Running;
}


void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction = 0;
    _status = Idle;
}


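// Atomically read 'size' bytes at virtual address 'addr' into 'data',
// splitting the access into two requests if it crosses a cache line
// boundary.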
Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data,
                         unsigned size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt = Packet(req,
                                req->isLLSC() ? MemCmd::LoadLockedReq :
                                MemCmd::ReadReq);
            pkt.dataStatic(data);

            if (req->isMmappedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (fastmem && system->isMemAddr(pkt.getAddr()))
                    system->getPhysMem().access(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


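// Atomically write 'size' bytes from 'data' to virtual address 'addr',
// handling LL/SC and swap requests and splitting accesses that cross a
// cache line boundary.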
Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The size of the data we're trying to write.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (fastmem && system->isMemAddr(pkt.getAddr()))
                        system->getPhysMem().access(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


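// Main execution loop: fetch and execute up to 'width' instructions per
// tick (continuing while a locked RMW sequence is in flight), then
// reschedule the tick event based on the accumulated cache latencies.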
void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();
        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req.taskId(taskId());
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if(decoder.needMoreBytes())
                //{
                    icache_access = true;
                    Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
                    ifetch_pkt.dataStatic(&inst);

                    if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
                        system->getPhysMem().access(&ifetch_pkt);
                    else
                        icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                    assert(!ifetch_pkt.isError());

                    // ifetch_req is initialized to read the instruction
                    // directly into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            // profile for SimPoints if enabled and macro inst is finished
            if (simpoint && curStaticInst && (fault == NoFault) &&
                    (!curStaticInst->isMicroop() ||
                     curStaticInst->isLastMicroop())) {
                profileSimPoint();
            }

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                    clockPeriod();
            }

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        schedule(tickEvent, curTick() + latency);
}


void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

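// Collect basic block vector (BBV) profiles for SimPoint analysis: count
// instructions in the current basic block and, at the end of each
// interval, write one frequency-vector line to the simpoint output file.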
void
AtomicSimpleCPU::profileSimPoint()
{
    if (!currentBBVInstCount)
        currentBBV.first = thread->pcState().instAddr();

    ++intervalCount;
    ++currentBBVInstCount;

    // If inst is control inst, assume end of basic block.
    if (curStaticInst->isControl()) {
        currentBBV.second = thread->pcState().instAddr();

        auto map_itr = bbMap.find(currentBBV);
        if (map_itr == bbMap.end()) {
            // If a new (previously unseen) basic block is found,
            // add a new unique id, record num of insts and insert into bbMap.
            BBInfo info;
            info.id = bbMap.size() + 1;
            info.insts = currentBBVInstCount;
            info.count = currentBBVInstCount;
            bbMap.insert(std::make_pair(currentBBV, info));
        } else {
            // If basic block is seen before, just increment the count by the
            // number of insts in basic block.
            BBInfo& info = map_itr->second;
            info.count += currentBBVInstCount;
        }
        currentBBVInstCount = 0;

        // Reached end of interval if the sum of the current inst count
        // (intervalCount) and the excess inst count carried over from the
        // previous interval (intervalDrift) is greater than or equal to the
        // interval size.
        if (intervalCount + intervalDrift >= intervalSize) {
            // summarize interval and display BBV info
            std::vector<pair<uint64_t, uint64_t> > counts;
            for (auto map_itr = bbMap.begin(); map_itr != bbMap.end();
                    ++map_itr) {
                BBInfo& info = map_itr->second;
                if (info.count != 0) {
                    counts.push_back(std::make_pair(info.id, info.count));
                    info.count = 0;
                }
            }
            std::sort(counts.begin(), counts.end());

            // Print output BBV info
            *simpointStream << "T";
            for (auto cnt_itr = counts.begin(); cnt_itr != counts.end();
                    ++cnt_itr) {
                *simpointStream << ":" << cnt_itr->first
                                << ":" << cnt_itr->second << " ";
            }
            *simpointStream << "\n";

            intervalDrift = (intervalCount + intervalDrift) - intervalSize;
            intervalCount = 0;
        }
    }
}

////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new AtomicSimpleCPU(this);
}