/*
 * Copyright (c) 2012-2013 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/simple/atomic.hh"
#include "cpu/exetrace.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"
#include "sim/full_system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->switched_out) {
        ThreadID size = threadContexts.size();
        for (ThreadID i = 0; i < size; ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, tc->contextId());
        }
    }

    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      drain_manager(NULL),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem),
      simpoint(p->simpoint_profile),
      intervalSize(p->simpoint_interval),
      intervalCount(0),
      intervalDrift(0),
      simpointStream(NULL),
      currentBBV(0, 0),
      currentBBVInstCount(0)
{
    _status = Idle;

    if (simpoint) {
        simpointStream = simout.create(p->simpoint_profile_file, false);
    }
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
    if (simpointStream) {
        simout.close(simpointStream);
    }
}

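// Drain support: if no instruction is partially executed, the tick event is
// descheduled and drain() reports nothing outstanding (0). Otherwise the
// DrainManager is remembered and drain() reports one outstanding object;
// tick() later calls tryCompleteDrain() to signal completion once the CPU
// reaches a drainable state.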
unsigned int
AtomicSimpleCPU::drain(DrainManager *dm)
{
    assert(!drain_manager);
    if (switchedOut())
        return 0;

    if (!isDrained()) {
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());
        drain_manager = dm;
        return 1;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return 0;
    }
}

void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    assert(!drain_manager);
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());
    if (threadContexts.size() > 1)
        fatal("The atomic CPU only supports one thread.\n");

    if (thread->status() == ThreadContext::Active) {
        schedule(tickEvent, nextCycle());
        _status = BaseSimpleCPU::Running;
        notIdleFraction = 1;
    } else {
        _status = BaseSimpleCPU::Idle;
        notIdleFraction = 0;
    }

    system->totalNumInsts = 0;
}

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (!drain_manager)
        return false;

    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    drain_manager->signalDrainDone();
    drain_manager = NULL;

    return true;
}


void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());

    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}

void
AtomicSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction = 1;
    numCycles += ticksToCycles(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, clockEdge(delay));
    _status = BaseSimpleCPU::Running;
}


void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction = 0;
    _status = Idle;
}


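// Snoop handlers on the data port: even in atomic mode the CPU must observe
// invalidations from other masters so that any load-linked reservation
// covering the snooped cache block can be cleared via
// TheISA::handleLockedSnoop().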
Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
    }

    return 0;
}

void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
    }
}

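// Atomic read: translate and access memory in place, reusing the statically
// allocated data_read_req. Accesses that cross a cache-line boundary are
// split into two atomic accesses, and LL/SC reads record a reservation
// through TheISA::handleLockedRead().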
Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data,
                         unsigned size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt = Packet(req,
                                req->isLLSC() ? MemCmd::LoadLockedReq :
                                MemCmd::ReadReq);
            pkt.dataStatic(data);

            if (req->isMmappedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (fastmem && system->isMemAddr(pkt.getAddr()))
                    system->getPhysMem().access(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


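// Atomic write: mirrors readMem(), but picks the memory command (WriteReq,
// StoreCondReq or SwapReq) from the request flags. For store-conditionals
// TheISA::handleLockedWrite() decides whether the access is performed at
// all; swap results and store-conditional outcomes are passed back through
// *res.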
3982623SN/AFault
3998444Sgblack@eecs.umich.eduAtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
4008444Sgblack@eecs.umich.edu                          Addr addr, unsigned flags, uint64_t *res)
4012623SN/A{
4023169Sstever@eecs.umich.edu    // use the CPU's statically allocated write request and packet objects
4034870Sstever@eecs.umich.edu    Request *req = &data_write_req;
4042623SN/A
4052623SN/A    if (traceData) {
4062623SN/A        traceData->setAddr(addr);
4072623SN/A    }
4082623SN/A
4094999Sgblack@eecs.umich.edu    //The size of the data we're trying to read.
4107520Sgblack@eecs.umich.edu    int fullSize = size;
4112623SN/A
4124999Sgblack@eecs.umich.edu    //The address of the second part of this access if it needs to be split
4134999Sgblack@eecs.umich.edu    //across a cache line boundary.
4149814Sandreas.hansson@arm.com    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());
4154999Sgblack@eecs.umich.edu
4164999Sgblack@eecs.umich.edu    if(secondAddr > addr)
4177520Sgblack@eecs.umich.edu        size = secondAddr - addr;
4184999Sgblack@eecs.umich.edu
4194999Sgblack@eecs.umich.edu    dcache_latency = 0;
4204999Sgblack@eecs.umich.edu
42110024Sdam.sunwoo@arm.com    req->taskId(taskId());
4224999Sgblack@eecs.umich.edu    while(1) {
4238832SAli.Saidi@ARM.com        req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr());
4244999Sgblack@eecs.umich.edu
4254999Sgblack@eecs.umich.edu        // translate to physical address
4266023Snate@binkert.org        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);
4274999Sgblack@eecs.umich.edu
4284999Sgblack@eecs.umich.edu        // Now do the access.
4294999Sgblack@eecs.umich.edu        if (fault == NoFault) {
4304999Sgblack@eecs.umich.edu            MemCmd cmd = MemCmd::WriteReq; // default
4314999Sgblack@eecs.umich.edu            bool do_access = true;  // flag to suppress cache access
4324999Sgblack@eecs.umich.edu
4336102Sgblack@eecs.umich.edu            if (req->isLLSC()) {
4344999Sgblack@eecs.umich.edu                cmd = MemCmd::StoreCondReq;
43510030SAli.Saidi@ARM.com                do_access = TheISA::handleLockedWrite(thread, req, dcachePort.cacheBlockMask);
4364999Sgblack@eecs.umich.edu            } else if (req->isSwap()) {
4374999Sgblack@eecs.umich.edu                cmd = MemCmd::SwapReq;
4384999Sgblack@eecs.umich.edu                if (req->isCondSwap()) {
4394999Sgblack@eecs.umich.edu                    assert(res);
4404999Sgblack@eecs.umich.edu                    req->setExtraData(*res);
4414999Sgblack@eecs.umich.edu                }
4424999Sgblack@eecs.umich.edu            }
4434999Sgblack@eecs.umich.edu
4446623Sgblack@eecs.umich.edu            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
4458949Sandreas.hansson@arm.com                Packet pkt = Packet(req, cmd);
4467520Sgblack@eecs.umich.edu                pkt.dataStatic(data);
4474999Sgblack@eecs.umich.edu
4488105Sgblack@eecs.umich.edu                if (req->isMmappedIpr()) {
4494999Sgblack@eecs.umich.edu                    dcache_latency +=
4504999Sgblack@eecs.umich.edu                        TheISA::handleIprWrite(thread->getTC(), &pkt);
4514999Sgblack@eecs.umich.edu                } else {
4528931Sandreas.hansson@arm.com                    if (fastmem && system->isMemAddr(pkt.getAddr()))
4538931Sandreas.hansson@arm.com                        system->getPhysMem().access(&pkt);
4544999Sgblack@eecs.umich.edu                    else
4554999Sgblack@eecs.umich.edu                        dcache_latency += dcachePort.sendAtomic(&pkt);
4564999Sgblack@eecs.umich.edu                }
4574999Sgblack@eecs.umich.edu                dcache_access = true;
4584999Sgblack@eecs.umich.edu                assert(!pkt.isError());
4594999Sgblack@eecs.umich.edu
4604999Sgblack@eecs.umich.edu                if (req->isSwap()) {
4614999Sgblack@eecs.umich.edu                    assert(res);
4627520Sgblack@eecs.umich.edu                    memcpy(res, pkt.getPtr<uint8_t>(), fullSize);
4634999Sgblack@eecs.umich.edu                }
4644999Sgblack@eecs.umich.edu            }
4654999Sgblack@eecs.umich.edu
4664999Sgblack@eecs.umich.edu            if (res && !req->isSwap()) {
4674999Sgblack@eecs.umich.edu                *res = req->getExtraData();
4684878Sstever@eecs.umich.edu            }
4694040Ssaidi@eecs.umich.edu        }
4704040Ssaidi@eecs.umich.edu
4714999Sgblack@eecs.umich.edu        //If there's a fault or we don't need to access a second cache line,
4724999Sgblack@eecs.umich.edu        //stop now.
4734999Sgblack@eecs.umich.edu        if (fault != NoFault || secondAddr <= addr)
4744999Sgblack@eecs.umich.edu        {
4756078Sgblack@eecs.umich.edu            if (req->isLocked() && fault == NoFault) {
4766078Sgblack@eecs.umich.edu                assert(locked);
4776078Sgblack@eecs.umich.edu                locked = false;
4786078Sgblack@eecs.umich.edu            }
4796739Sgblack@eecs.umich.edu            if (fault != NoFault && req->isPrefetch()) {
4806739Sgblack@eecs.umich.edu                return NoFault;
4816739Sgblack@eecs.umich.edu            } else {
4826739Sgblack@eecs.umich.edu                return fault;
4836739Sgblack@eecs.umich.edu            }
4843170Sstever@eecs.umich.edu        }
4853170Sstever@eecs.umich.edu
4864999Sgblack@eecs.umich.edu        /*
4874999Sgblack@eecs.umich.edu         * Set up for accessing the second cache line.
4884999Sgblack@eecs.umich.edu         */
4894999Sgblack@eecs.umich.edu
4904999Sgblack@eecs.umich.edu        //Move the pointer we're reading into to the correct location.
4917520Sgblack@eecs.umich.edu        data += size;
4924999Sgblack@eecs.umich.edu        //Adjust the size to get the remaining bytes.
4937520Sgblack@eecs.umich.edu        size = addr + fullSize - secondAddr;
4944999Sgblack@eecs.umich.edu        //And access the right address.
4954999Sgblack@eecs.umich.edu        addr = secondAddr;
4962623SN/A    }
4972623SN/A}
4982623SN/A
4992623SN/A
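// Main simulation loop: execute up to 'width' instructions per tick (or keep
// going while a locked RMW sequence is in flight), performing fetch, decode
// and execute atomically. Cache latencies are optionally folded into the
// stall time, rounded up to whole clock periods, and the tick event is
// rescheduled unless the CPU has gone idle or finished draining.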
void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();
        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req.taskId(taskId());
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if(decoder.needMoreBytes())
                //{
                    icache_access = true;
                    Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
                    ifetch_pkt.dataStatic(&inst);

                    if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
                        system->getPhysMem().access(&ifetch_pkt);
                    else
                        icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                    assert(!ifetch_pkt.isError());

                    // ifetch_req is initialized to read the instruction directly
                    // into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            // profile for SimPoints if enabled and macro inst is finished
            if (simpoint && curStaticInst && (fault == NoFault) &&
                    (!curStaticInst->isMicroop() ||
                     curStaticInst->isLastMicroop())) {
                profileSimPoint();
            }

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                    clockPeriod();
            }

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        schedule(tickEvent, curTick() + latency);
}


void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

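// SimPoint basic-block vector (BBV) profiling: each basic block is keyed by
// its (start PC, end PC) pair and assigned a unique id on first sight. At
// the end of every interval the per-block instruction counts are written to
// the SimPoint output stream as a "T:id:count ..." line and then reset;
// leftover instructions carry over into the next interval as intervalDrift.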
void
AtomicSimpleCPU::profileSimPoint()
{
    if (!currentBBVInstCount)
        currentBBV.first = thread->pcState().instAddr();

    ++intervalCount;
    ++currentBBVInstCount;

    // If inst is control inst, assume end of basic block.
    if (curStaticInst->isControl()) {
        currentBBV.second = thread->pcState().instAddr();

        auto map_itr = bbMap.find(currentBBV);
        if (map_itr == bbMap.end()) {
            // If a new (previously unseen) basic block is found,
            // add a new unique id, record num of insts and insert into bbMap.
            BBInfo info;
            info.id = bbMap.size() + 1;
            info.insts = currentBBVInstCount;
            info.count = currentBBVInstCount;
            bbMap.insert(std::make_pair(currentBBV, info));
        } else {
            // If basic block is seen before, just increment the count by the
            // number of insts in basic block.
            BBInfo& info = map_itr->second;
            info.count += currentBBVInstCount;
        }
        currentBBVInstCount = 0;

        // Reached end of interval if the sum of the current inst count
        // (intervalCount) and the excessive inst count from the previous
        // interval (intervalDrift) is greater than/equal to the interval size.
        if (intervalCount + intervalDrift >= intervalSize) {
            // summarize interval and display BBV info
            std::vector<pair<uint64_t, uint64_t> > counts;
            for (auto map_itr = bbMap.begin(); map_itr != bbMap.end();
                    ++map_itr) {
                BBInfo& info = map_itr->second;
                if (info.count != 0) {
                    counts.push_back(std::make_pair(info.id, info.count));
                    info.count = 0;
                }
            }
            std::sort(counts.begin(), counts.end());

            // Print output BBV info
            *simpointStream << "T";
            for (auto cnt_itr = counts.begin(); cnt_itr != counts.end();
                    ++cnt_itr) {
                *simpointStream << ":" << cnt_itr->first
                                << ":" << cnt_itr->second << " ";
            }
            *simpointStream << "\n";

            intervalDrift = (intervalCount + intervalDrift) - intervalSize;
            intervalCount = 0;
        }
    }
}

////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new AtomicSimpleCPU(this);
}