// atomic.cc -- revision 10342
12623SN/A/*
210030SAli.Saidi@ARM.com * Copyright (c) 2012-2013 ARM Limited
38926Sandreas.hansson@arm.com * All rights reserved.
48926Sandreas.hansson@arm.com *
58926Sandreas.hansson@arm.com * The license below extends only to copyright in the software and shall
68926Sandreas.hansson@arm.com * not be construed as granting a license to any other intellectual
78926Sandreas.hansson@arm.com * property including but not limited to intellectual property relating
88926Sandreas.hansson@arm.com * to a hardware implementation of the functionality of the software
98926Sandreas.hansson@arm.com * licensed hereunder.  You may use the software subject to the license
108926Sandreas.hansson@arm.com * terms below provided that you ensure that this notice is replicated
118926Sandreas.hansson@arm.com * unmodified and in its entirety in all distributions of the software,
128926Sandreas.hansson@arm.com * modified or unmodified, in source code or in binary form.
138926Sandreas.hansson@arm.com *
142623SN/A * Copyright (c) 2002-2005 The Regents of The University of Michigan
152623SN/A * All rights reserved.
162623SN/A *
172623SN/A * Redistribution and use in source and binary forms, with or without
182623SN/A * modification, are permitted provided that the following conditions are
192623SN/A * met: redistributions of source code must retain the above copyright
202623SN/A * notice, this list of conditions and the following disclaimer;
212623SN/A * redistributions in binary form must reproduce the above copyright
222623SN/A * notice, this list of conditions and the following disclaimer in the
232623SN/A * documentation and/or other materials provided with the distribution;
242623SN/A * neither the name of the copyright holders nor the names of its
252623SN/A * contributors may be used to endorse or promote products derived from
262623SN/A * this software without specific prior written permission.
272623SN/A *
282623SN/A * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
292623SN/A * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
302623SN/A * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
312623SN/A * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
322623SN/A * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
332623SN/A * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
342623SN/A * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
352623SN/A * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
362623SN/A * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
372623SN/A * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
382623SN/A * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
392665Ssaidi@eecs.umich.edu *
402665Ssaidi@eecs.umich.edu * Authors: Steve Reinhardt
412623SN/A */
422623SN/A
433170Sstever@eecs.umich.edu#include "arch/locked_mem.hh"
448105Sgblack@eecs.umich.edu#include "arch/mmapped_ipr.hh"
452623SN/A#include "arch/utility.hh"
464040Ssaidi@eecs.umich.edu#include "base/bigint.hh"
479647Sdam.sunwoo@arm.com#include "base/output.hh"
486658Snate@binkert.org#include "config/the_isa.hh"
498229Snate@binkert.org#include "cpu/simple/atomic.hh"
502623SN/A#include "cpu/exetrace.hh"
519443SAndreas.Sandberg@ARM.com#include "debug/Drain.hh"
528232Snate@binkert.org#include "debug/ExecFaulting.hh"
538232Snate@binkert.org#include "debug/SimpleCPU.hh"
543348Sbinkertn@umich.edu#include "mem/packet.hh"
553348Sbinkertn@umich.edu#include "mem/packet_access.hh"
568926Sandreas.hansson@arm.com#include "mem/physical.hh"
574762Snate@binkert.org#include "params/AtomicSimpleCPU.hh"
587678Sgblack@eecs.umich.edu#include "sim/faults.hh"
592901Ssaidi@eecs.umich.edu#include "sim/system.hh"
608779Sgblack@eecs.umich.edu#include "sim/full_system.hh"
612623SN/A
622623SN/Ausing namespace std;
632623SN/Ausing namespace TheISA;
642623SN/A
// Tick events fire at CPU-tick priority and keep a back-pointer to the
// owning CPU so process() can forward to it.
AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}
692623SN/A
702623SN/A
// Event callback: advance the CPU by running its main simulation loop.
void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}
762623SN/A
// Human-readable label for this event type, used in event-queue
// debug/trace output.
const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}
822623SN/A
// One-time initialisation after construction: set up memory proxies,
// reset architectural state (full-system only), and stamp the CPU's
// statically allocated memory requests with its context id.
void
AtomicSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    // In full-system mode an active (not switched-out) CPU resets the
    // architectural state of each of its thread contexts, including
    // the PC, via the ISA's initCPU hook.
    if (FullSystem && !params()->switched_out) {
        ThreadID size = threadContexts.size();
        for (ThreadID i = 0; i < size; ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, tc->contextId());
        }
    }

    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}
1052623SN/A
// Construct from the generated parameter struct.  All tunables
// (superscalar width, stall simulation, fastmem, SimPoint profiling)
// come straight from the Python configuration.
AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      drain_manager(NULL),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem),
      simpoint(p->simpoint_profile),
      intervalSize(p->simpoint_interval),
      intervalCount(0),
      intervalDrift(0),
      simpointStream(NULL),
      currentBBV(0, 0),
      currentBBVInstCount(0)
{
    _status = Idle;

    // Open the basic-block-vector output stream if SimPoint profiling
    // was requested; it is closed again in the destructor.
    if (simpoint) {
        simpointStream = simout.create(p->simpoint_profile_file, false);
    }
}
1282623SN/A
1292623SN/A
1302623SN/AAtomicSimpleCPU::~AtomicSimpleCPU()
1312623SN/A{
1326775SBrad.Beckmann@amd.com    if (tickEvent.scheduled()) {
1336775SBrad.Beckmann@amd.com        deschedule(tickEvent);
1346775SBrad.Beckmann@amd.com    }
1359647Sdam.sunwoo@arm.com    if (simpointStream) {
1369647Sdam.sunwoo@arm.com        simout.close(simpointStream);
1379647Sdam.sunwoo@arm.com    }
1382623SN/A}
1392623SN/A
// Drain the CPU for a checkpoint / CPU switch.  Returns the number of
// outstanding drain requests: 0 if already quiescent, 1 if the CPU is
// mid-instruction and will signal the DrainManager later (from tick()
// via tryCompleteDrain()).
unsigned int
AtomicSimpleCPU::drain(DrainManager *dm)
{
    // Only one drain request may be outstanding at a time.
    assert(!drain_manager);
    // A switched-out CPU has no in-flight work to drain.
    if (switchedOut())
        return 0;

    if (!isDrained()) {
        // Mid-microcode: stash the manager so completion can be
        // signalled asynchronously once we reach a clean boundary.
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());
        drain_manager = dm;
        return 1;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return 0;
    }
}
1599342SAndreas.Sandberg@arm.com
1602915Sktlim@umich.eduvoid
1619342SAndreas.Sandberg@arm.comAtomicSimpleCPU::drainResume()
1622915Sktlim@umich.edu{
1639448SAndreas.Sandberg@ARM.com    assert(!tickEvent.scheduled());
1649443SAndreas.Sandberg@ARM.com    assert(!drain_manager);
1659448SAndreas.Sandberg@ARM.com    if (switchedOut())
1665220Ssaidi@eecs.umich.edu        return;
1675220Ssaidi@eecs.umich.edu
1684940Snate@binkert.org    DPRINTF(SimpleCPU, "Resume\n");
1699523SAndreas.Sandberg@ARM.com    verifyMemoryMode();
1703324Shsul@eecs.umich.edu
1719448SAndreas.Sandberg@ARM.com    assert(!threadContexts.empty());
1729448SAndreas.Sandberg@ARM.com    if (threadContexts.size() > 1)
1739448SAndreas.Sandberg@ARM.com        fatal("The atomic CPU only supports one thread.\n");
1749448SAndreas.Sandberg@ARM.com
1759448SAndreas.Sandberg@ARM.com    if (thread->status() == ThreadContext::Active) {
1769443SAndreas.Sandberg@ARM.com        schedule(tickEvent, nextCycle());
1779448SAndreas.Sandberg@ARM.com        _status = BaseSimpleCPU::Running;
1789837Slena@cs.wisc,edu        notIdleFraction = 1;
1799448SAndreas.Sandberg@ARM.com    } else {
1809448SAndreas.Sandberg@ARM.com        _status = BaseSimpleCPU::Idle;
1819837Slena@cs.wisc,edu        notIdleFraction = 0;
1829448SAndreas.Sandberg@ARM.com    }
1839443SAndreas.Sandberg@ARM.com
1847897Shestness@cs.utexas.edu    system->totalNumInsts = 0;
1852623SN/A}
1862623SN/A
1879443SAndreas.Sandberg@ARM.combool
1889443SAndreas.Sandberg@ARM.comAtomicSimpleCPU::tryCompleteDrain()
1899443SAndreas.Sandberg@ARM.com{
1909443SAndreas.Sandberg@ARM.com    if (!drain_manager)
1919443SAndreas.Sandberg@ARM.com        return false;
1929443SAndreas.Sandberg@ARM.com
1939443SAndreas.Sandberg@ARM.com    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
1949443SAndreas.Sandberg@ARM.com    if (!isDrained())
1959443SAndreas.Sandberg@ARM.com        return false;
1969443SAndreas.Sandberg@ARM.com
1979443SAndreas.Sandberg@ARM.com    DPRINTF(Drain, "CPU done draining, processing drain event\n");
1989443SAndreas.Sandberg@ARM.com    drain_manager->signalDrainDone();
1999443SAndreas.Sandberg@ARM.com    drain_manager = NULL;
2009443SAndreas.Sandberg@ARM.com
2019443SAndreas.Sandberg@ARM.com    return true;
2029443SAndreas.Sandberg@ARM.com}
2039443SAndreas.Sandberg@ARM.com
2049443SAndreas.Sandberg@ARM.com
// Switch this CPU out.  Draining must already have completed, so all
// we do beyond the base-class bookkeeping is sanity-check the state.
void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}
2142623SN/A
2152623SN/A
// Take over execution state from another (drained) CPU, then re-stamp
// the statically allocated requests with this CPU's context id.
void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());

    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}
2282623SN/A
2299523SAndreas.Sandberg@ARM.comvoid
2309523SAndreas.Sandberg@ARM.comAtomicSimpleCPU::verifyMemoryMode() const
2319523SAndreas.Sandberg@ARM.com{
2329524SAndreas.Sandberg@ARM.com    if (!system->isAtomicMode()) {
2339523SAndreas.Sandberg@ARM.com        fatal("The atomic CPU requires the memory system to be in "
2349523SAndreas.Sandberg@ARM.com              "'atomic' mode.\n");
2359523SAndreas.Sandberg@ARM.com    }
2369523SAndreas.Sandberg@ARM.com}
2372623SN/A
// Wake the (single) thread context after `delay` cycles: account for
// the suspended time in numCycles and schedule the next tick.
void
AtomicSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    // Only a single, hard-coded thread is supported.
    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction = 1;
    // Credit the cycles spent suspended so numCycles tracks wall time.
    numCycles += ticksToCycles(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, clockEdge(delay));
    _status = BaseSimpleCPU::Running;
}
2562623SN/A
2572623SN/A
2582623SN/Avoid
2598737Skoansin.tan@gmail.comAtomicSimpleCPU::suspendContext(ThreadID thread_num)
2602623SN/A{
2614940Snate@binkert.org    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);
2624940Snate@binkert.org
2632623SN/A    assert(thread_num == 0);
2642683Sktlim@umich.edu    assert(thread);
2652623SN/A
2666043Sgblack@eecs.umich.edu    if (_status == Idle)
2676043Sgblack@eecs.umich.edu        return;
2686043Sgblack@eecs.umich.edu
2699342SAndreas.Sandberg@arm.com    assert(_status == BaseSimpleCPU::Running);
2702626SN/A
2712626SN/A    // tick event may not be scheduled if this gets called from inside
2722626SN/A    // an instruction's execution, e.g. "quiesce"
2732626SN/A    if (tickEvent.scheduled())
2745606Snate@binkert.org        deschedule(tickEvent);
2752623SN/A
2769837Slena@cs.wisc,edu    notIdleFraction = 0;
2772623SN/A    _status = Idle;
2782623SN/A}
2792623SN/A
2802623SN/A
28110030SAli.Saidi@ARM.comTick
28210030SAli.Saidi@ARM.comAtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
28310030SAli.Saidi@ARM.com{
28410030SAli.Saidi@ARM.com    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
28510030SAli.Saidi@ARM.com            pkt->cmdString());
28610030SAli.Saidi@ARM.com
28710030SAli.Saidi@ARM.com    // if snoop invalidates, release any associated locks
28810030SAli.Saidi@ARM.com    if (pkt->isInvalidate()) {
28910030SAli.Saidi@ARM.com        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
29010030SAli.Saidi@ARM.com                pkt->getAddr());
29110030SAli.Saidi@ARM.com        TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
29210030SAli.Saidi@ARM.com    }
29310030SAli.Saidi@ARM.com
29410030SAli.Saidi@ARM.com    return 0;
29510030SAli.Saidi@ARM.com}
29610030SAli.Saidi@ARM.com
29710030SAli.Saidi@ARM.comvoid
29810030SAli.Saidi@ARM.comAtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
29910030SAli.Saidi@ARM.com{
30010030SAli.Saidi@ARM.com    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
30110030SAli.Saidi@ARM.com            pkt->cmdString());
30210030SAli.Saidi@ARM.com
30310030SAli.Saidi@ARM.com    // if snoop invalidates, release any associated locks
30410030SAli.Saidi@ARM.com    if (pkt->isInvalidate()) {
30510030SAli.Saidi@ARM.com        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
30610030SAli.Saidi@ARM.com                pkt->getAddr());
30710030SAli.Saidi@ARM.com        TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
30810030SAli.Saidi@ARM.com    }
30910030SAli.Saidi@ARM.com}
31010030SAli.Saidi@ARM.com
/**
 * Atomic memory read of `size` bytes at virtual address `addr` into
 * `data`.  An access that straddles a cache-line boundary is split
 * into two sequential accesses (the while loop runs at most twice).
 *
 * @param addr  virtual start address
 * @param data  destination buffer (at least `size` bytes)
 * @param size  number of bytes to read
 * @param flags request flags (LL/SC, prefetch, locked, ...)
 * @return NoFault on success (or on a faulting prefetch, which is
 *         silently dropped); otherwise the translation/access fault.
 */
Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data,
                         unsigned size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    // If split, shrink the first access to end at the line boundary.
    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    // One iteration per cache line touched (at most two).
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt(req, MemCmd::ReadReq);
            pkt.refineCommand();
            pkt.dataStatic(data);

            if (req->isMmappedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                // fastmem bypasses the port for addresses backed by
                // physical memory; otherwise go through the d-cache port.
                if (fastmem && system->isMemAddr(pkt.getAddr()))
                    system->getPhysMem().access(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            // Record the LL reservation for a load-locked.
            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            // Faulting prefetches are dropped rather than reported.
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            // A successful locked access sets the `locked` flag, which
            // the matching write in writeMem() clears.
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}
3952623SN/A
3967520Sgblack@eecs.umich.edu
/**
 * Atomic memory write of `size` bytes from `data` to virtual address
 * `addr`.  Handles store-conditionals, (conditional) swaps, and
 * cache-block-zero requests; accesses that straddle a cache-line
 * boundary are split into two sequential accesses.
 *
 * @param data  source buffer, or NULL for a CACHE_BLOCK_ZERO request
 * @param size  number of bytes to write
 * @param addr  virtual start address
 * @param flags request flags (LL/SC, swap, locked, prefetch, ...)
 * @param res   in/out: condition value for conditional swaps; out:
 *              old memory contents (swap) or extra data (e.g. SC)
 * @return NoFault on success (or on a faulting prefetch); otherwise
 *         the translation/access fault.
 */
Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{

    // Backing store of zeroes for cache-block-zero requests, which
    // carry no payload of their own.  NOTE(review): the hard-coded 64
    // assumes the cache line is at most 64 bytes -- confirm against
    // cacheLineSize().
    static uint8_t zero_array[64] = {};

    if (data == NULL) {
        assert(size <= 64);
        assert(flags & Request::CACHE_BLOCK_ZERO);
        // This must be a cache block cleaning request
        data = zero_array;
    }

    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    // If split, shrink the first access to end at the line boundary.
    if(secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    // One iteration per cache line touched (at most two).
    while(1) {
        req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                // Store-conditional: handleLockedWrite decides whether
                // the access should actually be performed.
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req, dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    // Conditional swap carries the compare value in.
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    // fastmem bypasses the port for addresses backed by
                    // physical memory; otherwise use the d-cache port.
                    if (fastmem && system->isMemAddr(pkt.getAddr()))
                        system->getPhysMem().access(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    // Return the previous memory contents through *res.
                    assert(res);
                    memcpy(res, pkt.getPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                // NOTE(review): for SC this is presumably the
                // success/failure result -- confirm in the ISA's
                // handleLockedWrite.
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            // A successful locked write clears the `locked` flag set by
            // the matching locked read in readMem().
            if (req->isLocked() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            // Faulting prefetches are dropped rather than reported.
            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}
5072623SN/A
5082623SN/A
5092623SN/Avoid
5102623SN/AAtomicSimpleCPU::tick()
5112623SN/A{
5124940Snate@binkert.org    DPRINTF(SimpleCPU, "Tick\n");
5134940Snate@binkert.org
5145487Snate@binkert.org    Tick latency = 0;
5152623SN/A
5166078Sgblack@eecs.umich.edu    for (int i = 0; i < width || locked; ++i) {
5172623SN/A        numCycles++;
5182623SN/A
5193387Sgblack@eecs.umich.edu        if (!curStaticInst || !curStaticInst->isDelayedCommit())
5203387Sgblack@eecs.umich.edu            checkForInterrupts();
5212626SN/A
5225348Ssaidi@eecs.umich.edu        checkPcEventQueue();
5238143SAli.Saidi@ARM.com        // We must have just got suspended by a PC event
5249443SAndreas.Sandberg@ARM.com        if (_status == Idle) {
5259443SAndreas.Sandberg@ARM.com            tryCompleteDrain();
5268143SAli.Saidi@ARM.com            return;
5279443SAndreas.Sandberg@ARM.com        }
5285348Ssaidi@eecs.umich.edu
5295669Sgblack@eecs.umich.edu        Fault fault = NoFault;
5305669Sgblack@eecs.umich.edu
5317720Sgblack@eecs.umich.edu        TheISA::PCState pcState = thread->pcState();
5327720Sgblack@eecs.umich.edu
5337720Sgblack@eecs.umich.edu        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
5347720Sgblack@eecs.umich.edu                           !curMacroStaticInst;
5357720Sgblack@eecs.umich.edu        if (needToFetch) {
53610024Sdam.sunwoo@arm.com            ifetch_req.taskId(taskId());
5375894Sgblack@eecs.umich.edu            setupFetchRequest(&ifetch_req);
5386023Snate@binkert.org            fault = thread->itb->translateAtomic(&ifetch_req, tc,
5396023Snate@binkert.org                                                 BaseTLB::Execute);
5405894Sgblack@eecs.umich.edu        }
5412623SN/A
5422623SN/A        if (fault == NoFault) {
5434182Sgblack@eecs.umich.edu            Tick icache_latency = 0;
5444182Sgblack@eecs.umich.edu            bool icache_access = false;
5454182Sgblack@eecs.umich.edu            dcache_access = false; // assume no dcache access
5462662Sstever@eecs.umich.edu
5477720Sgblack@eecs.umich.edu            if (needToFetch) {
5489023Sgblack@eecs.umich.edu                // This is commented out because the decoder would act like
5495694Sgblack@eecs.umich.edu                // a tiny cache otherwise. It wouldn't be flushed when needed
5505694Sgblack@eecs.umich.edu                // like the I cache. It should be flushed, and when that works
5515694Sgblack@eecs.umich.edu                // this code should be uncommented.
5525669Sgblack@eecs.umich.edu                //Fetch more instruction memory if necessary
5539023Sgblack@eecs.umich.edu                //if(decoder.needMoreBytes())
5545669Sgblack@eecs.umich.edu                //{
5555669Sgblack@eecs.umich.edu                    icache_access = true;
5568949Sandreas.hansson@arm.com                    Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
5575669Sgblack@eecs.umich.edu                    ifetch_pkt.dataStatic(&inst);
5582623SN/A
5598931Sandreas.hansson@arm.com                    if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
5608931Sandreas.hansson@arm.com                        system->getPhysMem().access(&ifetch_pkt);
5615669Sgblack@eecs.umich.edu                    else
5625669Sgblack@eecs.umich.edu                        icache_latency = icachePort.sendAtomic(&ifetch_pkt);
5634968Sacolyte@umich.edu
5645669Sgblack@eecs.umich.edu                    assert(!ifetch_pkt.isError());
5654968Sacolyte@umich.edu
5665669Sgblack@eecs.umich.edu                    // ifetch_req is initialized to read the instruction directly
5675669Sgblack@eecs.umich.edu                    // into the CPU object's inst field.
5685669Sgblack@eecs.umich.edu                //}
5695669Sgblack@eecs.umich.edu            }
5704182Sgblack@eecs.umich.edu
5712623SN/A            preExecute();
5723814Ssaidi@eecs.umich.edu
5735001Sgblack@eecs.umich.edu            if (curStaticInst) {
5744182Sgblack@eecs.umich.edu                fault = curStaticInst->execute(this, traceData);
5754998Sgblack@eecs.umich.edu
5764998Sgblack@eecs.umich.edu                // keep an instruction count
5774998Sgblack@eecs.umich.edu                if (fault == NoFault)
5784998Sgblack@eecs.umich.edu                    countInst();
5797655Sali.saidi@arm.com                else if (traceData && !DTRACE(ExecFaulting)) {
5805001Sgblack@eecs.umich.edu                    delete traceData;
5815001Sgblack@eecs.umich.edu                    traceData = NULL;
5825001Sgblack@eecs.umich.edu                }
5834998Sgblack@eecs.umich.edu
5844182Sgblack@eecs.umich.edu                postExecute();
5854182Sgblack@eecs.umich.edu            }
5862623SN/A
5873814Ssaidi@eecs.umich.edu            // @todo remove me after debugging with legion done
5884539Sgblack@eecs.umich.edu            if (curStaticInst && (!curStaticInst->isMicroop() ||
5894539Sgblack@eecs.umich.edu                        curStaticInst->isFirstMicroop()))
5903814Ssaidi@eecs.umich.edu                instCnt++;
5913814Ssaidi@eecs.umich.edu
5929647Sdam.sunwoo@arm.com            // profile for SimPoints if enabled and macro inst is finished
5939647Sdam.sunwoo@arm.com            if (simpoint && curStaticInst && (fault == NoFault) &&
5949647Sdam.sunwoo@arm.com                    (!curStaticInst->isMicroop() ||
5959647Sdam.sunwoo@arm.com                     curStaticInst->isLastMicroop())) {
5969647Sdam.sunwoo@arm.com                profileSimPoint();
5979647Sdam.sunwoo@arm.com            }
5989647Sdam.sunwoo@arm.com
5995487Snate@binkert.org            Tick stall_ticks = 0;
6005487Snate@binkert.org            if (simulate_inst_stalls && icache_access)
6015487Snate@binkert.org                stall_ticks += icache_latency;
6025487Snate@binkert.org
6035487Snate@binkert.org            if (simulate_data_stalls && dcache_access)
6045487Snate@binkert.org                stall_ticks += dcache_latency;
6055487Snate@binkert.org
6065487Snate@binkert.org            if (stall_ticks) {
6079180Sandreas.hansson@arm.com                // the atomic cpu does its accounting in ticks, so
6089180Sandreas.hansson@arm.com                // keep counting in ticks but round to the clock
6099180Sandreas.hansson@arm.com                // period
6109180Sandreas.hansson@arm.com                latency += divCeil(stall_ticks, clockPeriod()) *
6119180Sandreas.hansson@arm.com                    clockPeriod();
6122623SN/A            }
6132623SN/A
6142623SN/A        }
6154377Sgblack@eecs.umich.edu        if(fault != NoFault || !stayAtPC)
6164182Sgblack@eecs.umich.edu            advancePC(fault);
6172623SN/A    }
6182623SN/A
6199443SAndreas.Sandberg@ARM.com    if (tryCompleteDrain())
6209443SAndreas.Sandberg@ARM.com        return;
6219443SAndreas.Sandberg@ARM.com
6225487Snate@binkert.org    // instruction takes at least one cycle
6239179Sandreas.hansson@arm.com    if (latency < clockPeriod())
6249179Sandreas.hansson@arm.com        latency = clockPeriod();
6255487Snate@binkert.org
6262626SN/A    if (_status != Idle)
6277823Ssteve.reinhardt@amd.com        schedule(tickEvent, curTick() + latency);
6282623SN/A}
6292623SN/A
6302623SN/A
6315315Sstever@gmail.comvoid
6325315Sstever@gmail.comAtomicSimpleCPU::printAddr(Addr a)
6335315Sstever@gmail.com{
6345315Sstever@gmail.com    dcachePort.printAddr(a);
6355315Sstever@gmail.com}
6365315Sstever@gmail.com
6379647Sdam.sunwoo@arm.comvoid
6389647Sdam.sunwoo@arm.comAtomicSimpleCPU::profileSimPoint()
6399647Sdam.sunwoo@arm.com{
6409647Sdam.sunwoo@arm.com    if (!currentBBVInstCount)
6419647Sdam.sunwoo@arm.com        currentBBV.first = thread->pcState().instAddr();
6429647Sdam.sunwoo@arm.com
6439647Sdam.sunwoo@arm.com    ++intervalCount;
6449647Sdam.sunwoo@arm.com    ++currentBBVInstCount;
6459647Sdam.sunwoo@arm.com
6469647Sdam.sunwoo@arm.com    // If inst is control inst, assume end of basic block.
6479647Sdam.sunwoo@arm.com    if (curStaticInst->isControl()) {
6489647Sdam.sunwoo@arm.com        currentBBV.second = thread->pcState().instAddr();
6499647Sdam.sunwoo@arm.com
6509647Sdam.sunwoo@arm.com        auto map_itr = bbMap.find(currentBBV);
6519647Sdam.sunwoo@arm.com        if (map_itr == bbMap.end()){
6529647Sdam.sunwoo@arm.com            // If a new (previously unseen) basic block is found,
6539647Sdam.sunwoo@arm.com            // add a new unique id, record num of insts and insert into bbMap.
6549647Sdam.sunwoo@arm.com            BBInfo info;
6559647Sdam.sunwoo@arm.com            info.id = bbMap.size() + 1;
6569647Sdam.sunwoo@arm.com            info.insts = currentBBVInstCount;
6579647Sdam.sunwoo@arm.com            info.count = currentBBVInstCount;
6589647Sdam.sunwoo@arm.com            bbMap.insert(std::make_pair(currentBBV, info));
6599647Sdam.sunwoo@arm.com        } else {
6609647Sdam.sunwoo@arm.com            // If basic block is seen before, just increment the count by the
6619647Sdam.sunwoo@arm.com            // number of insts in basic block.
6629647Sdam.sunwoo@arm.com            BBInfo& info = map_itr->second;
6639647Sdam.sunwoo@arm.com            info.count += currentBBVInstCount;
6649647Sdam.sunwoo@arm.com        }
6659647Sdam.sunwoo@arm.com        currentBBVInstCount = 0;
6669647Sdam.sunwoo@arm.com
6679647Sdam.sunwoo@arm.com        // Reached end of interval if the sum of the current inst count
6689647Sdam.sunwoo@arm.com        // (intervalCount) and the excessive inst count from the previous
6699647Sdam.sunwoo@arm.com        // interval (intervalDrift) is greater than/equal to the interval size.
6709647Sdam.sunwoo@arm.com        if (intervalCount + intervalDrift >= intervalSize) {
6719647Sdam.sunwoo@arm.com            // summarize interval and display BBV info
6729647Sdam.sunwoo@arm.com            std::vector<pair<uint64_t, uint64_t> > counts;
6739647Sdam.sunwoo@arm.com            for (auto map_itr = bbMap.begin(); map_itr != bbMap.end();
6749647Sdam.sunwoo@arm.com                    ++map_itr) {
6759647Sdam.sunwoo@arm.com                BBInfo& info = map_itr->second;
6769647Sdam.sunwoo@arm.com                if (info.count != 0) {
6779647Sdam.sunwoo@arm.com                    counts.push_back(std::make_pair(info.id, info.count));
6789647Sdam.sunwoo@arm.com                    info.count = 0;
6799647Sdam.sunwoo@arm.com                }
6809647Sdam.sunwoo@arm.com            }
6819647Sdam.sunwoo@arm.com            std::sort(counts.begin(), counts.end());
6829647Sdam.sunwoo@arm.com
6839647Sdam.sunwoo@arm.com            // Print output BBV info
6849647Sdam.sunwoo@arm.com            *simpointStream << "T";
6859647Sdam.sunwoo@arm.com            for (auto cnt_itr = counts.begin(); cnt_itr != counts.end();
6869647Sdam.sunwoo@arm.com                    ++cnt_itr) {
6879647Sdam.sunwoo@arm.com                *simpointStream << ":" << cnt_itr->first
6889647Sdam.sunwoo@arm.com                                << ":" << cnt_itr->second << " ";
6899647Sdam.sunwoo@arm.com            }
6909647Sdam.sunwoo@arm.com            *simpointStream << "\n";
6919647Sdam.sunwoo@arm.com
6929647Sdam.sunwoo@arm.com            intervalDrift = (intervalCount + intervalDrift) - intervalSize;
6939647Sdam.sunwoo@arm.com            intervalCount = 0;
6949647Sdam.sunwoo@arm.com        }
6959647Sdam.sunwoo@arm.com    }
6969647Sdam.sunwoo@arm.com}
6975315Sstever@gmail.com
6982623SN/A////////////////////////////////////////////////////////////////////////
6992623SN/A//
7002623SN/A//  AtomicSimpleCPU Simulation Object
7012623SN/A//
7024762Snate@binkert.orgAtomicSimpleCPU *
7034762Snate@binkert.orgAtomicSimpleCPUParams::create()
7042623SN/A{
7055529Snate@binkert.org    numThreads = 1;
7068779Sgblack@eecs.umich.edu    if (!FullSystem && workload.size() != 1)
7074762Snate@binkert.org        panic("only one workload allowed");
7085529Snate@binkert.org    return new AtomicSimpleCPU(this);
7092623SN/A}
710