// atomic.cc — gem5 AtomicSimpleCPU implementation (revision 13652)
12623SN/A/*
210596Sgabeblack@google.com * Copyright 2014 Google, Inc.
313012Sandreas.sandberg@arm.com * Copyright (c) 2012-2013,2015,2017-2018 ARM Limited
48926Sandreas.hansson@arm.com * All rights reserved.
58926Sandreas.hansson@arm.com *
68926Sandreas.hansson@arm.com * The license below extends only to copyright in the software and shall
78926Sandreas.hansson@arm.com * not be construed as granting a license to any other intellectual
88926Sandreas.hansson@arm.com * property including but not limited to intellectual property relating
98926Sandreas.hansson@arm.com * to a hardware implementation of the functionality of the software
108926Sandreas.hansson@arm.com * licensed hereunder.  You may use the software subject to the license
118926Sandreas.hansson@arm.com * terms below provided that you ensure that this notice is replicated
128926Sandreas.hansson@arm.com * unmodified and in its entirety in all distributions of the software,
138926Sandreas.hansson@arm.com * modified or unmodified, in source code or in binary form.
148926Sandreas.hansson@arm.com *
152623SN/A * Copyright (c) 2002-2005 The Regents of The University of Michigan
162623SN/A * All rights reserved.
172623SN/A *
182623SN/A * Redistribution and use in source and binary forms, with or without
192623SN/A * modification, are permitted provided that the following conditions are
202623SN/A * met: redistributions of source code must retain the above copyright
212623SN/A * notice, this list of conditions and the following disclaimer;
222623SN/A * redistributions in binary form must reproduce the above copyright
232623SN/A * notice, this list of conditions and the following disclaimer in the
242623SN/A * documentation and/or other materials provided with the distribution;
252623SN/A * neither the name of the copyright holders nor the names of its
262623SN/A * contributors may be used to endorse or promote products derived from
272623SN/A * this software without specific prior written permission.
282623SN/A *
292623SN/A * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
302623SN/A * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
312623SN/A * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
322623SN/A * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
332623SN/A * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
342623SN/A * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
352623SN/A * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
362623SN/A * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
372623SN/A * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
382623SN/A * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
392623SN/A * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
402665Ssaidi@eecs.umich.edu *
412665Ssaidi@eecs.umich.edu * Authors: Steve Reinhardt
422623SN/A */
432623SN/A
4411793Sbrandon.potter@amd.com#include "cpu/simple/atomic.hh"
4511793Sbrandon.potter@amd.com
463170Sstever@eecs.umich.edu#include "arch/locked_mem.hh"
478105Sgblack@eecs.umich.edu#include "arch/mmapped_ipr.hh"
482623SN/A#include "arch/utility.hh"
499647Sdam.sunwoo@arm.com#include "base/output.hh"
506658Snate@binkert.org#include "config/the_isa.hh"
512623SN/A#include "cpu/exetrace.hh"
529443SAndreas.Sandberg@ARM.com#include "debug/Drain.hh"
538232Snate@binkert.org#include "debug/ExecFaulting.hh"
548232Snate@binkert.org#include "debug/SimpleCPU.hh"
553348Sbinkertn@umich.edu#include "mem/packet.hh"
563348Sbinkertn@umich.edu#include "mem/packet_access.hh"
578926Sandreas.hansson@arm.com#include "mem/physical.hh"
584762Snate@binkert.org#include "params/AtomicSimpleCPU.hh"
597678Sgblack@eecs.umich.edu#include "sim/faults.hh"
6011793Sbrandon.potter@amd.com#include "sim/full_system.hh"
612901Ssaidi@eecs.umich.edu#include "sim/system.hh"
622623SN/A
// NOTE(review): file-scope using-directives are an anti-pattern in new
// code, but the definitions below rely on unqualified names from both
// namespaces (e.g. memcpy, the TheISA::handleLocked* helpers), so they
// are kept as-is.
using namespace std;
using namespace TheISA;
652623SN/A
662623SN/Avoid
672623SN/AAtomicSimpleCPU::init()
682623SN/A{
6911147Smitch.hayenga@arm.com    BaseSimpleCPU::init();
708921Sandreas.hansson@arm.com
7111148Smitch.hayenga@arm.com    int cid = threadContexts[0]->contextId();
7212749Sgiacomo.travaglini@arm.com    ifetch_req->setContext(cid);
7312749Sgiacomo.travaglini@arm.com    data_read_req->setContext(cid);
7412749Sgiacomo.travaglini@arm.com    data_write_req->setContext(cid);
7513652Sqtt2@cornell.edu    data_amo_req->setContext(cid);
762623SN/A}
772623SN/A
// Construct the CPU: wire up the tick event, cache ports and stall
// bookkeeping from the parameter object, then pre-allocate the memory
// request objects that are recycled for every fetch/load/store/AMO.
AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p),
      tickEvent([this]{ tick(); }, "AtomicSimpleCPU tick",
                false, Event::CPU_Tick_Pri),
      width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      dcache_access(false), dcache_latency(0),
      ppCommit(nullptr)
{
    _status = Idle;
    // Requests are allocated once here and reused across accesses;
    // their context id is assigned later, in init().
    ifetch_req = std::make_shared<Request>();
    data_read_req = std::make_shared<Request>();
    data_write_req = std::make_shared<Request>();
    data_amo_req = std::make_shared<Request>();
}
962623SN/A
972623SN/A
982623SN/AAtomicSimpleCPU::~AtomicSimpleCPU()
992623SN/A{
1006775SBrad.Beckmann@amd.com    if (tickEvent.scheduled()) {
1016775SBrad.Beckmann@amd.com        deschedule(tickEvent);
1026775SBrad.Beckmann@amd.com    }
1032623SN/A}
1042623SN/A
10510913Sandreas.sandberg@arm.comDrainState
10610913Sandreas.sandberg@arm.comAtomicSimpleCPU::drain()
1072623SN/A{
10812276Sanouk.vanlaer@arm.com    // Deschedule any power gating event (if any)
10912276Sanouk.vanlaer@arm.com    deschedulePowerGatingEvent();
11012276Sanouk.vanlaer@arm.com
1119448SAndreas.Sandberg@ARM.com    if (switchedOut())
11210913Sandreas.sandberg@arm.com        return DrainState::Drained;
1132623SN/A
1149443SAndreas.Sandberg@ARM.com    if (!isDrained()) {
11511147Smitch.hayenga@arm.com        DPRINTF(Drain, "Requesting drain.\n");
11610913Sandreas.sandberg@arm.com        return DrainState::Draining;
1179443SAndreas.Sandberg@ARM.com    } else {
1189443SAndreas.Sandberg@ARM.com        if (tickEvent.scheduled())
1199443SAndreas.Sandberg@ARM.com            deschedule(tickEvent);
1202915Sktlim@umich.edu
12111147Smitch.hayenga@arm.com        activeThreads.clear();
1229443SAndreas.Sandberg@ARM.com        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
12310913Sandreas.sandberg@arm.com        return DrainState::Drained;
1249443SAndreas.Sandberg@ARM.com    }
1259342SAndreas.Sandberg@arm.com}
1269342SAndreas.Sandberg@arm.com
1272915Sktlim@umich.eduvoid
12811148Smitch.hayenga@arm.comAtomicSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender)
12911148Smitch.hayenga@arm.com{
13011148Smitch.hayenga@arm.com    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
13111148Smitch.hayenga@arm.com            pkt->cmdString());
13211148Smitch.hayenga@arm.com
13311148Smitch.hayenga@arm.com    for (ThreadID tid = 0; tid < numThreads; tid++) {
13411148Smitch.hayenga@arm.com        if (tid != sender) {
13511321Ssteve.reinhardt@amd.com            if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
13611151Smitch.hayenga@arm.com                wakeup(tid);
13711148Smitch.hayenga@arm.com            }
13811148Smitch.hayenga@arm.com
13911148Smitch.hayenga@arm.com            TheISA::handleLockedSnoop(threadInfo[tid]->thread,
14011148Smitch.hayenga@arm.com                                      pkt, dcachePort.cacheBlockMask);
14111148Smitch.hayenga@arm.com        }
14211148Smitch.hayenga@arm.com    }
14311148Smitch.hayenga@arm.com}
14411148Smitch.hayenga@arm.com
// Resume execution after a drain: re-evaluate every thread's status,
// rebuild the active-thread list and restart the tick event if at
// least one thread is active.
void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    // Fatal if the memory system is not in atomic mode.
    verifyMemoryMode();

    assert(!threadContexts.empty());

    _status = BaseSimpleCPU::Idle;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
            threadInfo[tid]->notIdleFraction = 1;
            activeThreads.push_back(tid);
            _status = BaseSimpleCPU::Running;

            // Tick if any threads active
            if (!tickEvent.scheduled()) {
                schedule(tickEvent, nextCycle());
            }
        } else {
            threadInfo[tid]->notIdleFraction = 0;
        }
    }

    // Reschedule any power gating event (if any)
    schedulePowerGatingEvent();
}
1772623SN/A
1789443SAndreas.Sandberg@ARM.combool
1799443SAndreas.Sandberg@ARM.comAtomicSimpleCPU::tryCompleteDrain()
1809443SAndreas.Sandberg@ARM.com{
18110913Sandreas.sandberg@arm.com    if (drainState() != DrainState::Draining)
1829443SAndreas.Sandberg@ARM.com        return false;
1839443SAndreas.Sandberg@ARM.com
18411147Smitch.hayenga@arm.com    DPRINTF(Drain, "tryCompleteDrain.\n");
1859443SAndreas.Sandberg@ARM.com    if (!isDrained())
1869443SAndreas.Sandberg@ARM.com        return false;
1879443SAndreas.Sandberg@ARM.com
1889443SAndreas.Sandberg@ARM.com    DPRINTF(Drain, "CPU done draining, processing drain event\n");
18910913Sandreas.sandberg@arm.com    signalDrainDone();
1909443SAndreas.Sandberg@ARM.com
1919443SAndreas.Sandberg@ARM.com    return true;
1929443SAndreas.Sandberg@ARM.com}
1939443SAndreas.Sandberg@ARM.com
1949443SAndreas.Sandberg@ARM.com
// Switch this CPU out; the base class does the actual work, the
// asserts only verify that draining already quiesced the model.
void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    // drain() must have descheduled the tick event already.
    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}
2042623SN/A
2052623SN/A
// Take over simulation state from another CPU model (e.g. when
// switching from a detailed CPU to this atomic one).
void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());
}
2142623SN/A
2159523SAndreas.Sandberg@ARM.comvoid
2169523SAndreas.Sandberg@ARM.comAtomicSimpleCPU::verifyMemoryMode() const
2179523SAndreas.Sandberg@ARM.com{
2189524SAndreas.Sandberg@ARM.com    if (!system->isAtomicMode()) {
2199523SAndreas.Sandberg@ARM.com        fatal("The atomic CPU requires the memory system to be in "
2209523SAndreas.Sandberg@ARM.com              "'atomic' mode.\n");
2219523SAndreas.Sandberg@ARM.com    }
2229523SAndreas.Sandberg@ARM.com}
2232623SN/A
2242623SN/Avoid
22510407Smitch.hayenga@arm.comAtomicSimpleCPU::activateContext(ThreadID thread_num)
2262623SN/A{
22710407Smitch.hayenga@arm.com    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);
2284940Snate@binkert.org
22911147Smitch.hayenga@arm.com    assert(thread_num < numThreads);
2302623SN/A
23111147Smitch.hayenga@arm.com    threadInfo[thread_num]->notIdleFraction = 1;
23211147Smitch.hayenga@arm.com    Cycles delta = ticksToCycles(threadInfo[thread_num]->thread->lastActivate -
23311147Smitch.hayenga@arm.com                                 threadInfo[thread_num]->thread->lastSuspend);
23410464SAndreas.Sandberg@ARM.com    numCycles += delta;
2353686Sktlim@umich.edu
23611147Smitch.hayenga@arm.com    if (!tickEvent.scheduled()) {
23711147Smitch.hayenga@arm.com        //Make sure ticks are still on multiples of cycles
23811147Smitch.hayenga@arm.com        schedule(tickEvent, clockEdge(Cycles(0)));
23911147Smitch.hayenga@arm.com    }
2409342SAndreas.Sandberg@arm.com    _status = BaseSimpleCPU::Running;
24111147Smitch.hayenga@arm.com    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num)
24211147Smitch.hayenga@arm.com        == activeThreads.end()) {
24311147Smitch.hayenga@arm.com        activeThreads.push_back(thread_num);
24411147Smitch.hayenga@arm.com    }
24511526Sdavid.guillen@arm.com
24611526Sdavid.guillen@arm.com    BaseCPU::activateContext(thread_num);
2472623SN/A}
2482623SN/A
2492623SN/A
2502623SN/Avoid
2518737Skoansin.tan@gmail.comAtomicSimpleCPU::suspendContext(ThreadID thread_num)
2522623SN/A{
2534940Snate@binkert.org    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);
2544940Snate@binkert.org
25511147Smitch.hayenga@arm.com    assert(thread_num < numThreads);
25611147Smitch.hayenga@arm.com    activeThreads.remove(thread_num);
2572623SN/A
2586043Sgblack@eecs.umich.edu    if (_status == Idle)
2596043Sgblack@eecs.umich.edu        return;
2606043Sgblack@eecs.umich.edu
2619342SAndreas.Sandberg@arm.com    assert(_status == BaseSimpleCPU::Running);
2622626SN/A
26311147Smitch.hayenga@arm.com    threadInfo[thread_num]->notIdleFraction = 0;
2642623SN/A
26511147Smitch.hayenga@arm.com    if (activeThreads.empty()) {
26611147Smitch.hayenga@arm.com        _status = Idle;
26711147Smitch.hayenga@arm.com
26811147Smitch.hayenga@arm.com        if (tickEvent.scheduled()) {
26911147Smitch.hayenga@arm.com            deschedule(tickEvent);
27011147Smitch.hayenga@arm.com        }
27111147Smitch.hayenga@arm.com    }
27211147Smitch.hayenga@arm.com
27311526Sdavid.guillen@arm.com    BaseCPU::suspendContext(thread_num);
2742623SN/A}
2752623SN/A
// Single funnel point for outgoing atomic accesses; returns the
// latency reported by the port. NOTE(review): presumably kept as a
// separate method so derived CPU models can interpose on memory
// traffic — confirm against the class declaration.
Tick
AtomicSimpleCPU::sendPacket(MasterPort &port, const PacketPtr &pkt)
{
    return port.sendAtomic(pkt);
}
2812623SN/A
// Handle an atomic snoop arriving on the data port: wake any thread
// whose monitor matches, and clear LL/SC reservations where needed.
// Always reports zero snoop latency.
Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);

    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // if snoop invalidates, release any associated locks
    // When run without caches, Invalidation packets will not be received
    // hence we must check if the incoming packets are writes and wakeup
    // the processor accordingly
    if (pkt->isInvalidate() || pkt->isWrite()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }

    return 0;
}
31110030SAli.Saidi@ARM.com
// Functional-path counterpart of recvAtomicSnoop. Unlike the atomic
// path, only invalidations (not plain writes) release LL/SC locks here.
void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }
}
33510030SAli.Saidi@ARM.com
3362623SN/AFault
33711608Snikos.nikoleris@arm.comAtomicSimpleCPU::readMem(Addr addr, uint8_t * data, unsigned size,
33811608Snikos.nikoleris@arm.com                         Request::Flags flags)
3392623SN/A{
34011147Smitch.hayenga@arm.com    SimpleExecContext& t_info = *threadInfo[curThread];
34111147Smitch.hayenga@arm.com    SimpleThread* thread = t_info.thread;
34211147Smitch.hayenga@arm.com
3433169Sstever@eecs.umich.edu    // use the CPU's statically allocated read request and packet objects
34412749Sgiacomo.travaglini@arm.com    const RequestPtr &req = data_read_req;
3452623SN/A
34610665SAli.Saidi@ARM.com    if (traceData)
34710665SAli.Saidi@ARM.com        traceData->setMem(addr, size, flags);
3482623SN/A
3494999Sgblack@eecs.umich.edu    //The size of the data we're trying to read.
3507520Sgblack@eecs.umich.edu    int fullSize = size;
3512623SN/A
3524999Sgblack@eecs.umich.edu    //The address of the second part of this access if it needs to be split
3534999Sgblack@eecs.umich.edu    //across a cache line boundary.
3549814Sandreas.hansson@arm.com    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());
3554999Sgblack@eecs.umich.edu
3567520Sgblack@eecs.umich.edu    if (secondAddr > addr)
3577520Sgblack@eecs.umich.edu        size = secondAddr - addr;
3584999Sgblack@eecs.umich.edu
3594999Sgblack@eecs.umich.edu    dcache_latency = 0;
3604999Sgblack@eecs.umich.edu
36110024Sdam.sunwoo@arm.com    req->taskId(taskId());
3627520Sgblack@eecs.umich.edu    while (1) {
3638832SAli.Saidi@ARM.com        req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr());
3644999Sgblack@eecs.umich.edu
3654999Sgblack@eecs.umich.edu        // translate to physical address
36611147Smitch.hayenga@arm.com        Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
36711147Smitch.hayenga@arm.com                                                          BaseTLB::Read);
3684999Sgblack@eecs.umich.edu
3694999Sgblack@eecs.umich.edu        // Now do the access.
3706623Sgblack@eecs.umich.edu        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
37110739Ssteve.reinhardt@amd.com            Packet pkt(req, Packet::makeReadCmd(req));
3727520Sgblack@eecs.umich.edu            pkt.dataStatic(data);
3734999Sgblack@eecs.umich.edu
37413012Sandreas.sandberg@arm.com            if (req->isMmappedIpr()) {
3754999Sgblack@eecs.umich.edu                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
37613012Sandreas.sandberg@arm.com            } else {
37713012Sandreas.sandberg@arm.com                dcache_latency += sendPacket(dcachePort, &pkt);
3784999Sgblack@eecs.umich.edu            }
3794999Sgblack@eecs.umich.edu            dcache_access = true;
3805012Sgblack@eecs.umich.edu
3814999Sgblack@eecs.umich.edu            assert(!pkt.isError());
3824999Sgblack@eecs.umich.edu
3836102Sgblack@eecs.umich.edu            if (req->isLLSC()) {
3844999Sgblack@eecs.umich.edu                TheISA::handleLockedRead(thread, req);
3854999Sgblack@eecs.umich.edu            }
3864968Sacolyte@umich.edu        }
3874986Ssaidi@eecs.umich.edu
3884999Sgblack@eecs.umich.edu        //If there's a fault, return it
3896739Sgblack@eecs.umich.edu        if (fault != NoFault) {
3906739Sgblack@eecs.umich.edu            if (req->isPrefetch()) {
3916739Sgblack@eecs.umich.edu                return NoFault;
3926739Sgblack@eecs.umich.edu            } else {
3936739Sgblack@eecs.umich.edu                return fault;
3946739Sgblack@eecs.umich.edu            }
3956739Sgblack@eecs.umich.edu        }
3966739Sgblack@eecs.umich.edu
3974999Sgblack@eecs.umich.edu        //If we don't need to access a second cache line, stop now.
3984999Sgblack@eecs.umich.edu        if (secondAddr <= addr)
3994999Sgblack@eecs.umich.edu        {
40010760Ssteve.reinhardt@amd.com            if (req->isLockedRMW() && fault == NoFault) {
4016078Sgblack@eecs.umich.edu                assert(!locked);
4026078Sgblack@eecs.umich.edu                locked = true;
4036078Sgblack@eecs.umich.edu            }
40411147Smitch.hayenga@arm.com
4054999Sgblack@eecs.umich.edu            return fault;
4064968Sacolyte@umich.edu        }
4073170Sstever@eecs.umich.edu
4084999Sgblack@eecs.umich.edu        /*
4094999Sgblack@eecs.umich.edu         * Set up for accessing the second cache line.
4104999Sgblack@eecs.umich.edu         */
4114999Sgblack@eecs.umich.edu
4124999Sgblack@eecs.umich.edu        //Move the pointer we're reading into to the correct location.
4137520Sgblack@eecs.umich.edu        data += size;
4144999Sgblack@eecs.umich.edu        //Adjust the size to get the remaining bytes.
4157520Sgblack@eecs.umich.edu        size = addr + fullSize - secondAddr;
4164999Sgblack@eecs.umich.edu        //And access the right address.
4174999Sgblack@eecs.umich.edu        addr = secondAddr;
4182623SN/A    }
4192623SN/A}
4202623SN/A
// Perform an atomic-mode write of size bytes at virtual address addr.
// Handles store-conditionals (LL/SC), swaps/conditional swaps (old
// value returned through res), cache-block cleaning requests (data ==
// NULL), and accesses split across a cache-line boundary. Faulting
// prefetches return NoFault.
Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, Addr addr,
                          Request::Flags flags, uint64_t *res)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;
    // Shared zero source for block-cleaning writes; 64 bytes covers
    // the largest supported cache line for such requests (see assert).
    static uint8_t zero_array[64] = {};

    if (data == NULL) {
        assert(size <= 64);
        assert(flags & Request::STORE_NO_DATA);
        // This must be a cache block cleaning request
        data = zero_array;
    }

    // use the CPU's statically allocated write request and packet objects
    const RequestPtr &req = data_write_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    //The size of the data we're trying to write.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, thread->getTC(), BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                // Store-conditional: only access memory if the
                // reservation is still valid.
                do_access = TheISA::handleLockedWrite(thread, req, dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt(req, Packet::makeWriteCmd(req));
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    dcache_latency += sendPacket(dcachePort, &pkt);

                    // Notify other threads on this CPU of write
                    threadSnoop(&pkt, curThread);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    // Return the old memory contents to the caller.
                    assert(res);
                    memcpy(res, pkt.getConstPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                // e.g. store-conditional success/failure result.
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            // Close out a locked-RMW sequence opened by readMem().
            if (req->isLockedRMW() && fault == NoFault) {
                assert(locked);
                locked = false;
            }


            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}
5302623SN/A
// Perform an atomic memory operation (AMO) of size bytes at virtual
// address addr. The operation itself is carried by amo_op inside the
// request; on return, data holds the value read back by the AMO.
// Cache-line-crossing AMOs are rejected outright (panic).
Fault
AtomicSimpleCPU::amoMem(Addr addr, uint8_t* data, unsigned size,
                        Request::Flags flags, AtomicOpFunctor *amo_op)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    // use the CPU's statically allocated amo request and packet objects
    const RequestPtr &req = data_amo_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    // AMO requests that access across a cache line boundary are not
    // allowed since the cache does not guarantee AMO ops to be executed
    // atomically in two cache lines
    // For ISAs such as x86 that requires AMO operations to work on
    // accesses that cross cache-line boundaries, the cache needs to be
    // modified to support locking both cache lines to guarantee the
    // atomicity.
    if (secondAddr > addr) {
        panic("AMO request should not access across a cache line boundary\n");
    }

    dcache_latency = 0;

    req->taskId(taskId());
    req->setVirt(0, addr, size, flags, dataMasterId(),
                 thread->pcState().instAddr(), amo_op);

    // translate to physical address
    Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                      BaseTLB::Write);

    // Now do the access.
    if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
        // We treat AMO accesses as Write accesses with SwapReq command
        // data will hold the return data of the AMO access
        Packet pkt(req, Packet::makeWriteCmd(req));
        pkt.dataStatic(data);

        if (req->isMmappedIpr())
            dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
        else {
            dcache_latency += sendPacket(dcachePort, &pkt);
        }

        dcache_access = true;

        assert(!pkt.isError());
        // AMOs are never combined with LL/SC semantics.
        assert(!req->isLLSC());
    }

    if (fault != NoFault && req->isPrefetch()) {
        return NoFault;
    }

    //If there's a fault and we're not doing prefetch, return it
    return fault;
}
5952623SN/A
/**
 * Main simulation loop of the atomic CPU.  Each invocation executes up
 * to 'width' instructions back-to-back (more while 'locked' is set),
 * accumulating modelled fetch/data latencies in ticks, then reschedules
 * the tick event at least one clock period in the future.
 */
void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    // Change thread if multi-threaded
    swapActiveThread();

    // Set memory request ids to current thread
    if (numThreads > 1) {
        ContextID cid = threadContexts[curThread]->contextId();

        ifetch_req->setContext(cid);
        data_read_req->setContext(cid);
        data_write_req->setContext(cid);
        data_amo_req->setContext(cid);
    }

    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    // Total simulated time consumed by this tick, in ticks (not cycles).
    Tick latency = 0;

    // Execute up to 'width' instructions; keep iterating past 'width'
    // while 'locked' is set (presumably an in-flight locked memory
    // sequence that must not be split across ticks — TODO confirm).
    for (int i = 0; i < width || locked; ++i) {
        numCycles++;
        updateCycleCounters(BaseCPU::CPU_STATE_ON);

        // Only poll for interrupts and PC events between instructions
        // that commit immediately; skip while a delayed-commit
        // instruction (e.g. mid-microcode, presumably) is in flight.
        if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
            checkForInterrupts();
            checkPcEventQueue();
        }

        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        // No new fetch is needed while executing out of the microcode
        // ROM or while micro-ops of a current macro-op remain.
        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req->taskId(taskId());
            setupFetchRequest(ifetch_req);
            // Atomic (functional, same-tick) translation; may fault.
            fault = thread->itb->translateAtomic(ifetch_req, thread->getTC(),
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                    icache_access = true;
                    Packet ifetch_pkt = Packet(ifetch_req, MemCmd::ReadReq);
                    ifetch_pkt.dataStatic(&inst);

                    icache_latency = sendPacket(icachePort, &ifetch_pkt);

                    assert(!ifetch_pkt.isError());

                    // ifetch_req is initialized to read the instruction directly
                    // into the CPU object's inst field.
                //}
            }

            preExecute();

            Tick stall_ticks = 0;
            if (curStaticInst) {
                fault = curStaticInst->execute(&t_info, traceData);

                // keep an instruction count
                if (fault == NoFault) {
                    countInst();
                    // Notify the "Commit" probe point registered in
                    // regProbePoints() of the retired instruction.
                    ppCommit->notify(std::make_pair(thread, curStaticInst));
                }
                else if (traceData && !DTRACE(ExecFaulting)) {
                    // Discard trace data of a faulting instruction unless
                    // the ExecFaulting debug flag says to keep it.
                    delete traceData;
                    traceData = NULL;
                }

                if (fault != NoFault &&
                    dynamic_pointer_cast<SyscallRetryFault>(fault)) {
                    // Retry execution of system calls after a delay.
                    // Prevents immediate re-execution since conditions which
                    // caused the retry are unlikely to change every tick.
                    stall_ticks += clockEdge(syscallRetryLatency) - curTick();
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            // Fold modelled cache latencies into the stall time when the
            // corresponding simulate-stall options are enabled.
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                    clockPeriod();
            }

        }
        // Advance the PC unless the instruction asked to stay put
        // (stayAtPC); on a fault, advancePC invokes the fault handling.
        if (fault != NoFault || !t_info.stayAtPC)
            advancePC(fault);
    }

    // If a drain was requested, stop ticking once it completes.
    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        reschedule(tickEvent, curTick() + latency, true);
}
7342623SN/A
73510381Sdam.sunwoo@arm.comvoid
73610381Sdam.sunwoo@arm.comAtomicSimpleCPU::regProbePoints()
73710381Sdam.sunwoo@arm.com{
73810464SAndreas.Sandberg@ARM.com    BaseCPU::regProbePoints();
73910464SAndreas.Sandberg@ARM.com
74010381Sdam.sunwoo@arm.com    ppCommit = new ProbePointArg<pair<SimpleThread*, const StaticInstPtr>>
74110381Sdam.sunwoo@arm.com                                (getProbeManager(), "Commit");
74210381Sdam.sunwoo@arm.com}
7432623SN/A
/**
 * Print state of address in memory system via the data-cache port
 * (debugging aid; delegates entirely to dcachePort).
 *
 * @param a Address to inspect.
 */
void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}
7495315Sstever@gmail.com
7502623SN/A////////////////////////////////////////////////////////////////////////
7512623SN/A//
7522623SN/A//  AtomicSimpleCPU Simulation Object
7532623SN/A//
/**
 * Factory method used by the generated params machinery to build an
 * AtomicSimpleCPU from this parameter object.
 *
 * @return Newly allocated CPU; ownership follows the usual SimObject
 *         conventions (managed by the simulator, not the caller).
 */
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    return new AtomicSimpleCPU(this);
}
759