// atomic.cc revision 10031:79d034cd6ba3
/*
 * Copyright (c) 2012-2013 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/simple/atomic.hh"
#include "cpu/exetrace.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"
#include "sim/full_system.hh"

using namespace std;
using namespace TheISA;

6511317Sm.alian1369@gmail.comAtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
6611317Sm.alian1369@gmail.com    : Event(CPU_Tick_Pri), cpu(c)
6711317Sm.alian1369@gmail.com{
6811317Sm.alian1369@gmail.com}
6911317Sm.alian1369@gmail.com
7011317Sm.alian1369@gmail.com
7111317Sm.alian1369@gmail.comvoid
7211317Sm.alian1369@gmail.comAtomicSimpleCPU::TickEvent::process()
7311317Sm.alian1369@gmail.com{
7411317Sm.alian1369@gmail.com    cpu->tick();
7511317Sm.alian1369@gmail.com}
7611317Sm.alian1369@gmail.com
7711533Sm.alian1369@gmail.comconst char *
7811533Sm.alian1369@gmail.comAtomicSimpleCPU::TickEvent::description() const
7911533Sm.alian1369@gmail.com{
8011533Sm.alian1369@gmail.com    return "AtomicSimpleCPU tick";
8111533Sm.alian1369@gmail.com}
8211533Sm.alian1369@gmail.com
8311533Sm.alian1369@gmail.comvoid
8411533Sm.alian1369@gmail.comAtomicSimpleCPU::init()
8511533Sm.alian1369@gmail.com{
8611533Sm.alian1369@gmail.com    BaseCPU::init();
8711533Sm.alian1369@gmail.com
8811533Sm.alian1369@gmail.com    // Initialise the ThreadContext's memory proxies
8911533Sm.alian1369@gmail.com    tcBase()->initMemProxies(tcBase());
9011533Sm.alian1369@gmail.com
9111533Sm.alian1369@gmail.com    if (FullSystem && !params()->switched_out) {
9211533Sm.alian1369@gmail.com        ThreadID size = threadContexts.size();
9311533Sm.alian1369@gmail.com        for (ThreadID i = 0; i < size; ++i) {
9411533Sm.alian1369@gmail.com            ThreadContext *tc = threadContexts[i];
9511533Sm.alian1369@gmail.com            // initialize CPU, including PC
9611533Sm.alian1369@gmail.com            TheISA::initCPU(tc, tc->contextId());
9711533Sm.alian1369@gmail.com        }
9811533Sm.alian1369@gmail.com    }
9911533Sm.alian1369@gmail.com
10011533Sm.alian1369@gmail.com    // Atomic doesn't do MT right now, so contextId == threadId
10111533Sm.alian1369@gmail.com    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
10211533Sm.alian1369@gmail.com    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
10311533Sm.alian1369@gmail.com    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
10411533Sm.alian1369@gmail.com}
10511533Sm.alian1369@gmail.com
10611533Sm.alian1369@gmail.comAtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
10711533Sm.alian1369@gmail.com    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
10811533Sm.alian1369@gmail.com      simulate_data_stalls(p->simulate_data_stalls),
10911533Sm.alian1369@gmail.com      simulate_inst_stalls(p->simulate_inst_stalls),
11011533Sm.alian1369@gmail.com      drain_manager(NULL),
11111533Sm.alian1369@gmail.com      icachePort(name() + ".icache_port", this),
11211533Sm.alian1369@gmail.com      dcachePort(name() + ".dcache_port", this),
11311533Sm.alian1369@gmail.com      fastmem(p->fastmem),
11411533Sm.alian1369@gmail.com      simpoint(p->simpoint_profile),
11511533Sm.alian1369@gmail.com      intervalSize(p->simpoint_interval),
11611533Sm.alian1369@gmail.com      intervalCount(0),
11711533Sm.alian1369@gmail.com      intervalDrift(0),
11811533Sm.alian1369@gmail.com      simpointStream(NULL),
11911533Sm.alian1369@gmail.com      currentBBV(0, 0),
12011533Sm.alian1369@gmail.com      currentBBVInstCount(0)
12111533Sm.alian1369@gmail.com{
12211533Sm.alian1369@gmail.com    _status = Idle;
12311533Sm.alian1369@gmail.com
12411533Sm.alian1369@gmail.com    if (simpoint) {
12511533Sm.alian1369@gmail.com        simpointStream = simout.create(p->simpoint_profile_file, false);
12611533Sm.alian1369@gmail.com    }
12711533Sm.alian1369@gmail.com}
12811317Sm.alian1369@gmail.com
12911317Sm.alian1369@gmail.com
13011317Sm.alian1369@gmail.comAtomicSimpleCPU::~AtomicSimpleCPU()
13111533Sm.alian1369@gmail.com{
13211317Sm.alian1369@gmail.com    if (tickEvent.scheduled()) {
13311533Sm.alian1369@gmail.com        deschedule(tickEvent);
13412087Sspwilson2@wisc.edu    }
13512087Sspwilson2@wisc.edu    if (simpointStream) {
13611317Sm.alian1369@gmail.com        simout.close(simpointStream);
13711317Sm.alian1369@gmail.com    }
13811317Sm.alian1369@gmail.com}
13911317Sm.alian1369@gmail.com
14011317Sm.alian1369@gmail.comunsigned int
14111317Sm.alian1369@gmail.comAtomicSimpleCPU::drain(DrainManager *dm)
14211317Sm.alian1369@gmail.com{
14311317Sm.alian1369@gmail.com    assert(!drain_manager);
14411317Sm.alian1369@gmail.com    if (switchedOut())
14511317Sm.alian1369@gmail.com        return 0;
14611317Sm.alian1369@gmail.com
14711317Sm.alian1369@gmail.com    if (!isDrained()) {
14811317Sm.alian1369@gmail.com        DPRINTF(Drain, "Requesting drain: %s\n", pcState());
14911317Sm.alian1369@gmail.com        drain_manager = dm;
15011317Sm.alian1369@gmail.com        return 1;
15111533Sm.alian1369@gmail.com    } else {
15211317Sm.alian1369@gmail.com        if (tickEvent.scheduled())
15311317Sm.alian1369@gmail.com            deschedule(tickEvent);
15411317Sm.alian1369@gmail.com
15511317Sm.alian1369@gmail.com        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
15611317Sm.alian1369@gmail.com        return 0;
15711533Sm.alian1369@gmail.com    }
15811317Sm.alian1369@gmail.com}
15911317Sm.alian1369@gmail.com
16011317Sm.alian1369@gmail.comvoid
16111317Sm.alian1369@gmail.comAtomicSimpleCPU::drainResume()
16211317Sm.alian1369@gmail.com{
16311317Sm.alian1369@gmail.com    assert(!tickEvent.scheduled());
16411317Sm.alian1369@gmail.com    assert(!drain_manager);
16511317Sm.alian1369@gmail.com    if (switchedOut())
16611317Sm.alian1369@gmail.com        return;
16711533Sm.alian1369@gmail.com
16811317Sm.alian1369@gmail.com    DPRINTF(SimpleCPU, "Resume\n");
16911317Sm.alian1369@gmail.com    verifyMemoryMode();
17011533Sm.alian1369@gmail.com
17111533Sm.alian1369@gmail.com    assert(!threadContexts.empty());
17211533Sm.alian1369@gmail.com    if (threadContexts.size() > 1)
17311533Sm.alian1369@gmail.com        fatal("The atomic CPU only supports one thread.\n");
17411533Sm.alian1369@gmail.com
17511533Sm.alian1369@gmail.com    if (thread->status() == ThreadContext::Active) {
17611317Sm.alian1369@gmail.com        schedule(tickEvent, nextCycle());
17711533Sm.alian1369@gmail.com        _status = BaseSimpleCPU::Running;
17811562Sm.alian1369@gmail.com        notIdleFraction = 1;
17911317Sm.alian1369@gmail.com    } else {
18011317Sm.alian1369@gmail.com        _status = BaseSimpleCPU::Idle;
18111317Sm.alian1369@gmail.com        notIdleFraction = 0;
18211317Sm.alian1369@gmail.com    }
18311317Sm.alian1369@gmail.com
18411317Sm.alian1369@gmail.com    system->totalNumInsts = 0;
18511317Sm.alian1369@gmail.com}
18611317Sm.alian1369@gmail.com
18711317Sm.alian1369@gmail.combool
18811317Sm.alian1369@gmail.comAtomicSimpleCPU::tryCompleteDrain()
18911317Sm.alian1369@gmail.com{
19011317Sm.alian1369@gmail.com    if (!drain_manager)
19111317Sm.alian1369@gmail.com        return false;
19211317Sm.alian1369@gmail.com
19311317Sm.alian1369@gmail.com    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
19411317Sm.alian1369@gmail.com    if (!isDrained())
19511317Sm.alian1369@gmail.com        return false;
19611317Sm.alian1369@gmail.com
19711317Sm.alian1369@gmail.com    DPRINTF(Drain, "CPU done draining, processing drain event\n");
19811317Sm.alian1369@gmail.com    drain_manager->signalDrainDone();
19911317Sm.alian1369@gmail.com    drain_manager = NULL;
20011317Sm.alian1369@gmail.com
20111317Sm.alian1369@gmail.com    return true;
20211317Sm.alian1369@gmail.com}
20311317Sm.alian1369@gmail.com
20411317Sm.alian1369@gmail.com
20511317Sm.alian1369@gmail.comvoid
20611701Smichael.lebeane@amd.comAtomicSimpleCPU::switchOut()
20711317Sm.alian1369@gmail.com{
20811317Sm.alian1369@gmail.com    BaseSimpleCPU::switchOut();
20911317Sm.alian1369@gmail.com
21011317Sm.alian1369@gmail.com    assert(!tickEvent.scheduled());
21111317Sm.alian1369@gmail.com    assert(_status == BaseSimpleCPU::Running || _status == Idle);
21211317Sm.alian1369@gmail.com    assert(isDrained());
21311317Sm.alian1369@gmail.com}
21411317Sm.alian1369@gmail.com
21511317Sm.alian1369@gmail.com
21611317Sm.alian1369@gmail.comvoid
21711317Sm.alian1369@gmail.comAtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
21811317Sm.alian1369@gmail.com{
21911317Sm.alian1369@gmail.com    BaseSimpleCPU::takeOverFrom(oldCPU);
22011317Sm.alian1369@gmail.com
22111317Sm.alian1369@gmail.com    // The tick event should have been descheduled by drain()
22211317Sm.alian1369@gmail.com    assert(!tickEvent.scheduled());
22311317Sm.alian1369@gmail.com
22411317Sm.alian1369@gmail.com    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
22511317Sm.alian1369@gmail.com    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
22611317Sm.alian1369@gmail.com    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
22711317Sm.alian1369@gmail.com}
22811317Sm.alian1369@gmail.com
22911317Sm.alian1369@gmail.comvoid
23011317Sm.alian1369@gmail.comAtomicSimpleCPU::verifyMemoryMode() const
23111317Sm.alian1369@gmail.com{
23211317Sm.alian1369@gmail.com    if (!system->isAtomicMode()) {
23311317Sm.alian1369@gmail.com        fatal("The atomic CPU requires the memory system to be in "
23411317Sm.alian1369@gmail.com              "'atomic' mode.\n");
23511317Sm.alian1369@gmail.com    }
23611317Sm.alian1369@gmail.com}
23711317Sm.alian1369@gmail.com
23811317Sm.alian1369@gmail.comvoid
23911317Sm.alian1369@gmail.comAtomicSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
24011317Sm.alian1369@gmail.com{
24111317Sm.alian1369@gmail.com    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
24211317Sm.alian1369@gmail.com
24311317Sm.alian1369@gmail.com    assert(thread_num == 0);
24411317Sm.alian1369@gmail.com    assert(thread);
24511317Sm.alian1369@gmail.com
24611317Sm.alian1369@gmail.com    assert(_status == Idle);
24711317Sm.alian1369@gmail.com    assert(!tickEvent.scheduled());
24811317Sm.alian1369@gmail.com
24911317Sm.alian1369@gmail.com    notIdleFraction = 1;
25011317Sm.alian1369@gmail.com    numCycles += ticksToCycles(thread->lastActivate - thread->lastSuspend);
25111317Sm.alian1369@gmail.com
25211317Sm.alian1369@gmail.com    //Make sure ticks are still on multiples of cycles
25311317Sm.alian1369@gmail.com    schedule(tickEvent, clockEdge(delay));
25411317Sm.alian1369@gmail.com    _status = BaseSimpleCPU::Running;
25511317Sm.alian1369@gmail.com}
25611317Sm.alian1369@gmail.com
25711317Sm.alian1369@gmail.com
25811317Sm.alian1369@gmail.comvoid
25911317Sm.alian1369@gmail.comAtomicSimpleCPU::suspendContext(ThreadID thread_num)
26011317Sm.alian1369@gmail.com{
26111317Sm.alian1369@gmail.com    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);
26211317Sm.alian1369@gmail.com
26311317Sm.alian1369@gmail.com    assert(thread_num == 0);
26411317Sm.alian1369@gmail.com    assert(thread);
26511533Sm.alian1369@gmail.com
26611533Sm.alian1369@gmail.com    if (_status == Idle)
26711317Sm.alian1369@gmail.com        return;
26811317Sm.alian1369@gmail.com
26911317Sm.alian1369@gmail.com    assert(_status == BaseSimpleCPU::Running);
27011317Sm.alian1369@gmail.com
27111317Sm.alian1369@gmail.com    // tick event may not be scheduled if this gets called from inside
27211317Sm.alian1369@gmail.com    // an instruction's execution, e.g. "quiesce"
27311533Sm.alian1369@gmail.com    if (tickEvent.scheduled())
27411533Sm.alian1369@gmail.com        deschedule(tickEvent);
27511317Sm.alian1369@gmail.com
27611317Sm.alian1369@gmail.com    notIdleFraction = 0;
27711317Sm.alian1369@gmail.com    _status = Idle;
27811533Sm.alian1369@gmail.com}
27911317Sm.alian1369@gmail.com
28011317Sm.alian1369@gmail.com
28111533Sm.alian1369@gmail.comTick
28211533Sm.alian1369@gmail.comAtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
28311317Sm.alian1369@gmail.com{
28411317Sm.alian1369@gmail.com    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
28511533Sm.alian1369@gmail.com            pkt->cmdString());
28611317Sm.alian1369@gmail.com
28711533Sm.alian1369@gmail.com    // if snoop invalidates, release any associated locks
28811317Sm.alian1369@gmail.com    if (pkt->isInvalidate()) {
28911317Sm.alian1369@gmail.com        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
29011317Sm.alian1369@gmail.com                pkt->getAddr());
29111533Sm.alian1369@gmail.com        TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
29211317Sm.alian1369@gmail.com    }
29311317Sm.alian1369@gmail.com
29411533Sm.alian1369@gmail.com    return 0;
29511533Sm.alian1369@gmail.com}
29611317Sm.alian1369@gmail.com
29711317Sm.alian1369@gmail.comvoid
29811533Sm.alian1369@gmail.comAtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
29911317Sm.alian1369@gmail.com{
30011317Sm.alian1369@gmail.com    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
30111533Sm.alian1369@gmail.com            pkt->cmdString());
30211533Sm.alian1369@gmail.com
30311317Sm.alian1369@gmail.com    // if snoop invalidates, release any associated locks
30411533Sm.alian1369@gmail.com    if (pkt->isInvalidate()) {
30511533Sm.alian1369@gmail.com        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
30611533Sm.alian1369@gmail.com                pkt->getAddr());
30711533Sm.alian1369@gmail.com        TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
30811533Sm.alian1369@gmail.com    }
30911533Sm.alian1369@gmail.com}
31011533Sm.alian1369@gmail.com
/**
 * Atomic-mode load.
 *
 * Translates the virtual address and performs the access atomically
 * through the data port (or directly against physical memory when
 * fastmem is enabled). Accesses that straddle a cache line boundary
 * are split: the loop body runs once per line, advancing 'data',
 * 'size' and 'addr' for the second part.
 *
 * @param addr  virtual address to read from
 * @param data  destination buffer (at least 'size' bytes)
 * @param size  number of bytes to read
 * @param flags request flags (LLSC, mmapped IPR, prefetch, ...)
 * @return NoFault on success (including faulting prefetches, which
 *         are silently dropped), otherwise the translation fault
 */
Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data,
                         unsigned size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    // If the last byte falls on a later line, shrink the first access
    // to end exactly at the line boundary.
    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt = Packet(req,
                                req->isLLSC() ? MemCmd::LoadLockedReq :
                                MemCmd::ReadReq);
            pkt.dataStatic(data);

            // Memory-mapped IPR registers bypass the memory system.
            if (req->isMmappedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (fastmem && system->isMemAddr(pkt.getAddr()))
                    system->getPhysMem().access(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            // Record the LL reservation for a later store-conditional.
            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            // Faulting prefetches are dropped rather than reported.
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            // A successful locked (RMW) read acquires the CPU lock;
            // the matching writeMem() releases it.
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


398Fault
399AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
400                          Addr addr, unsigned flags, uint64_t *res)
401{
402
403    static uint8_t zero_array[64] = {};
404
405    if (data == NULL) {
406        assert(size <= 64);
407        assert(flags & Request::CACHE_BLOCK_ZERO);
408        // This must be a cache block cleaning request
409        data = zero_array;
410    }
411
412    // use the CPU's statically allocated write request and packet objects
413    Request *req = &data_write_req;
414
415    if (traceData) {
416        traceData->setAddr(addr);
417    }
418
419    //The size of the data we're trying to read.
420    int fullSize = size;
421
422    //The address of the second part of this access if it needs to be split
423    //across a cache line boundary.
424    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());
425
426    if(secondAddr > addr)
427        size = secondAddr - addr;
428
429    dcache_latency = 0;
430
431    req->taskId(taskId());
432    while(1) {
433        req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr());
434
435        // translate to physical address
436        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);
437
438        // Now do the access.
439        if (fault == NoFault) {
440            MemCmd cmd = MemCmd::WriteReq; // default
441            bool do_access = true;  // flag to suppress cache access
442
443            if (req->isLLSC()) {
444                cmd = MemCmd::StoreCondReq;
445                do_access = TheISA::handleLockedWrite(thread, req, dcachePort.cacheBlockMask);
446            } else if (req->isSwap()) {
447                cmd = MemCmd::SwapReq;
448                if (req->isCondSwap()) {
449                    assert(res);
450                    req->setExtraData(*res);
451                }
452            }
453
454            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
455                Packet pkt = Packet(req, cmd);
456                pkt.dataStatic(data);
457
458                if (req->isMmappedIpr()) {
459                    dcache_latency +=
460                        TheISA::handleIprWrite(thread->getTC(), &pkt);
461                } else {
462                    if (fastmem && system->isMemAddr(pkt.getAddr()))
463                        system->getPhysMem().access(&pkt);
464                    else
465                        dcache_latency += dcachePort.sendAtomic(&pkt);
466                }
467                dcache_access = true;
468                assert(!pkt.isError());
469
470                if (req->isSwap()) {
471                    assert(res);
472                    memcpy(res, pkt.getPtr<uint8_t>(), fullSize);
473                }
474            }
475
476            if (res && !req->isSwap()) {
477                *res = req->getExtraData();
478            }
479        }
480
481        //If there's a fault or we don't need to access a second cache line,
482        //stop now.
483        if (fault != NoFault || secondAddr <= addr)
484        {
485            if (req->isLocked() && fault == NoFault) {
486                assert(locked);
487                locked = false;
488            }
489            if (fault != NoFault && req->isPrefetch()) {
490                return NoFault;
491            } else {
492                return fault;
493            }
494        }
495
496        /*
497         * Set up for accessing the second cache line.
498         */
499
500        //Move the pointer we're reading into to the correct location.
501        data += size;
502        //Adjust the size to get the remaining bytes.
503        size = addr + fullSize - secondAddr;
504        //And access the right address.
505        addr = secondAddr;
506    }
507}
508
509
510void
511AtomicSimpleCPU::tick()
512{
513    DPRINTF(SimpleCPU, "Tick\n");
514
515    Tick latency = 0;
516
517    for (int i = 0; i < width || locked; ++i) {
518        numCycles++;
519
520        if (!curStaticInst || !curStaticInst->isDelayedCommit())
521            checkForInterrupts();
522
523        checkPcEventQueue();
524        // We must have just got suspended by a PC event
525        if (_status == Idle) {
526            tryCompleteDrain();
527            return;
528        }
529
530        Fault fault = NoFault;
531
532        TheISA::PCState pcState = thread->pcState();
533
534        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
535                           !curMacroStaticInst;
536        if (needToFetch) {
537            ifetch_req.taskId(taskId());
538            setupFetchRequest(&ifetch_req);
539            fault = thread->itb->translateAtomic(&ifetch_req, tc,
540                                                 BaseTLB::Execute);
541        }
542
543        if (fault == NoFault) {
544            Tick icache_latency = 0;
545            bool icache_access = false;
546            dcache_access = false; // assume no dcache access
547
548            if (needToFetch) {
549                // This is commented out because the decoder would act like
550                // a tiny cache otherwise. It wouldn't be flushed when needed
551                // like the I cache. It should be flushed, and when that works
552                // this code should be uncommented.
553                //Fetch more instruction memory if necessary
554                //if(decoder.needMoreBytes())
555                //{
556                    icache_access = true;
557                    Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
558                    ifetch_pkt.dataStatic(&inst);
559
560                    if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
561                        system->getPhysMem().access(&ifetch_pkt);
562                    else
563                        icache_latency = icachePort.sendAtomic(&ifetch_pkt);
564
565                    assert(!ifetch_pkt.isError());
566
567                    // ifetch_req is initialized to read the instruction directly
568                    // into the CPU object's inst field.
569                //}
570            }
571
572            preExecute();
573
574            if (curStaticInst) {
575                fault = curStaticInst->execute(this, traceData);
576
577                // keep an instruction count
578                if (fault == NoFault)
579                    countInst();
580                else if (traceData && !DTRACE(ExecFaulting)) {
581                    delete traceData;
582                    traceData = NULL;
583                }
584
585                postExecute();
586            }
587
588            // @todo remove me after debugging with legion done
589            if (curStaticInst && (!curStaticInst->isMicroop() ||
590                        curStaticInst->isFirstMicroop()))
591                instCnt++;
592
593            // profile for SimPoints if enabled and macro inst is finished
594            if (simpoint && curStaticInst && (fault == NoFault) &&
595                    (!curStaticInst->isMicroop() ||
596                     curStaticInst->isLastMicroop())) {
597                profileSimPoint();
598            }
599
600            Tick stall_ticks = 0;
601            if (simulate_inst_stalls && icache_access)
602                stall_ticks += icache_latency;
603
604            if (simulate_data_stalls && dcache_access)
605                stall_ticks += dcache_latency;
606
607            if (stall_ticks) {
608                // the atomic cpu does its accounting in ticks, so
609                // keep counting in ticks but round to the clock
610                // period
611                latency += divCeil(stall_ticks, clockPeriod()) *
612                    clockPeriod();
613            }
614
615        }
616        if(fault != NoFault || !stayAtPC)
617            advancePC(fault);
618    }
619
620    if (tryCompleteDrain())
621        return;
622
623    // instruction takes at least one cycle
624    if (latency < clockPeriod())
625        latency = clockPeriod();
626
627    if (_status != Idle)
628        schedule(tickEvent, curTick() + latency);
629}
630
631
632void
633AtomicSimpleCPU::printAddr(Addr a)
634{
635    dcachePort.printAddr(a);
636}
637
638void
639AtomicSimpleCPU::profileSimPoint()
640{
641    if (!currentBBVInstCount)
642        currentBBV.first = thread->pcState().instAddr();
643
644    ++intervalCount;
645    ++currentBBVInstCount;
646
647    // If inst is control inst, assume end of basic block.
648    if (curStaticInst->isControl()) {
649        currentBBV.second = thread->pcState().instAddr();
650
651        auto map_itr = bbMap.find(currentBBV);
652        if (map_itr == bbMap.end()){
653            // If a new (previously unseen) basic block is found,
654            // add a new unique id, record num of insts and insert into bbMap.
655            BBInfo info;
656            info.id = bbMap.size() + 1;
657            info.insts = currentBBVInstCount;
658            info.count = currentBBVInstCount;
659            bbMap.insert(std::make_pair(currentBBV, info));
660        } else {
661            // If basic block is seen before, just increment the count by the
662            // number of insts in basic block.
663            BBInfo& info = map_itr->second;
664            info.count += currentBBVInstCount;
665        }
666        currentBBVInstCount = 0;
667
668        // Reached end of interval if the sum of the current inst count
669        // (intervalCount) and the excessive inst count from the previous
670        // interval (intervalDrift) is greater than/equal to the interval size.
671        if (intervalCount + intervalDrift >= intervalSize) {
672            // summarize interval and display BBV info
673            std::vector<pair<uint64_t, uint64_t> > counts;
674            for (auto map_itr = bbMap.begin(); map_itr != bbMap.end();
675                    ++map_itr) {
676                BBInfo& info = map_itr->second;
677                if (info.count != 0) {
678                    counts.push_back(std::make_pair(info.id, info.count));
679                    info.count = 0;
680                }
681            }
682            std::sort(counts.begin(), counts.end());
683
684            // Print output BBV info
685            *simpointStream << "T";
686            for (auto cnt_itr = counts.begin(); cnt_itr != counts.end();
687                    ++cnt_itr) {
688                *simpointStream << ":" << cnt_itr->first
689                                << ":" << cnt_itr->second << " ";
690            }
691            *simpointStream << "\n";
692
693            intervalDrift = (intervalCount + intervalDrift) - intervalSize;
694            intervalCount = 0;
695        }
696    }
697}
698
699////////////////////////////////////////////////////////////////////////
700//
701//  AtomicSimpleCPU Simulation Object
702//
703AtomicSimpleCPU *
704AtomicSimpleCPUParams::create()
705{
706    numThreads = 1;
707    if (!FullSystem && workload.size() != 1)
708        panic("only one workload allowed");
709    return new AtomicSimpleCPU(this);
710}
711