/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 */

#include <iostream>
#include <string>
#include <sstream>

#include "base/cprintf.hh"
#include "base/loader/symtab.hh"
#include "base/misc.hh"
#include "base/output.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/cpuevent.hh"
#include "cpu/thread_context.hh"
#include "cpu/profile.hh"
#include "params/BaseCPU.hh"
#include "sim/sim_exit.hh"
#include "sim/process.hh"
#include "sim/sim_events.hh"
#include "sim/system.hh"

// Hack
#include "sim/stat_control.hh"

using namespace std;

vector<BaseCPU *> BaseCPU::cpuList;

// This variable reflects the max number of threads in any CPU.  Be
// careful to only use it once all the CPUs that you care about have
// been initialized
int maxThreadsPerCPU = 1;

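// Helper event that periodically reports how many instructions this CPU
// has committed (and, in debug builds, the IPC over the interval).  It
// schedules itself at construction if an interval is given and then
// re-schedules itself every 'interval' ticks each time it fires.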
CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
    : Event(Event::Progress_Event_Pri), interval(ival), lastNumInst(0),
      cpu(_cpu)
{
    if (interval)
        cpu->schedule(this, curTick + interval);
}

void
CPUProgressEvent::process()
{
    Counter temp = cpu->totalInstructions();
#ifndef NDEBUG
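    // interval is expressed in ticks and ticks(1) is the length of one
    // CPU cycle in ticks, so (interval / cpu->ticks(1)) is the number of
    // cycles in the reporting window; dividing the committed-instruction
    // delta by it gives the IPC over that window.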
    double ipc = double(temp - lastNumInst) / (interval / cpu->ticks(1));

    DPRINTFN("%s progress event, instructions committed: %lli, IPC: %0.8f\n",
             cpu->name(), temp - lastNumInst, ipc);
    ipc = 0.0;
#else
    cprintf("%lli: %s progress event, instructions committed: %lli\n",
            curTick, cpu->name(), temp - lastNumInst);
#endif
    lastNumInst = temp;
    cpu->schedule(this, curTick + interval);
}

const char *
CPUProgressEvent::description() const
{
    return "CPU Progress";
}

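// The constructor comes in two flavors: the full-system build
// additionally initializes the interrupt controller pointer and the
// instCnt counter used when checkpointing; the syscall-emulation build
// does not.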
#if FULL_SYSTEM
BaseCPU::BaseCPU(Params *p)
    : MemObject(p), clock(p->clock), instCnt(0), _cpuId(p->cpu_id),
      interrupts(p->interrupts),
      number_of_threads(p->numThreads), system(p->system),
      phase(p->phase)
#else
BaseCPU::BaseCPU(Params *p)
    : MemObject(p), clock(p->clock), _cpuId(p->cpu_id),
      number_of_threads(p->numThreads), system(p->system),
      phase(p->phase)
#endif
{
//    currentTick = curTick;

    // if Python did not provide a valid ID, do it here
    if (_cpuId == -1) {
        _cpuId = cpuList.size();
    }

    // add self to global list of CPUs
    cpuList.push_back(this);

    DPRINTF(SyscallVerbose, "Constructing CPU with id %d\n", _cpuId);

    if (number_of_threads > maxThreadsPerCPU)
        maxThreadsPerCPU = number_of_threads;

    // allocate per-thread instruction-based event queues
    comInstEventQueue = new EventQueue *[number_of_threads];
    for (int i = 0; i < number_of_threads; ++i)
        comInstEventQueue[i] = new EventQueue("instruction-based event queue");

    //
    // set up instruction-count-based termination events, if any
    //
    if (p->max_insts_any_thread != 0) {
        const char *cause = "a thread reached the max instruction count";
        for (int i = 0; i < number_of_threads; ++i) {
            Event *event = new SimLoopExitEvent(cause, 0);
            comInstEventQueue[i]->schedule(event, p->max_insts_any_thread);
        }
    }

    if (p->max_insts_all_threads != 0) {
        const char *cause = "all threads reached the max instruction count";

        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = number_of_threads;
        for (int i = 0; i < number_of_threads; ++i) {
            Event *event = new CountedExitEvent(cause, *counter);
            comInstEventQueue[i]->schedule(event, p->max_insts_all_threads);
        }
    }

    // allocate per-thread load-based event queues
    comLoadEventQueue = new EventQueue *[number_of_threads];
    for (int i = 0; i < number_of_threads; ++i)
        comLoadEventQueue[i] = new EventQueue("load-based event queue");

    //
    // set up load-count-based termination events, if any
    //
    if (p->max_loads_any_thread != 0) {
        const char *cause = "a thread reached the max load count";
        for (int i = 0; i < number_of_threads; ++i) {
            Event *event = new SimLoopExitEvent(cause, 0);
            comLoadEventQueue[i]->schedule(event, p->max_loads_any_thread);
        }
    }

    if (p->max_loads_all_threads != 0) {
        const char *cause = "all threads reached the max load count";
        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = number_of_threads;
        for (int i = 0; i < number_of_threads; ++i) {
            Event *event = new CountedExitEvent(cause, *counter);
            comLoadEventQueue[i]->schedule(event, p->max_loads_all_threads);
        }
    }

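    // Optional function-call tracing: output goes to an "ftrace.<name>"
    // file in the simulation output directory.  Tracing starts
    // immediately if function_trace_start is 0; otherwise a one-shot
    // event enables it at the requested tick.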
    functionTracingEnabled = false;
    if (p->function_trace) {
        functionTraceStream = simout.find(csprintf("ftrace.%s", name()));
        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->function_trace_start;

        if (p->function_trace_start == 0) {
            functionTracingEnabled = true;
        } else {
            typedef EventWrapper<BaseCPU, &BaseCPU::enableFunctionTrace> wrap;
            Event *event = new wrap(this, true);
            schedule(event, p->function_trace_start);
        }
    }
#if FULL_SYSTEM
    interrupts->setCPU(this);

    profileEvent = NULL;
    if (params()->profile)
        profileEvent = new ProfileEvent(this, params()->profile);
#endif
    tracer = params()->tracer;
}

void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}

BaseCPU::~BaseCPU()
{
}

void
BaseCPU::init()
{
    if (!params()->defer_registration)
        registerThreadContexts();
}

void
BaseCPU::startup()
{
#if FULL_SYSTEM
    if (!params()->defer_registration && profileEvent)
        schedule(profileEvent, curTick);
#endif

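    // Kick off the recurring progress report; CPUProgressEvent
    // re-schedules itself every interval after this first firing.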
    if (params()->progress_interval) {
        Tick num_ticks = ticks(params()->progress_interval);
        Event *event = new CPUProgressEvent(this, num_ticks);
        schedule(event, curTick + num_ticks);
    }
}


void
BaseCPU::regStats()
{
    using namespace Stats;

    numCycles
        .name(name() + ".numCycles")
        .desc("number of cpu cycles simulated")
        ;

    int size = threadContexts.size();
    if (size > 1) {
        for (int i = 0; i < size; ++i) {
            stringstream namestr;
            ccprintf(namestr, "%s.ctx%d", name(), i);
            threadContexts[i]->regStats(namestr.str());
        }
    } else if (size == 1)
        threadContexts[0]->regStats(name());

#if FULL_SYSTEM
#endif
}

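// Round the current tick up to the next edge of this CPU's clock,
// taking the clock phase into account.  If curTick is already on an
// edge, curTick itself is returned.  For example (illustrative numbers
// only): with clock = 500, phase = 0 and curTick = 1200, this returns
// 1500; with curTick = 1500 it returns 1500.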
Tick
BaseCPU::nextCycle()
{
    Tick next_tick = curTick - phase + clock - 1;
    next_tick -= (next_tick % clock);
    next_tick += phase;
    return next_tick;
}

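// Same idea, but starting from an arbitrary tick: round begin_tick up
// to the next multiple of the clock period, then add the clock phase.
// For example (illustrative numbers only): clock = 500, phase = 0 and
// begin_tick = 1200 gives 1500, while an already-aligned begin_tick of
// 1000 gives 1000.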
Tick
BaseCPU::nextCycle(Tick begin_tick)
{
    Tick next_tick = begin_tick;
    if (next_tick % clock != 0)
        next_tick = next_tick - (next_tick % clock) + clock;
    next_tick += phase;

    assert(next_tick >= curTick);
    return next_tick;
}

void
BaseCPU::registerThreadContexts()
{
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

        /** This is so that contextId and cpuId match where there is a
         * 1cpu:1context relationship.  Otherwise, the order of registration
         * could affect the assignment and cpu 1 could have context id 3, for
         * example.  We may even want to do something like this for SMT so that
         * cpu 0 has the lowest thread contexts and cpu N has the highest, but
         * I'll just do this for now
         */
        if (number_of_threads == 1)
            tc->setContextId(system->registerThreadContext(tc, _cpuId));
        else
            tc->setContextId(system->registerThreadContext(tc));
#if !FULL_SYSTEM
        tc->getProcessPtr()->assignThreadContext(tc->contextId());
#endif
    }
}


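// Return the index of the given thread context on this CPU.  Note that
// an unknown context silently maps to index 0.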
int
BaseCPU::findContext(ThreadContext *tc)
{
    for (int i = 0; i < threadContexts.size(); ++i) {
        if (tc == threadContexts[i])
            return i;
    }
    return 0;
}

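// Called when this CPU is being switched out (e.g., for CPU model
// switching); make sure the periodic profiling event does not stay
// scheduled.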
void
BaseCPU::switchOut()
{
//    panic("This CPU doesn't support sampling!");
#if FULL_SYSTEM
    if (profileEvent && profileEvent->scheduled())
        deschedule(profileEvent);
#endif
}

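// Take over simulation state from oldCPU: copy each thread context,
// move the interrupt controller over (full-system only), and, if this
// CPU's cache ports are not yet connected, splice them into oldCPU's
// memory connections.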
void
BaseCPU::takeOverFrom(BaseCPU *oldCPU, Port *ic, Port *dc)
{
    assert(threadContexts.size() == oldCPU->threadContexts.size());

    _cpuId = oldCPU->cpuId();

    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *newTC = threadContexts[i];
        ThreadContext *oldTC = oldCPU->threadContexts[i];

        newTC->takeOverFrom(oldTC);

        CpuEvent::replaceThreadContext(oldTC, newTC);

        assert(newTC->contextId() == oldTC->contextId());
        assert(newTC->threadId() == oldTC->threadId());
        system->replaceThreadContext(newTC, newTC->contextId());

        /* This code no longer works since the zero register (e.g.,
         * r31 on Alpha) doesn't necessarily contain zero at this
         * point.
           if (DTRACE(Context))
            ThreadContext::compare(oldTC, newTC);
        */
    }

#if FULL_SYSTEM
    interrupts = oldCPU->interrupts;
    interrupts->setCPU(this);

    for (int i = 0; i < threadContexts.size(); ++i)
        threadContexts[i]->profileClear();

    if (profileEvent)
        schedule(profileEvent, curTick);
#endif

    // Connect new CPU to old CPU's memory only if new CPU isn't
    // connected to anything.  Also connect old CPU's memory to new
    // CPU.
    if (!ic->isConnected()) {
        Port *peer = oldCPU->getPort("icache_port")->getPeer();
        ic->setPeer(peer);
        peer->setPeer(ic);
    }

    if (!dc->isConnected()) {
        Port *peer = oldCPU->getPort("dcache_port")->getPeer();
        dc->setPeer(peer);
        peer->setPeer(dc);
    }
}


#if FULL_SYSTEM
BaseCPU::ProfileEvent::ProfileEvent(BaseCPU *_cpu, Tick _interval)
    : cpu(_cpu), interval(_interval)
{ }

void
BaseCPU::ProfileEvent::process()
{
    for (int i = 0, size = cpu->threadContexts.size(); i < size; ++i) {
        ThreadContext *tc = cpu->threadContexts[i];
        tc->profileSample();
    }

    cpu->schedule(this, curTick + interval);
}

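// Checkpointing for the base CPU (full-system only): save/restore the
// instruction counter and delegate interrupt state to the interrupt
// controller.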
void
BaseCPU::serialize(std::ostream &os)
{
    SERIALIZE_SCALAR(instCnt);
    interrupts->serialize(os);
}

void
BaseCPU::unserialize(Checkpoint *cp, const std::string &section)
{
    UNSERIALIZE_SCALAR(instCnt);
    interrupts->unserialize(cp, section);
}

#endif // FULL_SYSTEM

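// Emit one function-trace record per function transition.  Each record
// shows the time spent in the previous function followed by the tick
// and symbol at which the new function was entered; if no symbol is
// found, the PC itself is used as the label.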
void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (!debugSymbolTable)
        return;

    // if pc enters different function, print new function symbol and
    // update saved range.  Otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        string sym_str;
        bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
                                                         currentFunctionStart,
                                                         currentFunctionEnd);

        if (!found) {
            // no symbol found: use addr as label
            sym_str = csprintf("0x%x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        }

        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick - functionEntryTick, curTick, sym_str);
        functionEntryTick = curTick;
    }
}