base.cc revision 11526
/*
 * Copyright (c) 2011-2012,2016 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 *          Rick Strong
 */

#include <iostream>
#include <sstream>
#include <string>

#include "arch/tlb.hh"
#include "base/cprintf.hh"
#include "base/loader/symtab.hh"
#include "base/misc.hh"
#include "base/output.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/cpuevent.hh"
#include "cpu/profile.hh"
#include "cpu/thread_context.hh"
#include "debug/Mwait.hh"
#include "debug/SyscallVerbose.hh"
#include "mem/page_table.hh"
#include "params/BaseCPU.hh"
#include "sim/clocked_object.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"
#include "sim/sim_events.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

// Hack
#include "sim/stat_control.hh"
using namespace std;

vector<BaseCPU *> BaseCPU::cpuList;

// This variable reflects the max number of threads in any CPU.  Be
// careful to only use it once all the CPUs that you care about have
// been initialized.
int maxThreadsPerCPU = 1;

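// Event that periodically reports the CPU's progress: how many
// instructions have been committed since the last report.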
CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
    : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0),
      cpu(_cpu), _repeatEvent(true)
{
    if (_interval)
        cpu->schedule(this, curTick() + _interval);
}

void
CPUProgressEvent::process()
{
    Counter temp = cpu->totalOps();

    if (_repeatEvent)
        cpu->schedule(this, curTick() + _interval);

    if (cpu->switchedOut()) {
        return;
    }

#ifndef NDEBUG
    double ipc = double(temp - lastNumInst) / (_interval / cpu->clockPeriod());

    DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
             "%lli, IPC: %0.8f\n", cpu->name(), temp, temp - lastNumInst,
             ipc);
    ipc = 0.0;
#else
    cprintf("%lli: %s progress event, total committed:%i, progress insts "
            "committed: %lli\n", curTick(), cpu->name(), temp,
            temp - lastNumInst);
#endif
    lastNumInst = temp;
}

const char *
CPUProgressEvent::description() const
{
    return "CPU Progress";
}

BaseCPU::BaseCPU(Params *p, bool is_checker)
    : MemObject(p), instCnt(0), _cpuId(p->cpu_id), _socketId(p->socket_id),
      _instMasterId(p->system->getMasterId(name() + ".inst")),
      _dataMasterId(p->system->getMasterId(name() + ".data")),
      _taskId(ContextSwitchTaskId::Unknown), _pid(invldPid),
      _switchedOut(p->switched_out), _cacheLineSize(p->system->cacheLineSize()),
      interrupts(p->interrupts), profileEvent(NULL),
      numThreads(p->numThreads), system(p->system),
      functionTraceStream(nullptr), currentFunctionStart(0),
      currentFunctionEnd(0), functionEntryTick(0),
      addressMonitor(p->numThreads)
{
    // if Python did not provide a valid ID, do it here
    if (_cpuId == -1) {
        _cpuId = cpuList.size();
    }

    // add self to global list of CPUs
    cpuList.push_back(this);

    DPRINTF(SyscallVerbose, "Constructing CPU with id %d, socket id %d\n",
                _cpuId, _socketId);

    if (numThreads > maxThreadsPerCPU)
        maxThreadsPerCPU = numThreads;

    // allocate per-thread instruction-based event queues
    comInstEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comInstEventQueue[tid] =
            new EventQueue("instruction-based event queue");

    //
    // set up instruction-count-based termination events, if any
    //
    if (p->max_insts_any_thread != 0) {
        const char *cause = "a thread reached the max instruction count";
        for (ThreadID tid = 0; tid < numThreads; ++tid)
            scheduleInstStop(tid, p->max_insts_any_thread, cause);
    }

    // Set up instruction-count-based termination events for SimPoints.
    // Typically there is more than one action point; Simulation.py is
    // responsible for taking the necessary actions upon exiting the
    // simulation loop.
    if (!p->simpoint_start_insts.empty()) {
        const char *cause = "simpoint starting point found";
        for (size_t i = 0; i < p->simpoint_start_insts.size(); ++i)
            scheduleInstStop(0, p->simpoint_start_insts[i], cause);
    }

    if (p->max_insts_all_threads != 0) {
        const char *cause = "all threads reached the max instruction count";

        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comInstEventQueue[tid]->schedule(event, p->max_insts_all_threads);
        }
    }

    // allocate per-thread load-based event queues
    comLoadEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comLoadEventQueue[tid] = new EventQueue("load-based event queue");

    //
    // set up load-count-based termination events, if any
    //
    if (p->max_loads_any_thread != 0) {
        const char *cause = "a thread reached the max load count";
        for (ThreadID tid = 0; tid < numThreads; ++tid)
            scheduleLoadStop(tid, p->max_loads_any_thread, cause);
    }

    if (p->max_loads_all_threads != 0) {
        const char *cause = "all threads reached the max load count";
        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comLoadEventQueue[tid]->schedule(event, p->max_loads_all_threads);
        }
    }

    functionTracingEnabled = false;
    if (p->function_trace) {
        const string fname = csprintf("ftrace.%s", name());
        functionTraceStream = simout.findOrCreate(fname)->stream();

        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->function_trace_start;

        if (p->function_trace_start == 0) {
            functionTracingEnabled = true;
        } else {
            typedef EventWrapper<BaseCPU, &BaseCPU::enableFunctionTrace> wrap;
            Event *event = new wrap(this, true);
            schedule(event, p->function_trace_start);
        }
    }

    // The interrupt controllers should always be present unless this
    // CPU is switched in later or is a checker CPU
    if (!params()->switched_out && !is_checker) {
        fatal_if(interrupts.size() != numThreads,
                 "CPU %s has %i interrupt controllers, but is expecting one "
                 "per thread (%i)\n",
                 name(), interrupts.size(), numThreads);
        for (ThreadID tid = 0; tid < numThreads; tid++)
            interrupts[tid]->setCPU(this);
    }

    if (FullSystem) {
        if (params()->profile)
            profileEvent = new ProfileEvent(this, params()->profile);
    }
    tracer = params()->tracer;

    if (params()->isa.size() != numThreads) {
        fatal("Number of ISAs (%i) assigned to the CPU does not equal number "
              "of threads (%i).\n", params()->isa.size(), numThreads);
    }
}

void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}

BaseCPU::~BaseCPU()
{
    delete profileEvent;
    delete[] comLoadEventQueue;
    delete[] comInstEventQueue;
}

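// Arm the per-thread address monitor used by monitor/mwait: remember
// the virtual address the thread wants to watch.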
void
BaseCPU::armMonitor(ThreadID tid, Addr address)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    monitor.armed = true;
    monitor.vAddr = address;
    monitor.pAddr = 0x0;
    DPRINTF(Mwait, "[tid:%d] Armed monitor (vAddr=0x%lx)\n", tid, address);
}

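// Suspend decision for mwait: if no wakeup has arrived since the
// monitor was armed, record the physical address of the monitored
// cache line and return true (the caller should suspend the thread);
// otherwise consume the pending wakeup and return false.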
bool
BaseCPU::mwait(ThreadID tid, PacketPtr pkt)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    if (!monitor.gotWakeup) {
        int block_size = cacheLineSize();
        uint64_t mask = ~((uint64_t)(block_size - 1));

        assert(pkt->req->hasPaddr());
        monitor.pAddr = pkt->getAddr() & mask;
        monitor.waiting = true;

        DPRINTF(Mwait, "[tid:%d] mwait called (vAddr=0x%lx, "
                "line's paddr=0x%lx)\n", tid, monitor.vAddr, monitor.pAddr);
        return true;
    } else {
        monitor.gotWakeup = false;
        return false;
    }
}

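// Atomic-mode variant of mwait: translate the monitored virtual
// address directly through the data TLB (no timing memory access) and
// arm the monitor with the resulting physical line address.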
void
BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, TheISA::TLB *dtb)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    Request req;
    Addr addr = monitor.vAddr;
    int block_size = cacheLineSize();
    uint64_t mask = ~((uint64_t)(block_size - 1));
    int size = block_size;

    // The address of the next line if it crosses a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, block_size);

    if (secondAddr > addr)
        size = secondAddr - addr;

    req.setVirt(0, addr, size, 0x0, dataMasterId(), tc->instAddr());

    // translate to physical address
    Fault fault = dtb->translateAtomic(&req, tc, BaseTLB::Read);
    assert(fault == NoFault);

    monitor.pAddr = req.getPaddr() & mask;
    monitor.waiting = true;

    DPRINTF(Mwait, "[tid:%d] mwait called (vAddr=0x%lx, line's paddr=0x%lx)\n",
            tid, monitor.vAddr, monitor.pAddr);
}

void
BaseCPU::init()
{
    if (!params()->switched_out) {
        registerThreadContexts();

        verifyMemoryMode();
    }
}

void
BaseCPU::startup()
{
    if (FullSystem) {
        if (!params()->switched_out && profileEvent)
            schedule(profileEvent, curTick());
    }

    if (params()->progress_interval) {
        new CPUProgressEvent(this, params()->progress_interval);
    }

    // Assumption: the CPU starts operating instantaneously, without
    // any startup latency
    if (ClockedObject::pwrState() == Enums::PwrState::UNDEFINED)
        ClockedObject::pwrState(Enums::PwrState::ON);
}

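// Helper that allocates a PMU probe point registered with this
// object's probe manager.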
ProbePoints::PMUUPtr
BaseCPU::pmuProbePoint(const char *name)
{
    ProbePoints::PMUUPtr ptr;
    ptr.reset(new ProbePoints::PMU(getProbeManager(), name));

    return ptr;
}

void
BaseCPU::regProbePoints()
{
    ppCycles = pmuProbePoint("Cycles");

    ppRetiredInsts = pmuProbePoint("RetiredInsts");
    ppRetiredLoads = pmuProbePoint("RetiredLoads");
    ppRetiredStores = pmuProbePoint("RetiredStores");
    ppRetiredBranches = pmuProbePoint("RetiredBranches");
}

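// Notify the PMU probe points for a committed instruction. Micro-ops
// count as a single retired instruction, reported when the last
// micro-op commits.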
void
BaseCPU::probeInstCommit(const StaticInstPtr &inst)
{
    if (!inst->isMicroop() || inst->isLastMicroop())
        ppRetiredInsts->notify(1);

    if (inst->isLoad())
        ppRetiredLoads->notify(1);

    if (inst->isStore())
        ppRetiredStores->notify(1);

    if (inst->isControl())
        ppRetiredBranches->notify(1);
}

void
BaseCPU::regStats()
{
    MemObject::regStats();

    using namespace Stats;

    numCycles
        .name(name() + ".numCycles")
        .desc("number of cpu cycles simulated")
        ;

    numWorkItemsStarted
        .name(name() + ".numWorkItemsStarted")
        .desc("number of work items this cpu started")
        ;

    numWorkItemsCompleted
        .name(name() + ".numWorkItemsCompleted")
        .desc("number of work items this cpu completed")
        ;

    int size = threadContexts.size();
    if (size > 1) {
        for (int i = 0; i < size; ++i) {
            stringstream namestr;
            ccprintf(namestr, "%s.ctx%d", name(), i);
            threadContexts[i]->regStats(namestr.str());
        }
    } else if (size == 1)
        threadContexts[0]->regStats(name());
}

BaseMasterPort &
BaseCPU::getMasterPort(const string &if_name, PortID idx)
{
    // Get the right port based on name. This applies to all the
    // subclasses of the base CPU and relies on their implementation
    // of getDataPort and getInstPort. In all cases these methods
    // return a MasterPort pointer.
    if (if_name == "dcache_port")
        return getDataPort();
    else if (if_name == "icache_port")
        return getInstPort();
    else
        return MemObject::getMasterPort(if_name, idx);
}

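// Register this CPU's thread contexts with the system so each one is
// assigned a global context ID; in syscall-emulation mode, also tell
// each thread's process which context it owns.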
void
BaseCPU::registerThreadContexts()
{
    assert(system->multiThread || numThreads == 1);

    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        ThreadContext *tc = threadContexts[tid];

        if (system->multiThread) {
            tc->setContextId(system->registerThreadContext(tc));
        } else {
            tc->setContextId(system->registerThreadContext(tc, _cpuId));
        }

        if (!FullSystem)
            tc->getProcessPtr()->assignThreadContext(tc->contextId());
    }
}

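// Return the thread ID of the given context on this CPU; note that a
// context that is not found maps to thread 0.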
int
BaseCPU::findContext(ThreadContext *tc)
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        if (tc == threadContexts[tid])
            return tid;
    }
    return 0;
}

void
BaseCPU::activateContext(ThreadID thread_num)
{
    // For any active thread running, update CPU power state to active (ON)
    ClockedObject::pwrState(Enums::PwrState::ON);
}

void
BaseCPU::suspendContext(ThreadID thread_num)
{
    // Check if all threads are suspended
    for (auto t : threadContexts) {
        if (t->status() != ThreadContext::Suspended) {
            return;
        }
    }

    // All CPU threads suspended, enter lower power state for the CPU
    ClockedObject::pwrState(Enums::PwrState::CLK_GATED);
}

void
BaseCPU::switchOut()
{
    assert(!_switchedOut);
    _switchedOut = true;
    if (profileEvent && profileEvent->scheduled())
        deschedule(profileEvent);

    // Flush all TLBs in the CPU to avoid having stale translations if
    // it gets switched in later.
    flushTLBs();
}

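// Take over state from a switched-out CPU: thread contexts, interrupt
// controllers, TLB state, and the instruction/data memory ports are
// all transferred from oldCPU to this CPU.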
void
BaseCPU::takeOverFrom(BaseCPU *oldCPU)
{
    assert(threadContexts.size() == oldCPU->threadContexts.size());
    assert(_cpuId == oldCPU->cpuId());
    assert(_switchedOut);
    assert(oldCPU != this);
    _pid = oldCPU->getPid();
    _taskId = oldCPU->taskId();
    _switchedOut = false;

    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *newTC = threadContexts[i];
        ThreadContext *oldTC = oldCPU->threadContexts[i];

        newTC->takeOverFrom(oldTC);

        CpuEvent::replaceThreadContext(oldTC, newTC);

        assert(newTC->contextId() == oldTC->contextId());
        assert(newTC->threadId() == oldTC->threadId());
        system->replaceThreadContext(newTC, newTC->contextId());

        /* This code no longer works since the zero register (e.g.,
         * r31 on Alpha) doesn't necessarily contain zero at this
         * point.
           if (DTRACE(Context))
            ThreadContext::compare(oldTC, newTC);
        */

        BaseMasterPort *old_itb_port = oldTC->getITBPtr()->getMasterPort();
        BaseMasterPort *old_dtb_port = oldTC->getDTBPtr()->getMasterPort();
        BaseMasterPort *new_itb_port = newTC->getITBPtr()->getMasterPort();
        BaseMasterPort *new_dtb_port = newTC->getDTBPtr()->getMasterPort();

        // Move over any table walker ports if they exist
        if (new_itb_port) {
            assert(!new_itb_port->isConnected());
            assert(old_itb_port);
            assert(old_itb_port->isConnected());
            BaseSlavePort &slavePort = old_itb_port->getSlavePort();
            old_itb_port->unbind();
            new_itb_port->bind(slavePort);
        }
        if (new_dtb_port) {
            assert(!new_dtb_port->isConnected());
            assert(old_dtb_port);
            assert(old_dtb_port->isConnected());
            BaseSlavePort &slavePort = old_dtb_port->getSlavePort();
            old_dtb_port->unbind();
            new_dtb_port->bind(slavePort);
        }
        newTC->getITBPtr()->takeOverFrom(oldTC->getITBPtr());
        newTC->getDTBPtr()->takeOverFrom(oldTC->getDTBPtr());

        // Check whether or not we have to transfer CheckerCPU
        // objects over in the switch
        CheckerCPU *oldChecker = oldTC->getCheckerCpuPtr();
        CheckerCPU *newChecker = newTC->getCheckerCpuPtr();
        if (oldChecker && newChecker) {
            BaseMasterPort *old_checker_itb_port =
                oldChecker->getITBPtr()->getMasterPort();
            BaseMasterPort *old_checker_dtb_port =
                oldChecker->getDTBPtr()->getMasterPort();
            BaseMasterPort *new_checker_itb_port =
                newChecker->getITBPtr()->getMasterPort();
            BaseMasterPort *new_checker_dtb_port =
                newChecker->getDTBPtr()->getMasterPort();

            newChecker->getITBPtr()->takeOverFrom(oldChecker->getITBPtr());
            newChecker->getDTBPtr()->takeOverFrom(oldChecker->getDTBPtr());

            // Move over any table walker ports if they exist for checker
            if (new_checker_itb_port) {
                assert(!new_checker_itb_port->isConnected());
                assert(old_checker_itb_port);
                assert(old_checker_itb_port->isConnected());
                BaseSlavePort &slavePort =
                    old_checker_itb_port->getSlavePort();
                old_checker_itb_port->unbind();
                new_checker_itb_port->bind(slavePort);
            }
            if (new_checker_dtb_port) {
                assert(!new_checker_dtb_port->isConnected());
                assert(old_checker_dtb_port);
                assert(old_checker_dtb_port->isConnected());
                BaseSlavePort &slavePort =
                    old_checker_dtb_port->getSlavePort();
                old_checker_dtb_port->unbind();
                new_checker_dtb_port->bind(slavePort);
            }
        }
    }

    interrupts = oldCPU->interrupts;
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        interrupts[tid]->setCPU(this);
    }
    oldCPU->interrupts.clear();

    if (FullSystem) {
        for (ThreadID i = 0; i < size; ++i)
            threadContexts[i]->profileClear();

        if (profileEvent)
            schedule(profileEvent, curTick());
    }

    // All CPUs have an instruction and a data port, and the new CPU's
    // ports are dangling while the old CPU has its ports connected
    // already. Unbind the old CPU and then bind the ports of the one
    // we are switching to.
    assert(!getInstPort().isConnected());
    assert(oldCPU->getInstPort().isConnected());
    BaseSlavePort &inst_peer_port = oldCPU->getInstPort().getSlavePort();
    oldCPU->getInstPort().unbind();
    getInstPort().bind(inst_peer_port);

    assert(!getDataPort().isConnected());
    assert(oldCPU->getDataPort().isConnected());
    BaseSlavePort &data_peer_port = oldCPU->getDataPort().getSlavePort();
    oldCPU->getDataPort().unbind();
    getDataPort().bind(data_peer_port);
}

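// Invalidate all TLB entries for every thread on this CPU, including
// the TLBs of any associated checker CPUs.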
void
BaseCPU::flushTLBs()
{
    for (ThreadID i = 0; i < threadContexts.size(); ++i) {
        ThreadContext &tc(*threadContexts[i]);
        CheckerCPU *checker(tc.getCheckerCpuPtr());

        tc.getITBPtr()->flushAll();
        tc.getDTBPtr()->flushAll();
        if (checker) {
            checker->getITBPtr()->flushAll();
            checker->getDTBPtr()->flushAll();
        }
    }
}

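// Periodic full-system profiling event: takes a profile sample from
// every thread context and then reschedules itself.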
BaseCPU::ProfileEvent::ProfileEvent(BaseCPU *_cpu, Tick _interval)
    : cpu(_cpu), interval(_interval)
{ }

void
BaseCPU::ProfileEvent::process()
{
    ThreadID size = cpu->threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = cpu->threadContexts[i];
        tc->profileSample();
    }

    cpu->schedule(this, curTick() + interval);
}

void
BaseCPU::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        /* Unlike _pid, _taskId is not serialized, as task ids are
         * dynamically assigned unique ids that are only meaningful for
         * the duration of a specific run. We would need to serialize
         * the entire taskMap in system to preserve them. */
        SERIALIZE_SCALAR(_pid);

        // Serialize the threads, this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->serialize(cp);
            serializeThread(cp, i);
        }
    }
}

void
BaseCPU::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        UNSERIALIZE_SCALAR(_pid);

        // Unserialize the threads, this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->unserialize(cp);
            unserializeThread(cp, i);
        }
    }
}

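// Schedule a simulation-loop exit event on this thread's
// instruction-based event queue, 'insts' committed instructions from
// now.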
void
BaseCPU::scheduleInstStop(ThreadID tid, Counter insts, const char *cause)
{
    const Tick now(comInstEventQueue[tid]->getCurTick());
    Event *event(new LocalSimLoopExitEvent(cause, 0));

    comInstEventQueue[tid]->schedule(event, now + insts);
}

uint64_t
BaseCPU::getCurrentInstCount(ThreadID tid)
{
    return Tick(comInstEventQueue[tid]->getCurTick());
}

AddressMonitor::AddressMonitor()
{
    armed = false;
    waiting = false;
    gotWakeup = false;
}

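// Called when the monitored line may have been written or invalidated:
// if an armed, waiting monitor matches the packet's physical address,
// clear the waiting flag and report that the core should be woken up.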
bool
AddressMonitor::doMonitor(PacketPtr pkt)
{
    assert(pkt->req->hasPaddr());
    if (armed && waiting) {
        if (pAddr == pkt->getAddr()) {
            DPRINTF(Mwait, "pAddr=0x%lx invalidated: waking up core\n",
                    pkt->getAddr());
            waiting = false;
            return true;
        }
    }
    return false;
}

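// Counterpart of scheduleInstStop for committed loads: schedule a
// simulation-loop exit on this thread's load-based event queue.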
void
BaseCPU::scheduleLoadStop(ThreadID tid, Counter loads, const char *cause)
{
    const Tick now(comLoadEventQueue[tid]->getCurTick());
    Event *event(new LocalSimLoopExitEvent(cause, 0));

    comLoadEventQueue[tid]->schedule(event, now + loads);
}

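// Emit one function-trace record each time the PC leaves the address
// range of the current symbol.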
void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (!debugSymbolTable)
        return;

    // If the PC enters a different function, print the new function
    // symbol and update the saved range.  Otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        string sym_str;
        bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
                                                         currentFunctionStart,
                                                         currentFunctionEnd);

        if (!found) {
            // no symbol found: use addr as label
            sym_str = csprintf("0x%x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        }

        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick() - functionEntryTick, curTick(), sym_str);
        functionEntryTick = curTick();
    }
}